From b7b33d415c24cb866e0801639b0d0df861d9845e Mon Sep 17 00:00:00 2001
From: Ad-Bean
Date: Tue, 19 Nov 2024 15:50:41 -0500
Subject: [PATCH 01/10] feat(ci): add workflow for broken links check
---
.github/workflows/brokenlinks-check.yml | 29 +++++++++++++++++++++++++
1 file changed, 29 insertions(+)
create mode 100644 .github/workflows/brokenlinks-check.yml
diff --git a/.github/workflows/brokenlinks-check.yml b/.github/workflows/brokenlinks-check.yml
new file mode 100644
index 00000000..d44a3498
--- /dev/null
+++ b/.github/workflows/brokenlinks-check.yml
@@ -0,0 +1,29 @@
+name: Broken Links Check
+
+on:
+ push:
+ branches:
+ - main
+ pull_request:
+ branches:
+ - main
+
+jobs:
+ broken-links-check:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v2
+
+ - name: Use Node.js ${{ matrix.node-version }}
+ uses: actions/setup-node@v3
+ with:
+ node-version: ${{ matrix.node-version }}
+ cache: "yarn"
+
+ - name: Install Mintlify
+ run: npm i -g mintlify
+
+ - name: Run Broken Links Check
+ run: mintlify check-links
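As committed, this workflow references `${{ matrix.node-version }}` without declaring a `strategy.matrix`, so the expression expands to an empty string and `actions/setup-node` falls back to the runner's default Node.js. A minimal sketch of the matrix block the expression assumes (the version list is hypothetical; patch 02 below instead drops the matrix reference and pins a single Node version):

```yaml
# Hypothetical strategy block for the broken-links-check job.
# Patch 02 takes the other route: it removes the matrix reference entirely.
strategy:
  matrix:
    node-version: [18.x, 20.x]
```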
From 9ab58014035f8334bcb959f93616f5a7c761d6a7 Mon Sep 17 00:00:00 2001
From: Ad-Bean
Date: Tue, 19 Nov 2024 15:54:18 -0500
Subject: [PATCH 02/10] chore(ci): update broken links check workflow to use
latest actions and Node.js version
---
.github/workflows/brokenlinks-check.yml | 15 +++++----------
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/.github/workflows/brokenlinks-check.yml b/.github/workflows/brokenlinks-check.yml
index d44a3498..f39a52ad 100644
--- a/.github/workflows/brokenlinks-check.yml
+++ b/.github/workflows/brokenlinks-check.yml
@@ -1,12 +1,7 @@
name: Broken Links Check
on:
- push:
- branches:
- - main
pull_request:
- branches:
- - main
jobs:
broken-links-check:
@@ -14,13 +9,13 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
- - name: Use Node.js ${{ matrix.node-version }}
- uses: actions/setup-node@v3
+ - name: Use Node.js
+ uses: actions/setup-node@v4
with:
- node-version: ${{ matrix.node-version }}
- cache: "yarn"
+ node-version: "20.x"
+ cache: "npm"
- name: Install Mintlify
run: npm i -g mintlify
From d6f1d18f22b61e467d273902a2f0ca1c186a66ac Mon Sep 17 00:00:00 2001
From: Ad-Bean
Date: Tue, 19 Nov 2024 15:55:41 -0500
Subject: [PATCH 03/10] chore(ci): remove npm cache from broken links check
workflow
---
.github/workflows/brokenlinks-check.yml | 1 -
1 file changed, 1 deletion(-)
diff --git a/.github/workflows/brokenlinks-check.yml b/.github/workflows/brokenlinks-check.yml
index f39a52ad..3a391834 100644
--- a/.github/workflows/brokenlinks-check.yml
+++ b/.github/workflows/brokenlinks-check.yml
@@ -15,7 +15,6 @@ jobs:
uses: actions/setup-node@v4
with:
node-version: "20.x"
- cache: "npm"
- name: Install Mintlify
run: npm i -g mintlify
From 87e7fc69b1bd4ec0bd035a0732e3614cb4a95a56 Mon Sep 17 00:00:00 2001
From: Ad-Bean
Date: Tue, 19 Nov 2024 16:17:44 -0500
Subject: [PATCH 04/10] feat(ci): update broken links check command and
configure npm global path
---
.github/workflows/brokenlinks-check.yml | 4 +++-
.gitignore | 1 +
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/brokenlinks-check.yml b/.github/workflows/brokenlinks-check.yml
index 3a391834..a673f1de 100644
--- a/.github/workflows/brokenlinks-check.yml
+++ b/.github/workflows/brokenlinks-check.yml
@@ -15,9 +15,11 @@ jobs:
uses: actions/setup-node@v4
with:
node-version: "20.x"
+ path: ~/.npm-global
+ key: ${{ runner.os }}-build-${{ env.cache-name }}
- name: Install Mintlify
run: npm i -g mintlify
- name: Run Broken Links Check
- run: mintlify check-links
+ run: mintlify broken-links
diff --git a/.gitignore b/.gitignore
index 9414550b..4a1230ed 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,4 @@
.DS_Store
*.py
rename_files.py.save
+node_modules/
\ No newline at end of file
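For reference, applying patches 01-04 leaves the workflow file roughly as follows (a reconstruction from the hunks above, not part of any commit). Note that `path` and `key` are inputs of `actions/cache`, not `actions/setup-node`, so as written they would only produce "unexpected input" warnings and no cache is configured; `env.cache-name` is also never defined anywhere in the workflow:

```yaml
name: Broken Links Check

on:
  pull_request:

jobs:
  broken-links-check:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Use Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "20.x"
          path: ~/.npm-global # actions/cache input; setup-node ignores it with a warning
          key: ${{ runner.os }}-build-${{ env.cache-name }} # env.cache-name is never defined

      - name: Install Mintlify
        run: npm i -g mintlify

      - name: Run Broken Links Check
        run: mintlify broken-links
```

The same check can be reproduced locally from the docs root with `npm i -g mintlify && mintlify broken-links`.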
From 0107005d0ba464939c936548ea86348715d274c9 Mon Sep 17 00:00:00 2001
From: WanYixian
Date: Mon, 25 Nov 2024 16:11:34 +0800
Subject: [PATCH 05/10] save work
---
changelog/product-lifecycle.mdx | 66 +++++++++----------
changelog/release-notes.mdx | 8 +--
client-libraries/go.mdx | 2 +-
client-libraries/java.mdx | 2 +-
client-libraries/nodejs.mdx | 12 ++--
client-libraries/python.mdx | 2 +-
client-libraries/ruby.mdx | 2 +-
cloud/choose-a-project-plan.mdx | 4 +-
cloud/develop-overview.mdx | 4 +-
cloud/manage-sinks.mdx | 2 +-
cloud/project-byoc.mdx | 2 +-
delivery/overview.mdx | 22 ++++---
deploy/risingwave-kubernetes.mdx | 2 +-
faq/faq-using-risingwave.mdx | 2 +-
get-started/intro.mdx | 2 +-
get-started/rw-premium-edition-intro.mdx | 8 +--
.../change-data-capture-with-risingwave.mdx | 2 +-
ingestion/overview.mdx | 4 +-
ingestion/supported-sources-and-formats.mdx | 10 +--
integrations/destinations/apache-iceberg.mdx | 2 +-
integrations/destinations/apache-kafka.mdx | 2 +-
integrations/destinations/apache-pulsar.mdx | 2 +-
integrations/destinations/aws-kinesis.mdx | 2 +-
integrations/destinations/aws-s3.mdx | 2 +-
integrations/destinations/azure-blob.mdx | 2 +-
integrations/destinations/clickhouse.mdx | 4 +-
integrations/destinations/delta-lake.mdx | 2 +-
.../destinations/google-cloud-storage.mdx | 2 +-
integrations/destinations/mysql.mdx | 4 +-
integrations/destinations/postgresql.mdx | 4 +-
integrations/destinations/webhdfs.mdx | 2 +-
integrations/sources/amazon-msk.mdx | 2 +-
integrations/sources/apache-iceberg.mdx | 2 +-
integrations/sources/confluent-cloud.mdx | 2 +-
integrations/sources/instaclustr-kafka.mdx | 2 +-
integrations/sources/mysql-cdc.mdx | 2 +-
integrations/sources/overview.mdx | 2 +-
integrations/sources/postgresql-cdc.mdx | 2 +-
integrations/sources/supabase-cdc.mdx | 2 +-
.../visualization/beekeeper-studio.mdx | 2 +-
integrations/visualization/dbeaver.mdx | 2 +-
integrations/visualization/grafana.mdx | 2 +-
integrations/visualization/looker.mdx | 2 +-
integrations/visualization/metabase.mdx | 2 +-
integrations/visualization/superset.mdx | 2 +-
operate/alter-streaming.mdx | 2 +-
operate/meta-backup.mdx | 2 +-
performance/performance-best-practices.mdx | 2 +-
.../maintain-wide-table-with-table-sinks.mdx | 4 +-
processing/sql/temporal-filters.mdx | 2 +-
reference/key-concepts.mdx | 2 +-
sql/commands/overview.mdx | 6 +-
sql/commands/sql-as-changelog.mdx | 2 +-
sql/commands/sql-begin.mdx | 2 +-
sql/commands/sql-commit.mdx | 2 +-
sql/commands/sql-create-mv.mdx | 2 +-
sql/commands/sql-create-secret.mdx | 2 +-
sql/commands/sql-create-sink.mdx | 18 ++---
sql/commands/sql-create-source.mdx | 4 +-
sql/commands/sql-drop-secret.mdx | 2 +-
sql/data-types/overview.mdx | 2 +-
sql/functions/window-functions.mdx | 2 +-
sql/query-syntax/generated-columns.mdx | 2 +-
sql/query-syntax/group-by-clause.mdx | 2 +-
sql/query-syntax/value-exp.mdx | 4 +-
sql/system-catalogs/rw-catalog.mdx | 2 +-
sql/udfs/use-udfs-in-python.mdx | 2 +-
67 files changed, 146 insertions(+), 140 deletions(-)
diff --git a/changelog/product-lifecycle.mdx b/changelog/product-lifecycle.mdx
index 68168e13..e70447d6 100644
--- a/changelog/product-lifecycle.mdx
+++ b/changelog/product-lifecycle.mdx
@@ -22,38 +22,38 @@ Below is a list of all features in the public preview phase:
| Feature name | Start version |
| :-- | :-- |
-| [Shared source](/sql/commands/sql-create-source/#shared-source) | 2.1 |
-| [ASOF join](/docs/current/query-syntax-join-clause/#asof-joins) | 2.1 |
-| [Partitioned Postgres CDC table](/docs/current/ingest-from-postgres-cdc/) | 2.1 |
-| [Map type](/docs/current/data-type-map/) | 2.0 |
-| [Azure Blob sink](/docs/current/sink-to-azure-blob/) | 2.0 |
-| [Approx percentile](/docs/current/sql-function-aggregate/#approx_percentile) | 2.0 |
-| [Auto schema change in MySQL CDC](/docs/current/ingest-from-mysql-cdc/#automatically-change-schema) | 2.0 |
-| [SQL Server CDC source](/docs/current/ingest-from-sqlserver-cdc/) | 2.0 |
-| [Sink data in parquet format](/docs/current/data-delivery/#sink-data-in-parquet-format) | 2.0 |
-| [Time travel queries](/docs/current/time-travel-queries/) | 2.0 |
-| [Manage secrets](/docs/current/manage-secrets/) | 2.0 |
-| [Amazon DynamoDB sink](../integrations/destinations/amazon-dynamodb) | 1.10 |
-| Auto-map upstream table schema in [MySQL](/docs/current/ingest-from-mysql-cdc/#automatically-map-upstream-table-schema) and [PostgreSQL](/docs/current/ingest-from-postgres-cdc/#automatically-map-upstream-table-schema) | 1.10 |
-| [Version column](/docs/current/sql-create-table/) | 1.9 |
-| [Snowflake sink](/docs/current/sink-to-snowflake/) | 1.9 |
-| [Subscription](/docs/current/subscription/) | 1.9 |
-| [RisingWave as PostgreSQL FDW](/docs/current/risingwave-as-postgres-fdw/) | 1.9 |
-| [Iceberg source](/docs/current/ingest-from-iceberg/) | 1.8 |
-| [Google BigQuery sink](/docs/current/sink-to-bigquery/) | 1.4 |
-| [SET BACKGROUND\_DDL command](/docs/current/sql-set-background-ddl/) | 1.3 |
-| [Decouple sinks](/docs/current/data-delivery/#sink-decoupling) | 1.3 |
-| [Pulsar sink](/docs/current/sink-to-pulsar/) | 1.3 |
-| [Cassandra sink](/docs/current/sink-to-cassandra/) | 1.2 |
-| [Elasticsearch sink](/docs/current/sink-to-elasticsearch/) | 1.2 |
-| [NATS sink](/docs/current/sink-to-nats/) | 1.2 |
-| [NATS source](/docs/current/ingest-from-nats/) | 1.2 |
-| [Append-only tables](/docs/current/sql-create-table/) | 1.1 |
-| [Emit on window close](/docs/current/emit-on-window-close/) | 1.1 |
-| [Read-only transactions](/docs/current/sql-start-transaction/) | 1.1 |
-| [AWS Kinesis sink](/docs/current/sink-to-aws-kinesis/) | 1.0 |
-| [CDC Citus source](/docs/current/ingest-from-citus-cdc/) | 0.19 |
-| [Iceberg sink](/docs/current/sink-to-iceberg/) | 0.18 |
-| [Pulsar source](/docs/current/ingest-from-pulsar/) | 0.1 |
+| [Shared source](/sql/commands/sql-create-source#shared-source) | 2.1 |
+| [ASOF join](/processing/sql/joins#asof-joins) | 2.1 |
+| [Partitioned Postgres CDC table](/integrations/sources/postgresql-cdc#ingest-data-from-a-partitioned-table) | 2.1 |
+| [Map type](/sql/data-types/map-type) | 2.0 |
+| [Azure Blob sink](/integrations/destinations/azure-blob) | 2.0 |
+| [Approx percentile](/sql/functions/aggregate#approx-percentile) | 2.0 |
+| [Auto schema change in MySQL CDC](/integrations/sources/mysql-cdc#automatically-change-schema) | 2.0 |
+| [SQL Server CDC source](/integrations/sources/sql-server-cdc) | 2.0 |
+| [Sink data in parquet encode](/delivery/overview#sink-data-in-parquet-or-json-encode) | 2.0 |
+| [Time travel queries](/processing/time-travel-queries) | 2.0 |
+| [Manage secrets](/operate/manage-secrets) | 2.0 |
+| [Amazon DynamoDB sink](/integrations/destinations/amazon-dynamodb) | 1.10 |
+| Auto-map upstream table schema in [MySQL](/integrations/sources/mysql-cdc#automatically-map-upstream-table-schema) and [PostgreSQL](/integrations/sources/postgresql-cdc#automatically-map-upstream-table-schema) | 1.10 |
+| [Version column](/sql/commands/sql-create-table#pk-conflict-behavior) | 1.9 |
+| [Snowflake sink](/integrations/destinations/snowflake) | 1.9 |
+| [Subscription](/delivery/subscription) | 1.9 |
+| [RisingWave as PostgreSQL FDW](/delivery/risingwave-as-postgres-fdw) | 1.9 |
+| [Iceberg source](/integrations/sources/apache-iceberg) | 1.8 |
+| [Google BigQuery sink](/integrations/destinations/bigquery) | 1.4 |
+| [SET BACKGROUND\_DDL command](/sql/commands/sql-set-background-ddl) | 1.3 |
+| [Decouple sinks](/delivery/overview#sink-decoupling) | 1.3 |
+| [Pulsar sink](/integrations/destinations/apache-pulsar) | 1.3 |
+| [Cassandra sink](/integrations/destinations/cassandra-or-scylladb) | 1.2 |
+| [Elasticsearch sink](/integrations/destinations/elasticsearch) | 1.2 |
+| [NATS sink](/integrations/destinations/nats-and-nats-jetstream) | 1.2 |
+| [NATS source](/integrations/sources/nats-jetstream) | 1.2 |
+| [Append-only tables](/sql/commands/sql-create-table#parameters) | 1.1 |
+| [Emit on window close](/processing/emit-on-window-close) | 1.1 |
+| [Read-only transactions](/sql/commands/sql-start-transaction) | 1.1 |
+| [AWS Kinesis sink](/integrations/destinations/aws-kinesis) | 1.0 |
+| [CDC Citus source](/integrations/sources/citus-cdc) | 0.19 |
+| [Iceberg sink](/integrations/destinations/apache-iceberg) | 0.18 |
+| [Pulsar source](/integrations/sources/pulsar) | 0.1 |
This table will be updated regularly to reflect the latest status of features as they progress through the release stages.
diff --git a/changelog/release-notes.mdx b/changelog/release-notes.mdx
index a3806a61..36e9f7c3 100644
--- a/changelog/release-notes.mdx
+++ b/changelog/release-notes.mdx
@@ -897,7 +897,7 @@ See the **Full Changelog** [here](https://github.com/risingwavelabs/risingwave/c
## Installation
-* Now, you can easily install RisingWave on your local machine with Homebrew by running `brew install risingwave`. See [Run RisingWave](/docs/current/get-started/#install-and-start-risingwave).
+* Now, you can easily install RisingWave on your local machine with Homebrew by running `brew install risingwave`. See [Run RisingWave](/get-started/quickstart#install-and-start-risingwave).
## Administration
@@ -1054,9 +1054,9 @@ See the **Full Changelog** [here](https://github.com/risingwavelabs/risingwave/c
## Connectors
-* Adds a new parameter `match_pattern` to the S3 connector. With the new parameter, users can specify the pattern to filter files that they want to ingest from S3 buckets. For documentation updates, see [Ingest data from S3 buckets](/docs/current/ingest-from-s3/). [#7565](https://github.com/risingwavelabs/risingwave/pull/7565)
-* Adds the PostgreSQL CDC connector. Users can use this connector to ingest data and CDC events from PostgreSQL directly. For documentation updates, see [Ingest data from PostgreSQL CDC](/docs/current/ingest-from-postgres-cdc/). [#6869](https://github.com/risingwavelabs/risingwave/pull/6869), [#7133](https://github.com/risingwavelabs/risingwave/pull/7133)
-* Adds the MySQL CDC connector. Users can use this connector to ingest data and CDC events from MySQL directly. For documentation updates, see [Ingest data from MySQL CDC](/docs/current/ingest-from-mysql-cdc/). [#6689](https://github.com/risingwavelabs/risingwave/pull/6689), [#6345](https://github.com/risingwavelabs/risingwave/pull/6345), [#6481](https://github.com/risingwavelabs/risingwave/pull/6481), [#7133](https://github.com/risingwavelabs/risingwave/pull/7133)
+* Adds a new parameter `match_pattern` to the S3 connector. With the new parameter, users can specify the pattern to filter files that they want to ingest from S3 buckets. For documentation updates, see [Ingest data from S3 buckets](/integrations/sources/s3). [#7565](https://github.com/risingwavelabs/risingwave/pull/7565)
+* Adds the PostgreSQL CDC connector. Users can use this connector to ingest data and CDC events from PostgreSQL directly. For documentation updates, see [Ingest data from PostgreSQL CDC](/integrations/sources/postgresql-cdc). [#6869](https://github.com/risingwavelabs/risingwave/pull/6869), [#7133](https://github.com/risingwavelabs/risingwave/pull/7133)
+* Adds the MySQL CDC connector. Users can use this connector to ingest data and CDC events from MySQL directly. For documentation updates, see [Ingest data from MySQL CDC](/integrations/sources/mysql-cdc). [#6689](https://github.com/risingwavelabs/risingwave/pull/6689), [#6345](https://github.com/risingwavelabs/risingwave/pull/6345), [#6481](https://github.com/risingwavelabs/risingwave/pull/6481), [#7133](https://github.com/risingwavelabs/risingwave/pull/7133)
* Adds the JDBC sink connector, with which users can sink data to MySQL, PostgreSQL, or other databases that are compliant with JDBC. [#6493](https://github.com/risingwavelabs/risingwave/pull/6493)
* Add new parameters to the Kafka sink connector.
* `force_append_only` : Specifies whether to force a sink to be append-only. [#7922](https://github.com/risingwavelabs/risingwave/pull/7922)
diff --git a/client-libraries/go.mdx b/client-libraries/go.mdx
index 23b09c7a..391d1373 100644
--- a/client-libraries/go.mdx
+++ b/client-libraries/go.mdx
@@ -9,7 +9,7 @@ In this guide, we use the [`pgx` driver](https://github.com/jackc/pgx) to connec
## Run RisingWave
-To learn about how to run RisingWave, see [Run RisingWave](../get-started/quickstart.mdx).
+To learn about how to run RisingWave, see [Run RisingWave](/get-started/quickstart).
## Install the `pgx` driver
diff --git a/client-libraries/java.mdx b/client-libraries/java.mdx
index 0d10b413..5608276b 100644
--- a/client-libraries/java.mdx
+++ b/client-libraries/java.mdx
@@ -9,7 +9,7 @@ In this guide, we use the [PostgreSQL JDBC](https://jdbc.postgresql.org/) driver
## Run RisingWave
-To learn about how to run RisingWave, see [Run RisingWave](../get-started/quickstart.mdx).
+To learn about how to run RisingWave, see [Run RisingWave](/get-started/quickstart).
> You do not need to connect to RisingWave at this stage.
## Download the PostgreSQL JDBC driver
diff --git a/client-libraries/nodejs.mdx b/client-libraries/nodejs.mdx
index 25840828..e99df00f 100644
--- a/client-libraries/nodejs.mdx
+++ b/client-libraries/nodejs.mdx
@@ -9,7 +9,7 @@ In this guide, we use the [Node.js pg driver](https://www.npmjs.com/package/pg)
## Run RisingWave
-To learn about how to run RisingWave, see [Run RisingWave](../get-started/quickstart.mdx).
+To learn about how to run RisingWave, see [Run RisingWave](/get-started/quickstart).
## Install npm
@@ -19,11 +19,11 @@ npm install pg
## Connect to RisingWave
-:::note
+
You can use either a client or a connection pool to connect to RisingWave. If you are working on a web application that makes frequent queries, we recommend that you use a connection pool. The code examples in this topic use connection pools.
-:::
+
Connecting to RisingWave and running a query is normally done together. Therefore, we include a basic query in the code. Replace it with the query that you want to run.
@@ -51,7 +51,7 @@ start().catch(console.error);
## Create a source
-The code below creates a source `walk` with the [`datagen`](/ingest/ingest-from-datagen.md) connector. The `datagen` connector is used to generate mock data. The `walk` source consists of two columns, `distance` and `duration`, which respectively represent the distance and the duration of a walk. The source is a simplified version of the data that is tracked by smart watches.
+The code below creates a source `walk` with the [`datagen`](/ingestion/generate-test-data) connector. The `datagen` connector is used to generate mock data. The `walk` source consists of two columns, `distance` and `duration`, which respectively represent the distance and the duration of a walk. The source is a simplified version of the data that is tracked by smart watches.
```js
const { Pool } = require('pg')
@@ -85,11 +85,11 @@ const start = async () => {
start().catch(console.error);
```
-:::note
+
All the code examples in this guide include a section for connecting to RisingWave. If you run multiple queries within one connection session, you do not need to repeat the connection code.
-:::
+
## Create a materialized view
diff --git a/client-libraries/python.mdx b/client-libraries/python.mdx
index f634b6bb..3f5dab87 100644
--- a/client-libraries/python.mdx
+++ b/client-libraries/python.mdx
@@ -13,7 +13,7 @@ In this section, we use the [`psycopg2`](https://pypi.org/project/psycopg2/) dri
### Run RisingWave
-To learn about how to run RisingWave, see [Run RisingWave](../get-started/quickstart.mdx).
+To learn about how to run RisingWave, see [Run RisingWave](/get-started/quickstart).
### Install the `psycopg2` driver
diff --git a/client-libraries/ruby.mdx b/client-libraries/ruby.mdx
index 5504f1fe..165cbe5d 100644
--- a/client-libraries/ruby.mdx
+++ b/client-libraries/ruby.mdx
@@ -8,7 +8,7 @@ In this guide, we use the [`ruby-pg`](https://github.com/ged/ruby-pg) driver to
## Run RisingWave
-To learn about how to run RisingWave, see [Run RisingWave](../get-started/quickstart.mdx).
+To learn about how to run RisingWave, see [Run RisingWave](/get-started/quickstart).
## Install the `ruby-pg` driver
diff --git a/cloud/choose-a-project-plan.mdx b/cloud/choose-a-project-plan.mdx
index d0ad11ef..4a4625dc 100644
--- a/cloud/choose-a-project-plan.mdx
+++ b/cloud/choose-a-project-plan.mdx
@@ -79,7 +79,7 @@ You can choose the availability region closest to you to minimize latency.
Name of the project. Assigning a descriptive name to each project can be helpful when managing multiple projects.
* **Node configuration**
Configure each node's instance resources and numbers according to your actual needs.
-To learn more about the nodes, see the [architecture of RisingWave](/docs/current/architecture/).
+To learn more about the nodes, see the [architecture of RisingWave](/reference/architecture).
## Understanding nodes in RisingWave
@@ -91,7 +91,7 @@ RisingWave projects consist of three types of nodes, each serving a distinct rol
4. **Meta node**: Takes charge of managing the metadata of compute and compact nodes and orchestrating operations across the system.
5. **ETCD**: A distributed key-value store that provides a reliable way to store data across a project of machines. This node cannot be scaled manually after the project is created.
-For the architecture of RisingWave, see [RisingWave architecture](/docs/current/architecture/).
+For the architecture of RisingWave, see [RisingWave architecture](/reference/architecture).
## Pricing
diff --git a/cloud/develop-overview.mdx b/cloud/develop-overview.mdx
index e06c3eff..9efd8e46 100644
--- a/cloud/develop-overview.mdx
+++ b/cloud/develop-overview.mdx
@@ -50,7 +50,7 @@ Select the version of the corresponding docs when using the RisingWave user docs
See how RisingWave can integrate with your existing data stack. Vote for your favorite data tools and streaming services to help us prioritize the integration development. Connect to and ingest data from external sources such as databases and message brokers. See supported data sources.
- Stream processed data out of RisingWave to message brokers and databases. See supported data destinations.
+ Stream processed data out of RisingWave to message brokers and databases. See supported data destinations.
### Process data with RisingWave
@@ -119,7 +119,7 @@ Continue to learn about RisingWave.
RisingWave vs. Apache Flink
diff --git a/cloud/manage-sinks.mdx b/cloud/manage-sinks.mdx
index 0c60cee9..d902d906 100644
--- a/cloud/manage-sinks.mdx
+++ b/cloud/manage-sinks.mdx
@@ -3,7 +3,7 @@ title: "Manage sinks"
description: "To stream data out of RisingWave, you must create a sink. A sink refers to an external target that you can send data to. You can deliver data to downstream systems via our sink connectors."
---
-For the complete list of supported sink connectors and data formats, see [Data delivery](/docs/current/data-delivery/) in the RisingWave documentation.
+For the complete list of supported sink connectors and data formats, see [Data delivery](/delivery/overview) in the RisingWave documentation.
## Create a sink
diff --git a/cloud/project-byoc.mdx b/cloud/project-byoc.mdx
index 4e5ab6a8..a0a23d82 100644
--- a/cloud/project-byoc.mdx
+++ b/cloud/project-byoc.mdx
@@ -21,7 +21,7 @@ Follow the steps below to create your own cloud environment.
When you run the command `rwc byoc apply --name xxx`, it will deploy some resources in your AWS/GCP/Azure environment, such as AWS S3/Google Cloud Storage/Azure Blob Storage and EKS/GKE/AKS clusters. Please do not modify the configuration of these resources. If you encounter any issues during this process, please contact our [support team](mailto:cloud-support@risingwave-labs.com).
-5. Click **Next** to continue the configuration of cluster size and nodes. To learn more about the nodes, see the [architecture of RisingWave](/docs/current/architecture/).
+5. Click **Next** to continue the configuration of cluster size and nodes. To learn more about the nodes, see the [architecture of RisingWave](/reference/architecture).
6. Click **Next**, name your cluster, and execute the command that pops up to establish a BYOC cluster in your environment.
Once the cluster is successfully created, you can manage it through the portal just like hosted clusters.
diff --git a/delivery/overview.mdx b/delivery/overview.mdx
index bab9f6a9..2e8ee9cd 100644
--- a/delivery/overview.mdx
+++ b/delivery/overview.mdx
@@ -13,11 +13,11 @@ Currently, RisingWave supports the following sink connectors:
* Apache Doris sink connector (`connector = 'doris'`)
With this connector, you can sink data from RisingWave to Apache Doris. For details about the syntax and parameters, see [Sink data to Apache Doris](/docs/current/sink-to-doris/).
* Apache Iceberg sink connector (`connector = 'iceberg'`)
-With this connector, you can sink data from RisingWave to Apache Iceberg. For details about the syntax and parameters, see [Sink data to Apache Iceberg](/docs/current/sink-to-iceberg/).
+With this connector, you can sink data from RisingWave to Apache Iceberg. For details about the syntax and parameters, see [Sink data to Apache Iceberg](/integrations/destinations/apache-iceberg).
* AWS Kinesis sink connector (`connector = 'kinesis'`)
-With this connector, you can sink data from RisingWave to AWS Kinesis. For details about the syntax and parameters, see [Sink data to AWS Kinesis](/docs/current/sink-to-aws-kinesis/).
+With this connector, you can sink data from RisingWave to AWS Kinesis. For details about the syntax and parameters, see [Sink data to AWS Kinesis](/integrations/destinations/aws-kinesis).
* Cassandra and ScyllaDB sink connector (`connector = 'cassandra'`)
-With this connector, you can sink data from RisingWave to Cassandra or ScyllaDB. For details about the syntax and parameters, see [Sink data to Cassandra or ScyllaDB](/docs/current/sink-to-cassandra/).
+With this connector, you can sink data from RisingWave to Cassandra or ScyllaDB. For details about the syntax and parameters, see [Sink data to Cassandra or ScyllaDB](/integrations/destinations/cassandra-or-scylladb).
* ClickHouse sink connector (`connector = 'clickhouse'`)
With this connector, you can sink data from RisingWave to ClickHouse. For details about the syntax and parameters, see [Sink data to ClickHouse](/docs/current/sink-to-clickhouse/).
* CockroachDB sink connector (`connector = 'jdbc'`)
@@ -25,9 +25,9 @@ With this connector, you can sink data from RisingWave to CockroachDB. For detai
* Delta Lake sink connector (`connector = 'deltalake'`)
With this connector, you can sink data from RisingWave to Delta Lake. For details about the syntax and parameters, see [Sink data to Delta Lake](/docs/current/sink-to-delta-lake/).
* Elasticsearch sink connector (`connector = 'elasticsearch'`)
-With this connector, you can sink data from RisingWave to Elasticsearch. For details about the syntax and parameters, see [Sink data to Elasticsearch](/docs/current/sink-to-elasticsearch/).
+With this connector, you can sink data from RisingWave to Elasticsearch. For details about the syntax and parameters, see [Sink data to Elasticsearch](/integrations/destinations/elasticsearch).
* Google BigQuery sink connector (`connector = 'bigquery'`)
-With this connector, you can sink data from RisingWave to Google BigQuery. For details about the syntax and parameters, see [Sink data to Google BigQuery](/docs/current/sink-to-bigquery/).
+With this connector, you can sink data from RisingWave to Google BigQuery. For details about the syntax and parameters, see [Sink data to Google BigQuery](/integrations/destinations/bigquery).
* Google Pub/Sub sink connector (`connector = 'google_pubsub'`)
With this connector, you can sink data from RisingWave to Google Pub/Sub. For details about the syntax and parameters, see [Sink data to Google Pub/Sub](/docs/current/sink-to-google-pubsub/).
* JDBC sink connector for MySQL, PostgreSQL, or TiDB (`connector = 'jdbc'`)
@@ -37,13 +37,13 @@ With this connector, you can sink data from RisingWave to Kafka topics. For deta
* MQTT sink connector (`connector = 'mqtt'`)
With this connector, you can sink data from RisingWave to MQTT topics. For details about the syntax and parameters, see [Sink data to MQTT](/docs/current/sink-to-mqtt/).
* NATS sink connector (`connector = 'nats'`)
-With this connector, you can sink data from RisingWave to NATS. For details about the syntax and parameters, see [Sink data to NATS](/docs/current/sink-to-nats/).
+With this connector, you can sink data from RisingWave to NATS. For details about the syntax and parameters, see [Sink data to NATS](/integrations/destinations/nats-and-nats-jetstream).
* Pulsar sink connector (`connector = 'pulsar'`)
-With this connector, you can sink data from RisingWave to Pulsar. For details about the syntax and parameters, see [Sink data to Pulsar](/docs/current/sink-to-pulsar/).
+With this connector, you can sink data from RisingWave to Pulsar. For details about the syntax and parameters, see [Sink data to Pulsar](/integrations/destinations/apache-pulsar).
* Redis sink connector (`connector = 'redis'`)
With this connector, you can sink data from RisingWave to Redis. For details about the syntax and parameters, see [Sink data to Redis](/docs/current/sink-to-redis/).
* Snowflake sink connector (`connector = 'snowflake'`)
-With this connector, you can sink data from RisingWave to Snowflake. For details about the syntax and parameters, see [Sink data to Snowflake](/docs/current/sink-to-snowflake/).
+With this connector, you can sink data from RisingWave to Snowflake. For details about the syntax and parameters, see [Sink data to Snowflake](/integrations/destinations/snowflake).
* StarRocks sink connector (`connector = 'starrocks'`)
With this connector, you can sink data from RisingWave to StarRocks. For details about the syntax and parameters, see [Sink data to StarRocks](/docs/current/sink-to-starrocks/).
* Microsoft SQL Server sink connector (`connector = 'sqlserver'`)
@@ -55,6 +55,12 @@ Typically, sinks in RisingWave operate in a blocking manner. This means that if
Sink decoupling introduces a buffering queue between a RisingWave sink and the downstream system. This buffering mechanism helps maintain the stability and performance of the RisingWave instance, even when the downstream system is temporarily slow or unavailable.
+
+**PUBLIC PREVIEW**
+
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
+
+
The `sink_decouple` session variable can be specified to enable or disable sink decoupling. The default value for the session variable is `default`.
To enable sink decoupling for all sinks created in the session, set `sink_decouple` to `true` or `enable`.
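For example, a minimal sketch of toggling the variable in a session, using the values named above (the `false` form is assumed as the symmetric opposite):

```sql
-- Enable sink decoupling for all sinks created in this session.
SET sink_decouple = true;

-- Assumed symmetric form to turn it back off.
SET sink_decouple = false;
```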
diff --git a/deploy/risingwave-kubernetes.mdx b/deploy/risingwave-kubernetes.mdx
index be451032..2d7855b5 100644
--- a/deploy/risingwave-kubernetes.mdx
+++ b/deploy/risingwave-kubernetes.mdx
@@ -555,4 +555,4 @@ psql -h ${RISINGWAVE_HOST} -p ${RISINGWAVE_PORT} -d dev -U root
-Now you can ingest and transform streaming data. See [Quick start](/docs/current/get-started/) for details.
+Now you can ingest and transform streaming data. See [Quick start](/get-started/quickstart) for details.
diff --git a/faq/faq-using-risingwave.mdx b/faq/faq-using-risingwave.mdx
index afc96857..44d0156d 100644
--- a/faq/faq-using-risingwave.mdx
+++ b/faq/faq-using-risingwave.mdx
@@ -47,7 +47,7 @@ By continuously improving the reserved memory feature, we strive to offer a more
The execution time for the `CREATE MATERIALIZED VIEW` statement can vary based on several factors. Here are two common reasons:
-1. **Backfilling of historical data**: RisingWave ensures consistent snapshots across materialized views (MVs). So when a new MV is created, it backfills all historical data from the upstream MV or tables and calculate them, which takes some time. And the created DDL statement will only end when the backfill ends. You can run `SHOW JOBS;` in SQL to check the DDL progress. If you want the create statement to not wait for the process to finish and not block the session, you can execute `SET BACKGROUND_DDL=true;` before running the `CREATE MATERIALIZED VIEW` statement. See details in [SET BACKGROUND\_DDL](/docs/current/sql-set-background-ddl/). But please notice that the newly created MV is still invisible in the catalog until the end of backfill when `BACKGROUND_DDL=true`.
+1. **Backfilling of historical data**: RisingWave ensures consistent snapshots across materialized views (MVs). When a new MV is created, it backfills and processes all historical data from the upstream MVs or tables, which takes some time, and the `CREATE` statement only completes when the backfill finishes. You can run `SHOW JOBS;` in SQL to check the DDL progress. If you do not want the create statement to wait for the backfill or block the session, execute `SET BACKGROUND_DDL=true;` before running the `CREATE MATERIALIZED VIEW` statement. See details in [SET BACKGROUND\_DDL](/sql/commands/sql-set-background-ddl). Note that when `BACKGROUND_DDL=true`, the newly created MV remains invisible in the catalog until the backfill ends.
2. **High cluster latency**: If the cluster experiences high latency, it may take longer to apply changes to the streaming graph. If the `Progress` in the `SHOW JOBS;` result stays at 0.0%, high latency could be the cause. See details in [Troubleshoot high latency](/docs/current/troubleshoot-high-latency/).
diff --git a/get-started/intro.mdx b/get-started/intro.mdx
index 2b0045ab..ddbb74d4 100644
--- a/get-started/intro.mdx
+++ b/get-started/intro.mdx
@@ -74,7 +74,7 @@ RisingWave aims to help simplify event-driven architecture. You can think of Ris
-
+
diff --git a/get-started/rw-premium-edition-intro.mdx b/get-started/rw-premium-edition-intro.mdx
index 3dd36074..69fa5f69 100644
--- a/get-started/rw-premium-edition-intro.mdx
+++ b/get-started/rw-premium-edition-intro.mdx
@@ -18,17 +18,17 @@ RisingWave Premium 1.0 is the first major release of this new edition with sever
### SQL and security
-
+
### Schema management
-* Automatic schema mapping to the source tables for [PostgreSQL CDC](/docs/current/ingest-from-postgres-cdc/#automatically-map-upstream-table-schema) and [MySQL CDC](/docs/current/ingest-from-mysql-cdc/#automatically-map-upstream-table-schema)
-* [Automatic schema change for MySQL CDC](/docs/current/ingest-from-mysql-cdc/#automatically-change-schema)
+* Automatic schema mapping to the source tables for [PostgreSQL CDC](/integrations/sources/postgresql-cdc#automatically-map-upstream-table-schema) and [MySQL CDC](/integrations/sources/mysql-cdc#automatically-map-upstream-table-schema)
+* [Automatic schema change for MySQL CDC](/integrations/sources/mysql-cdc#automatically-change-schema)
* [AWS Glue Schema Registry](/docs/current/ingest-from-kafka/#read-schemas-from-aws-glue-schema-registry)
### Connectors
-
+
For users who are already using these features in 1.9.x or earlier versions, rest assured that the functionality of these features will be intact if you stay on the version. If you choose to upgrade to v2.0 or later versions, an error will show up to indicate you need a license to use the features.
diff --git a/ingestion/change-data-capture-with-risingwave.mdx b/ingestion/change-data-capture-with-risingwave.mdx
index 1cc4e9c3..83f5bb22 100644
--- a/ingestion/change-data-capture-with-risingwave.mdx
+++ b/ingestion/change-data-capture-with-risingwave.mdx
@@ -7,6 +7,6 @@ mode: wide
You can use event streaming systems like Apache Kafka, Pulsar, or Kinesis to stream changes from MySQL, PostgreSQL, and TiDB to RisingWave. In this case, you will need an additional CDC tool to stream the changes from the database and specify the corresponding formats when ingesting the streams into RisingWave.
-RisingWave also provides native MySQL and PostgreSQL CDC connectors. With these CDC connectors, you can ingest CDC data from these databases directly, without setting up additional services like Kafka. For complete step-to-step guides about using the native CDC connector to ingest MySQL and PostgreSQL data, see [Ingest data from MySQL](/docs/current/ingest-from-mysql-cdc/) and [Ingest data from PostgreSQL](/docs/current/ingest-from-postgres-cdc/). This topic only describes the configurations for using RisingWave to ingest CDC data from an event streaming system.
+RisingWave also provides native MySQL and PostgreSQL CDC connectors. With these CDC connectors, you can ingest CDC data from these databases directly, without setting up additional services like Kafka. For complete step-by-step guides on using the native CDC connectors to ingest MySQL and PostgreSQL data, see [Ingest data from MySQL](/integrations/sources/mysql-cdc) and [Ingest data from PostgreSQL](/integrations/sources/postgresql-cdc). This topic only describes the configurations for using RisingWave to ingest CDC data from an event streaming system.
For the supported sources and corresponding formats, see [Supported sources and formats](/docs/current/supported-sources-and-formats/).
diff --git a/ingestion/overview.mdx b/ingestion/overview.mdx
index 50cfc66a..0a5e29cf 100644
--- a/ingestion/overview.mdx
+++ b/ingestion/overview.mdx
@@ -69,7 +69,7 @@ WITH (
The statement will create a streaming job that continuously ingests data from the Kafka topic to the table and the data will be stored in RisingWave's internal storage, which brings the following benefits:
1. **Improved ad-hoc query performance:** When users execute queries such as `SELECT * FROM table_on_kafka`, the query engine will directly access the data from RisingWave's internal storage, eliminating unnecessary network overhead and avoiding read pressure on upstream systems. Additionally, users can create [indexes](/docs/current/indexes/) on the table to accelerate queries.
-2. **Allow defining primary keys:** With the help of its internal storage, RisingWave can efficiently maintain primary key constraints. Users can define a primary key on a specific column of the table and define different behaviors for primary key conflicts with [ON CONFLICT clause](/docs/current/sql-create-table/#pk-conflict-behavior).
+2. **Allow defining primary keys:** With the help of its internal storage, RisingWave can efficiently maintain primary key constraints. Users can define a primary key on a specific column of the table and define different behaviors for primary key conflicts with [ON CONFLICT clause](/sql/commands/sql-create-table#pk-conflict-behavior).
3. **Ability to handle delete/update changes**: Based on the definition of primary keys, RisingWave can efficiently process upstream synchronized delete and update operations. For streams that carry delete/update operations synchronized from external systems, such as a database's CDC stream or UPSERT-format messages from message queues, we **do not** allow creating a source on them; a table with connectors is required instead.
4. **Stronger consistency guarantee**: When using a table with connectors, all downstream jobs will be guaranteed to have a consistent view of the data persisted in the table; while for source, different jobs may see inconsistent results due to different ingestion speed or data retention in the external system.
5. **Greater flexibility**: Like regular tables, you can use DML statements like [INSERT](/docs/current/sql-insert/), [UPDATE](/docs/current/sql-update/) and [DELETE](/docs/current/sql-delete/) to insert or modify data in tables with connectors, and use [CREATE SINK INTO TABLE](/docs/current/sql-create-sink-into/) to merge other data streams into the table.
@@ -78,7 +78,7 @@ The statement will create a streaming job that continuously ingests data from th
### Insert data into tables
-You can load data in batch to RisingWave by creating a table ([CREATE TABLE](/docs/current/sql-create-table/)) and then inserting data into it ([INSERT](/docs/current/sql-insert/)). For example, the statement below creates a table `website_visits` and inserts 5 rows of data.
+You can load data in batches to RisingWave by creating a table ([CREATE TABLE](/sql/commands/sql-create-table)) and then inserting data into it ([INSERT](/docs/current/sql-insert/)). For example, the statement below creates a table `website_visits` and inserts 5 rows of data.
```sql
CREATE TABLE website_visits (
diff --git a/ingestion/supported-sources-and-formats.mdx b/ingestion/supported-sources-and-formats.mdx
index f51e89ea..a6a644e0 100644
--- a/ingestion/supported-sources-and-formats.mdx
+++ b/ingestion/supported-sources-and-formats.mdx
@@ -14,12 +14,12 @@ To ingest data in formats marked with "T", you need to create tables (with conne
| :------------ | :------------ | :------------------- |
| [Kafka](/docs/current/ingest-from-kafka/) | 3.1.0 or later versions | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Debezium AVRO](#debezium-avro) (T), [DEBEZIUM\_MONGO\_JSON](#debezium-mongo-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T), [Upsert JSON](#upsert-json) (T), [Upsert AVRO](#upsert-avro) (T), [Bytes](#bytes) |
| [Redpanda](/docs/current/ingest-from-redpanda/) | Latest | [Avro](#avro), [JSON](#json), [protobuf](#protobuf) |
-| [Pulsar](/docs/current/ingest-from-pulsar/) | 2.8.0 or later versions | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T) |
+| [Pulsar](/integrations/sources/pulsar) | 2.8.0 or later versions | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T) |
| [Kinesis](/docs/current/ingest-from-kinesis/) | Latest | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T) |
-| [PostgreSQL CDC](/docs/current/ingest-from-postgres-cdc/) | 10, 11, 12, 13, 14 | [Debezium JSON](#debezium-json) (T) |
-| [MySQL CDC](/docs/current/ingest-from-mysql-cdc/) | 5.7, 8.0 | [Debezium JSON](#debezium-json) (T) |
+| [PostgreSQL CDC](/integrations/sources/postgresql-cdc) | 10, 11, 12, 13, 14 | [Debezium JSON](#debezium-json) (T) |
+| [MySQL CDC](/integrations/sources/mysql-cdc) | 5.7, 8.0 | [Debezium JSON](#debezium-json) (T) |
| [CDC via Kafka](/docs/current/ingest-from-cdc/) | | [Debezium JSON](#debezium-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T) |
-| [Amazon S3](/docs/current/ingest-from-s3/) | Latest | [JSON](#json), CSV |
+| [Amazon S3](/integrations/sources/s3) | Latest | [JSON](#json), CSV |
| [Load generator](/docs/current/ingest-from-datagen/) | Built-in | [JSON](#json) |
| [Google Pub/Sub](/docs/current/ingest-from-google-pubsub/) | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T) | |
| [Google Cloud Storage](/docs/current/ingest-from-gcs/) | [JSON](#json) | |
@@ -53,7 +53,7 @@ ENCODE AVRO (
)
```
-You can ingest Avro map type into RisingWave [map type](/docs/current/data-type-map/) or jsonb:
+You can ingest Avro map type into RisingWave [map type](/sql/data-types/map-type) or jsonb:
```sql
FORMAT [ DEBEZIUM | UPSERT | PLAIN ] ENCODE AVRO (
diff --git a/integrations/destinations/apache-iceberg.mdx b/integrations/destinations/apache-iceberg.mdx
index 81839525..02e7ee5e 100644
--- a/integrations/destinations/apache-iceberg.mdx
+++ b/integrations/destinations/apache-iceberg.mdx
@@ -251,7 +251,7 @@ WITH (
) FORMAT PLAIN ENCODE JSON;
```
-Another option is to create an upsert table, which supports in-place updates. For more details on creating a table, see [CREATE TABLE](/docs/current/sql-create-table/) .
+Another option is to create an upsert table, which supports in-place updates. For more details on creating a table, see [CREATE TABLE](/sql/commands/sql-create-table).
```sql
CREATE TABLE s1_table (
diff --git a/integrations/destinations/apache-kafka.mdx b/integrations/destinations/apache-kafka.mdx
index 6057407c..b299beae 100644
--- a/integrations/destinations/apache-kafka.mdx
+++ b/integrations/destinations/apache-kafka.mdx
@@ -78,7 +78,7 @@ These options should be set in `FORMAT data_format ENCODE data_encode (key = 'va
| Field | Notes |
| :------------------------ | :-------------------------- |
-| data\_format | Data format. Allowed formats: <br/> `PLAIN`: Output data with insert operations. <br/> `DEBEZIUM`: Output change data capture (CDC) log in Debezium format. <br/> `UPSERT`: Output data as a changelog stream. `primary_key` must be specified in this case. <br/> To learn about when to define the primary key if creating an UPSERT sink, see the [Overview](/docs/current/data-delivery/). |
+| data\_format | Data format. Allowed formats: <br/> `PLAIN`: Output data with insert operations. <br/> `DEBEZIUM`: Output change data capture (CDC) log in Debezium format. <br/> `UPSERT`: Output data as a changelog stream. `primary_key` must be specified in this case. <br/> To learn about when to define the primary key if creating an UPSERT sink, see the [Overview](/delivery/overview). |
| data\_encode | Data encode. Allowed encodes: <br/> `JSON`: Supports `PLAIN JSON`, `UPSERT JSON` and `DEBEZIUM JSON` sinks. <br/> `AVRO`: Supports `UPSERT AVRO` and `PLAIN AVRO` sinks. <br/> `PROTOBUF`: Supports `PLAIN PROTOBUF` and `UPSERT PROTOBUF` sinks. <br/> For `UPSERT PROTOBUF` sinks, you must specify `key encode text`, while it remains optional for other format/encode combinations. |
| force\_append\_only | If true, forces the sink to be `PLAIN` (also known as append-only), even if it cannot be. |
| timestamptz.handling.mode | Controls the timestamptz output format. This parameter specifically applies to append-only or upsert sinks using JSON encoding. <br/> If omitted, the output format of timestamptz is `2023-11-11T18:30:09.453000Z` which includes the UTC suffix `Z`. <br/> When `utc_without_suffix` is specified, the format is changed to `2023-11-11 18:30:09.453000`. |
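To illustrate how these options combine, a minimal sketch of an upsert Kafka sink (broker address, topic, materialized view, and column names are all hypothetical):

```sql
-- Sketch only: mv1 and its user_id column are assumed to exist.
CREATE SINK sink1 FROM mv1
WITH (
    connector = 'kafka',
    properties.bootstrap.server = 'localhost:9092',
    topic = 'test_topic',
    primary_key = 'user_id'
) FORMAT UPSERT ENCODE JSON;
```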
diff --git a/integrations/destinations/apache-pulsar.mdx b/integrations/destinations/apache-pulsar.mdx
index 9c58ec36..47031d8f 100644
--- a/integrations/destinations/apache-pulsar.mdx
+++ b/integrations/destinations/apache-pulsar.mdx
@@ -59,7 +59,7 @@ These options should be set in `FORMAT data_format ENCODE data_encode (key = 'va
| Field | Notes |
| :------------------------ | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| data\_format | Data format. Allowed formats: <br/> `PLAIN`: Output data with insert operations. <br/> `DEBEZIUM`: Output change data capture (CDC) log in Debezium format. <br/> `UPSERT`: Output data as a changelog stream. `primary_key` must be specified in this case. <br/> To learn about when to define the primary key if creating an UPSERT sink, see the [Overview](/docs/current/data-delivery/). |
+| data\_format | Data format. Allowed formats: <br/> `PLAIN`: Output data with insert operations. <br/> `DEBEZIUM`: Output change data capture (CDC) log in Debezium format. <br/> `UPSERT`: Output data as a changelog stream. `primary_key` must be specified in this case. <br/> To learn about when to define the primary key if creating an UPSERT sink, see the [Overview](/delivery/overview). |
| data\_encode | Data encode. Supported encode: JSON. |
| force\_append\_only | If true, forces the sink to be PLAIN (also known as append-only), even if it cannot be. |
| timestamptz.handling.mode | Controls the timestamptz output format. This parameter specifically applies to append-only or upsert sinks using JSON encoding. <br/> If omitted, the output format of timestamptz is `2023-11-11T18:30:09.453000Z` which includes the UTC suffix `Z`. <br/> When `utc_without_suffix` is specified, the format is changed to `2023-11-11 18:30:09.453000`. |
diff --git a/integrations/destinations/aws-kinesis.mdx b/integrations/destinations/aws-kinesis.mdx
index 6d5eacb1..96e87eb5 100644
--- a/integrations/destinations/aws-kinesis.mdx
+++ b/integrations/destinations/aws-kinesis.mdx
@@ -51,7 +51,7 @@ These options should be set in `FORMAT data_format ENCODE data_encode (key = 'va
| Field | Notes |
| :---------------------------------- | :--------------------- |
-| data\_format | Data format. Allowed formats: <br/> `PLAIN`: Output data with insert operations. <br/> `DEBEZIUM`: Output change data capture (CDC) log in Debezium format. <br/> `UPSERT`: Output data as a changelog stream. `primary_key` must be specified in this case. <br/> To learn about when to define the primary key if creating an UPSERT sink, see the [Overview](/docs/current/data-delivery/). |
+| data\_format | Data format. Allowed formats: <br/> `PLAIN`: Output data with insert operations. <br/> `DEBEZIUM`: Output change data capture (CDC) log in Debezium format. <br/> `UPSERT`: Output data as a changelog stream. `primary_key` must be specified in this case. <br/> To learn about when to define the primary key if creating an UPSERT sink, see the [Overview](/delivery/overview). |
| data\_encode | Data encode. Supported encode: `JSON`. |
| force\_append\_only | If `true`, forces the sink to be `PLAIN` (also known as `append-only`), even if it cannot be. |
| timestamptz.handling.mode | Controls the timestamptz output format. This parameter specifically applies to append-only or upsert sinks using JSON encoding. <br/> If omitted, the output format of timestamptz is `2023-11-11T18:30:09.453000Z` which includes the UTC suffix `Z`. <br/> When `utc_without_suffix` is specified, the format is changed to `2023-11-11 18:30:09.453000`. |
diff --git a/integrations/destinations/aws-s3.mdx b/integrations/destinations/aws-s3.mdx
index 6d9fabea..28111b7b 100644
--- a/integrations/destinations/aws-s3.mdx
+++ b/integrations/destinations/aws-s3.mdx
@@ -48,4 +48,4 @@ WITH (
)FORMAT PLAIN ENCODE PARQUET(force_append_only=true);
```
-For more information about encode `Parquet` or `JSON`, see [Sink data in parquet or json encode](/docs/current/data-delivery/).
\ No newline at end of file
+For more information about encode `Parquet` or `JSON`, see [Sink data in parquet or json encode](/delivery/overview).
\ No newline at end of file
diff --git a/integrations/destinations/azure-blob.mdx b/integrations/destinations/azure-blob.mdx
index 440620f9..d681410c 100644
--- a/integrations/destinations/azure-blob.mdx
+++ b/integrations/destinations/azure-blob.mdx
@@ -52,4 +52,4 @@ WITH (
)FORMAT PLAIN ENCODE PARQUET(force_append_only=true);
```
-For more information about encode `Parquet` or `JSON`, see [Sink data in parquet or json encode](/docs/current/data-delivery/).
+For more information about encode `Parquet` or `JSON`, see [Sink data in parquet or json encode](/delivery/overview).
diff --git a/integrations/destinations/clickhouse.mdx b/integrations/destinations/clickhouse.mdx
index d72fa9d4..7dee26f5 100644
--- a/integrations/destinations/clickhouse.mdx
+++ b/integrations/destinations/clickhouse.mdx
@@ -30,7 +30,7 @@ WITH (
| Parameter Names | Description |
| :--------------------------- | :------------------- |
-| type | Required. Specify if the sink should be upsert or append-only. If creating an upsert sink, see the [Overview](/docs/current/data-delivery/) on when to define the primary key and [Upsert sinks](#upsert-sinks) on limitations. |
+| type | Required. Specify if the sink should be upsert or append-only. If creating an upsert sink, see the [Overview](/delivery/overview) on when to define the primary key and [Upsert sinks](#upsert-sinks) on limitations. |
| primary\_key | Optional. A string of a list of column names, separated by commas, that specifies the primary key of the ClickHouse sink. |
| clickhouse.url | Required. Address of the ClickHouse server that you want to sink data to. Format: `http://ip:port`. The default port is 8123. |
| clickhouse.user | Required. User name for accessing the ClickHouse server. |
@@ -92,7 +92,7 @@ WITH (
) FORMAT PLAIN ENCODE JSON;
```
-Another option is to create an upsert table, which supports in-place updates. For more details on creating a table, see [CREATE TABLE](/docs/current/sql-create-table/) .
+Another option is to create an upsert table, which supports in-place updates. For more details on creating a table, see [CREATE TABLE](/sql/commands/sql-create-table).
```sql
CREATE TABLE s1_table (
diff --git a/integrations/destinations/delta-lake.mdx b/integrations/destinations/delta-lake.mdx
index e2242816..61539350 100644
--- a/integrations/destinations/delta-lake.mdx
+++ b/integrations/destinations/delta-lake.mdx
@@ -78,7 +78,7 @@ WITH (
) FORMAT PLAIN ENCODE JSON;
```
-You can also choose to create an upsert table, which supports in-place updates. For more details on creating a table, see [CREATE TABLE](/docs/current/sql-create-table/).
+You can also choose to create an upsert table, which supports in-place updates. For more details on creating a table, see [CREATE TABLE](/sql/commands/sql-create-table).
```sql
CREATE TABLE s1_table (id int, name varchar)
diff --git a/integrations/destinations/google-cloud-storage.mdx b/integrations/destinations/google-cloud-storage.mdx
index cba12043..e159cbea 100644
--- a/integrations/destinations/google-cloud-storage.mdx
+++ b/integrations/destinations/google-cloud-storage.mdx
@@ -43,4 +43,4 @@ WITH (
)FORMAT PLAIN ENCODE PARQUET(force_append_only=true);
```
-For more information about encode `Parquet` or `JSON`, see [Sink data in parquet or json encode](/docs/current/data-delivery/).
\ No newline at end of file
+For more information about encode `Parquet` or `JSON`, see [Sink data in parquet or json encode](/delivery/overview).
\ No newline at end of file
diff --git a/integrations/destinations/mysql.mdx b/integrations/destinations/mysql.mdx
index a15d9aa6..d94c049b 100644
--- a/integrations/destinations/mysql.mdx
+++ b/integrations/destinations/mysql.mdx
@@ -95,7 +95,7 @@ CREATE TABLE personnel (
### Install and launch RisingWave
-To install and start RisingWave locally, see the [Get started](/docs/current/get-started/) guide. We recommend running RisingWave locally for testing purposes.
+To install and start RisingWave locally, see the [Get started](/get-started/quickstart) guide. We recommend running RisingWave locally for testing purposes.
### Notes about running RisingWave from binaries
If you are running RisingWave locally from binaries and intend to use the native CDC source connectors or the JDBC sink connector, make sure you have [JDK 11](https://openjdk.org/projects/jdk/11/) or later versions installed in your environment.
@@ -180,7 +180,7 @@ SELECT * FROM personnel;
## Data type mapping
-For the MySQL data type mapping table, see the [Data type mapping table](/docs/current/ingest-from-mysql-cdc/#data-type-mapping) under the Ingest data from MySQL CDC topic.
+For the MySQL data type mapping table, see the [Data type mapping table](/integrations/sources/mysql-cdc#data-type-mapping) under the Ingest data from MySQL CDC topic.
Additional notes regarding sinking data to MySQL:
diff --git a/integrations/destinations/postgresql.mdx b/integrations/destinations/postgresql.mdx
index fd655579..ec5cc673 100644
--- a/integrations/destinations/postgresql.mdx
+++ b/integrations/destinations/postgresql.mdx
@@ -69,7 +69,7 @@ CREATE TABLE target_count (
### Install and launch RisingWave
-To install and start RisingWave locally, see the [Get started](/docs/current/get-started/) guide. We recommend running RisingWave locally for testing purposes.
+To install and start RisingWave locally, see the [Get started](/get-started/quickstart) guide. We recommend running RisingWave locally for testing purposes.
### Notes about running RisingWave from binaries
@@ -168,7 +168,7 @@ LIMIT 10;
## Data type mapping
-For the PostgreSQL data type mapping table, see the [Data type mapping table](/docs/current/ingest-from-postgres-cdc/#data-type-mapping) under the Ingest data from PostgreSQL CDC topic.
+For the PostgreSQL data type mapping table, see the [Data type mapping table](/integrations/sources/postgresql-cdc#data-type-mapping) under the Ingest data from PostgreSQL CDC topic.
Additional notes regarding sinking data to PostgreSQL:
diff --git a/integrations/destinations/webhdfs.mdx b/integrations/destinations/webhdfs.mdx
index c3468368..6e133cbf 100644
--- a/integrations/destinations/webhdfs.mdx
+++ b/integrations/destinations/webhdfs.mdx
@@ -39,4 +39,4 @@ WITH (
)FORMAT PLAIN ENCODE PARQUET(force_append_only=true);
```
-For more information about encode `Parquet` or `JSON`, see [Sink data in parquet or json encode](/docs/current/data-delivery/).
\ No newline at end of file
+For more information about the `Parquet` or `JSON` encode, see [Sink data in parquet or json encode](/delivery/overview).
\ No newline at end of file
diff --git a/integrations/sources/amazon-msk.mdx b/integrations/sources/amazon-msk.mdx
index 12f5aaf3..1e005647 100644
--- a/integrations/sources/amazon-msk.mdx
+++ b/integrations/sources/amazon-msk.mdx
@@ -153,7 +153,7 @@ After entering messages, you can close the console window or press Ctrl + C to e
### Install and launch RisingWave
-See [Quick start](/docs/current/get-started/) for options on how you can run RisingWave.
+See [Quick start](/get-started/quickstart) for options on how you can run RisingWave.
### Connect the cluster[](#connect-the-cluster "Direct link to Connect the cluster")
diff --git a/integrations/sources/apache-iceberg.mdx b/integrations/sources/apache-iceberg.mdx
index 7be6ffa8..0d362d20 100644
--- a/integrations/sources/apache-iceberg.mdx
+++ b/integrations/sources/apache-iceberg.mdx
@@ -225,7 +225,7 @@ SELECT * FROM t FOR SYSTEM_TIME AS OF '2024-04-03 08:54:22.488+00:00';
## Examples[](#examples "Direct link to Examples")
-Firstly, create an append-only Iceberg table, see [Append-only sink from upsert source](/docs/current/sink-to-iceberg/#append-only-sink-from-upsert-source) for details.
+Firstly, create an append-only Iceberg table, see [Append-only sink from upsert source](/integrations/destinations/apache-iceberg#append-only-sink-from-upsert-source) for details.
Secondly, create an Iceberg source:

```sql
CREATE SOURCE iceberg_source
diff --git a/integrations/sources/confluent-cloud.mdx b/integrations/sources/confluent-cloud.mdx
index aead8f6e..05ca26d3 100644
--- a/integrations/sources/confluent-cloud.mdx
+++ b/integrations/sources/confluent-cloud.mdx
@@ -46,7 +46,7 @@ Note that you will need the API key when creating a Kafka source in RisingWave.
### Run RisingWave
-To start RisingWave, see the [Get started](/docs/current/get-started/) guide.
+To start RisingWave, see the [Get started](/get-started/quickstart) guide.
### Connect to the data stream
diff --git a/integrations/sources/instaclustr-kafka.mdx b/integrations/sources/instaclustr-kafka.mdx
index 898b3b0f..034d9ce6 100644
--- a/integrations/sources/instaclustr-kafka.mdx
+++ b/integrations/sources/instaclustr-kafka.mdx
@@ -43,7 +43,7 @@ After these steps, you are on your way to build stream processing applications a
### Create a RisingWave project
-You can create a RisingWave project and connect to it by following the steps in the [Quick Start](/docs/current/get-started/) in the RisingWave documentation.
+You can create a RisingWave project and connect to it by following the steps in the [Quick Start](/get-started/quickstart) in the RisingWave documentation.
### Create a source
diff --git a/integrations/sources/mysql-cdc.mdx b/integrations/sources/mysql-cdc.mdx
index f1ea39f2..4aa58ba4 100644
--- a/integrations/sources/mysql-cdc.mdx
+++ b/integrations/sources/mysql-cdc.mdx
@@ -100,7 +100,7 @@ If you are running RisingWave locally from binaries and intend to use the native
## Create a table using the native CDC connector in RisingWave
-To ensure all data changes are captured, you must create a table and specify primary keys. See the [CREATE TABLE](/docs/current/sql-create-table/) command for more details.
+To ensure all data changes are captured, you must create a table and specify primary keys. See the [CREATE TABLE](/sql/commands/sql-create-table) command for more details.
### Syntax
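The hunk ends at the syntax section; as a hedged sketch of the shared-source pattern it introduces (connection values and the `mydb.personnel` table are placeholders):

```sql
-- One CDC source per upstream MySQL instance
CREATE SOURCE mysql_source WITH (
    connector = 'mysql-cdc',
    hostname = '127.0.0.1',
    port = '3306',
    username = 'root',
    password = 'secret',
    database.name = 'mydb'
);

-- Materialize one upstream table; specifying the primary key is required
CREATE TABLE personnel (
    id INT PRIMARY KEY,
    name VARCHAR
) FROM mysql_source TABLE 'mydb.personnel';
```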
diff --git a/integrations/sources/overview.mdx b/integrations/sources/overview.mdx
index 71ee1841..7fd667fc 100644
--- a/integrations/sources/overview.mdx
+++ b/integrations/sources/overview.mdx
@@ -5,4 +5,4 @@ mode: wide
sidebarTitle: Overview
---
- 6 items 5 items 1 item 3 items 3 items
+ 6 items 5 items 1 item 3 items 3 items
diff --git a/integrations/sources/postgresql-cdc.mdx b/integrations/sources/postgresql-cdc.mdx
index 86365be7..40502ada 100644
--- a/integrations/sources/postgresql-cdc.mdx
+++ b/integrations/sources/postgresql-cdc.mdx
@@ -116,7 +116,7 @@ If you are running RisingWave locally from binaries and intend to use the native
## Create a table using the native CDC connector
-To ensure all data changes are captured, you must create a table or source and specify primary keys. See the [CREATE TABLE](/docs/current/sql-create-table/) command for more details.
+To ensure all data changes are captured, you must create a table or source and specify primary keys. See the [CREATE TABLE](/sql/commands/sql-create-table) command for more details.
### Syntax
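Again the hunk stops at the syntax section; a hedged PostgreSQL counterpart of the MySQL sketch earlier in this patch (connection values, slot name, and the `public.shipments` table are placeholders):

```sql
CREATE SOURCE pg_source WITH (
    connector = 'postgres-cdc',
    hostname = '127.0.0.1',
    port = '5432',
    username = 'postgres',
    password = 'secret',
    database.name = 'mydb',
    slot.name = 'rw_slot'  -- replication slot; optional in some versions
);

-- The upstream table is schema-qualified for PostgreSQL
CREATE TABLE shipments (
    shipment_id INT PRIMARY KEY,
    origin VARCHAR
) FROM pg_source TABLE 'public.shipments';
```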
diff --git a/integrations/sources/supabase-cdc.mdx b/integrations/sources/supabase-cdc.mdx
index 1c0e9c15..cc2308e7 100644
--- a/integrations/sources/supabase-cdc.mdx
+++ b/integrations/sources/supabase-cdc.mdx
@@ -12,7 +12,7 @@ Create a Supabase project and a source table. Enable real-time when creating the
## Ingest CDC data into RisingWave
-Since every Supabase project is a dedicated PostgreSQL database, use the PostgreSQL source connector to ingest CDC data from RisingWave. For the syntax, parameters, and examples, see [Ingest data from PostgreSQL CDC](/docs/current/ingest-from-postgres-cdc/#create-a-table-using-the-native-cdc-connector).
+Since every Supabase project is a dedicated PostgreSQL database, use the PostgreSQL source connector to ingest CDC data from RisingWave. For the syntax, parameters, and examples, see [Ingest data from PostgreSQL CDC](/integrations/sources/postgresql-cdc#create-a-table-using-the-native-cdc-connector).
To start ingesting data from Supabase, a connection with the database must be established first by using the `CREATE SOURCE` command.
diff --git a/integrations/visualization/beekeeper-studio.mdx b/integrations/visualization/beekeeper-studio.mdx
index eaf1b97c..a072a61a 100644
--- a/integrations/visualization/beekeeper-studio.mdx
+++ b/integrations/visualization/beekeeper-studio.mdx
@@ -10,7 +10,7 @@ RisingWave only supports connecting the Beekeeper Studio Community edition. The
## Prerequisites
* Ensure that Beekeeper Studio Community Edition is installed. To download Beekeeper Studio, see the [Beekeeper releases page](https://github.com/beekeeper-studio/beekeeper-studio/releases/).
-* Install and start RisingWave. For instructions on how to get started, see the [Quick start guide](/docs/current/get-started/).
+* Install and start RisingWave. For instructions on how to get started, see the [Quick start guide](/get-started/quickstart).
## Establish the connection
1. In the Beekeeper Studio interface, under **New connection**, select **Postgres** as the **Connection type**.
diff --git a/integrations/visualization/dbeaver.mdx b/integrations/visualization/dbeaver.mdx
index 4dd681b2..ad55c67f 100644
--- a/integrations/visualization/dbeaver.mdx
+++ b/integrations/visualization/dbeaver.mdx
@@ -9,7 +9,7 @@ This guide will go over how to connect DBeaver to RisingWave so you can seamless
## Prerequisites
* Ensure that DBeaver is installed. To download DBeaver, see the [DBeaver download page](https://dbeaver.io/download/). Please make sure that your DBeaver version is at least [v23.3.4](https://dbeaver.io/2024/02/04/dbeaver-23-3-4/).
-* Install and start RisingWave. For instructions on how to get started, see the [Quick start guide](/docs/current/get-started/).
+* Install and start RisingWave. For instructions on how to get started, see the [Quick start guide](/get-started/quickstart).
## Establish the connection
diff --git a/integrations/visualization/grafana.mdx b/integrations/visualization/grafana.mdx
index 310d30cb..57ba9323 100644
--- a/integrations/visualization/grafana.mdx
+++ b/integrations/visualization/grafana.mdx
@@ -11,7 +11,7 @@ This guide will go over how to add RisingWave as a data source in Grafana.
### Install and launch RisingWave
-To install and start RisingWave locally, see the [Get started](/docs/current/get-started/) guide. We recommend running RisingWave locally for testing purposes.
+To install and start RisingWave locally, see the [Get started](/get-started/quickstart) guide. We recommend running RisingWave locally for testing purposes.
Connect to streaming sources. For details on connecting to a streaming source and what connectors are supported with RisingWave, see [CREATE SOURCE](/docs/current/sql-create-source/).
diff --git a/integrations/visualization/looker.mdx b/integrations/visualization/looker.mdx
index 739c5573..f3909cdb 100644
--- a/integrations/visualization/looker.mdx
+++ b/integrations/visualization/looker.mdx
@@ -9,7 +9,7 @@ Since RisingWave is compatible with PostgreSQL, you can easily connect Looker to
## Prerequisites
* Ensure that [Looker](https://cloud.google.com/looker) is installed and accessible from the RisingWave cluster.
-* Install and start RisingWave. For instructions on how to get started, see the [Quick start guide](/docs/current/get-started/).
+* Install and start RisingWave. For instructions on how to get started, see the [Quick start guide](/get-started/quickstart).
## Establish the connection
diff --git a/integrations/visualization/metabase.mdx b/integrations/visualization/metabase.mdx
index d12171c7..36909597 100644
--- a/integrations/visualization/metabase.mdx
+++ b/integrations/visualization/metabase.mdx
@@ -9,7 +9,7 @@ Since RisingWave is compatible with PostgreSQL, you can connect Metabase to Risi
## Prerequisites
* Metabase installed and running.
-* Install and start RisingWave. For instructions on how to get started, see the [Quick start guide](/docs/current/get-started/).
+* Install and start RisingWave. For instructions on how to get started, see the [Quick start guide](/get-started/quickstart).
## Establish the connection
diff --git a/integrations/visualization/superset.mdx b/integrations/visualization/superset.mdx
index 065c0f78..4fffb3e6 100644
--- a/integrations/visualization/superset.mdx
+++ b/integrations/visualization/superset.mdx
@@ -13,7 +13,7 @@ This guide will go over how to:
### Install and start RisingWave
-To install and start RisingWave locally, see the [Get started](/docs/current/get-started/) guide. We recommend running RisingWave locally for demo purposes.
+To install and start RisingWave locally, see the [Get started](/get-started/quickstart) guide. We recommend running RisingWave locally for demo purposes.
Connect to a streaming source. For details on connecting to streaming sources and what sources are supported with RisingWave, see [CREATE SOURCE](/docs/current/sql-create-source/).
diff --git a/operate/alter-streaming.mdx b/operate/alter-streaming.mdx
index 72f0d795..b556d19d 100644
--- a/operate/alter-streaming.mdx
+++ b/operate/alter-streaming.mdx
@@ -137,4 +137,4 @@ CREATE MATERIALIZED VIEW adult_users AS
It was discovered later that the legal definition for adulthood should be set at ≥16\. Initially, one might consider modifying the filter condition from `age >= 18` to `age >= 16` as a straightforward solution. However, this is not feasible in stream processing since records with ages between 16 and 18 have already been filtered out. Therefore, the only option to restore the missing data is to recompute the entire stream from the beginning.
-Therefore, we recommend persistently storing the source data in a long-term storage solution, such as [a RisingWave table](/docs/current/sql-create-table/). This allows for the recomputation of the materialized view when altering the logic becomes necessary.
+For this reason, we recommend persistently storing the source data in a long-term storage solution, such as [a RisingWave table](/sql/commands/sql-create-table). This makes it possible to recompute the materialized view when its logic needs to change.
diff --git a/operate/meta-backup.mdx b/operate/meta-backup.mdx
index 81dac66e..8c087959 100644
--- a/operate/meta-backup.mdx
+++ b/operate/meta-backup.mdx
@@ -25,7 +25,7 @@ Here's an example of how to create a new meta snapshot with `risectl`:
risectl meta backup-meta
```
-`risectl` is included in the pre-built RisingWave binary. For details, see [Quick start](/docs/current/get-started/#binaries).
+`risectl` is included in the pre-built RisingWave binary. For details, see [Quick start](/get-started/quickstart#binaries).
## View existing meta snapshots
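The listing itself is outside this hunk; if memory serves, existing snapshots are listed through the `rw_catalog` system catalog, along these lines:

```sql
SELECT meta_snapshot_id FROM rw_catalog.rw_meta_snapshot;
```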
diff --git a/performance/performance-best-practices.mdx b/performance/performance-best-practices.mdx
index 46111e71..b4548453 100644
--- a/performance/performance-best-practices.mdx
+++ b/performance/performance-best-practices.mdx
@@ -82,6 +82,6 @@ This is an advanced feature that is still in the [public preview stage](/product
## How to monitor the progress of direct CDC
-To effectively monitor the progress of direct Change Data Capture (CDC), you can employ two key methods tailored to historical and real-time data for PostgreSQL and MySQL databases. For more details, see [Use Direct CDC for PostgreSQL](/docs/current/ingest-from-postgres-cdc/#monitor-the-progress-of-direct-cdc) and [Use Direct CDC for MySQL](/docs/current/ingest-from-mysql-cdc/#monitor-the-progress-of-direct-cdc).
+To effectively monitor the progress of direct Change Data Capture (CDC), you can employ two key methods tailored to historical and real-time data for PostgreSQL and MySQL databases. For more details, see [Use Direct CDC for PostgreSQL](/integrations/sources/postgresql-cdc#monitor-the-progress-of-direct-cdc) and [Use Direct CDC for MySQL](/integrations/sources/mysql-cdc#monitor-the-progress-of-direct-cdc).
For any other questions or tips regarding performance tuning, feel free to join our [Slack community](https://www.risingwave.com/slack) and become part of our growing network of users. Engage in discussions, seek assistance, and share your experiences with fellow users and our engineers who are eager to provide insights and solutions.
diff --git a/processing/maintain-wide-table-with-table-sinks.mdx b/processing/maintain-wide-table-with-table-sinks.mdx
index dba01d0e..7909afc5 100644
--- a/processing/maintain-wide-table-with-table-sinks.mdx
+++ b/processing/maintain-wide-table-with-table-sinks.mdx
@@ -3,7 +3,7 @@ title: "Maintain wide table with table sinks"
description: "This guide introduces how to maintain a wide table whose columns come from different sources. Traditional data warehouses or ETL use a join query for this purpose. However, streaming join brings issues such as low efficiency and high memory consumption."
---
-In some cases with limitation, use the [CREATE SINK INTO TABLE](/docs/current/sql-create-sink-into/) and [ON CONFLICT clause](/docs/current/sql-create-table/#pk-conflict-behavior) can save the resources and achieve high efficiency.
+In some cases with limitations, using [CREATE SINK INTO TABLE](/docs/current/sql-create-sink-into/) together with the [ON CONFLICT clause](/sql/commands/sql-create-table#pk-conflict-behavior) can save resources and achieve high efficiency.
## Merge multiple sinks with the same primary key
@@ -97,4 +97,4 @@ But maintaining wide table with table sinks can save the resources and achieve h
-Furthermore, for the large dimension table, we can use [Temporal Join](/docs/current/query-syntax-join-clause/) as the partial join to reduce the streaming state and improve performance.
+Furthermore, for large dimension tables, we can use a [Temporal Join](/processing/sql/joins) as a partial join to reduce the streaming state and improve performance.
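To make the technique concrete, a sketch of merging two sinks into one wide table on a shared primary key (`wide_d`, `src_a`, and `src_b` are placeholders):

```sql
-- Partial updates merge on the primary key; NULLs do not overwrite
CREATE TABLE wide_d (
    id INT PRIMARY KEY,
    a INT,
    b INT
) ON CONFLICT DO UPDATE IF NOT NULL;

CREATE SINK s_a INTO wide_d (id, a) AS SELECT id, a FROM src_a;
CREATE SINK s_b INTO wide_d (id, b) AS SELECT id, b FROM src_b;
```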
diff --git a/processing/sql/temporal-filters.mdx b/processing/sql/temporal-filters.mdx
index 797bb1ec..e79be010 100644
--- a/processing/sql/temporal-filters.mdx
+++ b/processing/sql/temporal-filters.mdx
@@ -82,7 +82,7 @@ The temporal filter in this query is in the `WHERE` clause. It checks whether th
## Usage 2: Delay table changes
-When the time expression with `NOW()` is the upper bound condition of the base relation such as `ts + interval '1 hour' < now()`, it can "delay" the table's changes of the input relation. It could be useful when used with the [Temporal Join](/docs/current/query-syntax-join-clause/).
+When the time expression with `NOW()` is the upper bound condition of the base relation, such as `ts + interval '1 hour' < now()`, it can "delay" the changes of the input relation. This can be useful when combined with a [Temporal Join](/processing/sql/joins).
Here is a typical example of the temporal join used to widen a fact table.
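The example itself falls outside this hunk; a hedged reconstruction of the shape it describes, assuming an append-only `orders` stream and an `items` table keyed on `item_id`:

```sql
CREATE MATERIALIZED VIEW enriched_orders AS
SELECT o.order_id, o.item_id, items.item_name
FROM (
    -- Temporal filter as the upper bound: delay order changes by 1 hour
    SELECT * FROM orders
    WHERE created_at + INTERVAL '1 hour' < NOW()
) AS o
JOIN items FOR SYSTEM_TIME AS OF PROCTIME()
    ON o.item_id = items.item_id;
```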
diff --git a/reference/key-concepts.mdx b/reference/key-concepts.mdx
index 35c1fac7..a132a3a0 100644
--- a/reference/key-concepts.mdx
+++ b/reference/key-concepts.mdx
@@ -53,7 +53,7 @@ A sink is an external target to which you can send data. RisingWave now supports
A source is a resource that RisingWave can read data from. Common sources include message brokers such as Apache Kafka and Apache Pulsar and databases such as MySQL and PostgreSQL. You can create a source in RisingWave using the [CREATE SOURCE](/docs/current/sql-create-source/) command.
-If you want to persist the data from the source, you should use the [CREATE TABLE](/docs/current/sql-create-table/) command with connector settings.
+If you want to persist the data from the source, you should use the [CREATE TABLE](/sql/commands/sql-create-table) command with connector settings.
Regardless of whether the data is persisted in RisingWave, you can create materialized views to perform data transformations.
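A sketch of the persisted-source pattern mentioned above, with the topic, broker address, and schema as placeholders:

```sql
CREATE TABLE website_visits (
    visit_ts TIMESTAMPTZ,
    user_id VARCHAR,
    page_id VARCHAR
) WITH (
    connector = 'kafka',
    topic = 'website_visits',
    properties.bootstrap.server = 'localhost:9092',
    scan.startup.mode = 'earliest'
) FORMAT PLAIN ENCODE JSON;
```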
diff --git a/sql/commands/overview.mdx b/sql/commands/overview.mdx
index f78a51bf..ad57faa9 100644
--- a/sql/commands/overview.mdx
+++ b/sql/commands/overview.mdx
@@ -55,10 +55,10 @@ sidebarTitle: Overview
>
Modify the properties of a schema.
- Modify the properties of a sink. Modify the properties of a source. Modify a server configuration parameter. Modify the properties of a table. Modify the properties of a user. Modify the properties of a view. Convert stream into an append-only changelog. Start a transaction. Cancel specific streaming jobs. Add comments on tables or columns. Commit the current transaction. Create a user-defined aggregate function. Create a connection between VPCs. Create a new database. Create a user-defined function. Create an index on a column of a table or a materialized view to speed up data retrieval. Create a materialized view. Create a new schema. Create a secret to store credentials. Create a sink into RisingWave's table. Create a sink. Supported data sources and how to connect RisingWave to the sources. Create a table. Create a new user account. Create a non-materialized view.
- Remove rows from a table. Get information about the columns in a table, source, sink, view, or materialized view. Discard session state. Drop a user-defined aggregate function. Remove a connection. Remove a database. Drop a user-defined function. Remove an index. Remove a materialized view. Remove a schema. Drop a secret. Remove a sink. Remove a source. Remove a table. Remove a user. Drop a view. Show the execution plan of a statement. Commit pending data changes and persists updated data to storage. Grant a user privileges. Insert new rows of data into a table. Trigger recovery manually. Revoke privileges from a user. Retrieve data from a table or a materialized view. Run Data Definition Language (DDL) operations in the background. Enable or disable implicit flushes after batch operations. Set time zone. Change a run-time parameter.
+ Modify the properties of a sink. Modify the properties of a source. Modify a server configuration parameter. Modify the properties of a table. Modify the properties of a user. Modify the properties of a view. Convert stream into an append-only changelog. Start a transaction. Cancel specific streaming jobs. Add comments on tables or columns. Commit the current transaction. Create a user-defined aggregate function. Create a connection between VPCs. Create a new database. Create a user-defined function. Create an index on a column of a table or a materialized view to speed up data retrieval. Create a materialized view. Create a new schema. Create a secret to store credentials. Create a sink into RisingWave's table. Create a sink. Supported data sources and how to connect RisingWave to the sources. Create a table. Create a new user account. Create a non-materialized view.
+ Remove rows from a table. Get information about the columns in a table, source, sink, view, or materialized view. Discard session state. Drop a user-defined aggregate function. Remove a connection. Remove a database. Drop a user-defined function. Remove an index. Remove a materialized view. Remove a schema. Drop a secret. Remove a sink. Remove a source. Remove a table. Remove a user. Drop a view. Show the execution plan of a statement. Commit pending data changes and persists updated data to storage. Grant a user privileges. Insert new rows of data into a table. Trigger recovery manually. Revoke privileges from a user. Retrieve data from a table or a materialized view. Run Data Definition Language (DDL) operations in the background. Enable or disable implicit flushes after batch operations. Set time zone. Change a run-time parameter. Show the details of your RisingWave cluster. Show columns in a table, source, sink, view or materialized view. Show existing connections. Show the query used to create the specified index. Show the query used to create the specified materialized view. Show the query used to create the specified sink. Show the query used to create the specified source. Show the query used to create the specified table. Show the query used to create the specified view. Show all cursors in the current session. Show existing databases. Show all user-defined functions. Show existing indexes from a particular table. Show internal tables to learn about the existing internal states. Show all streaming jobs. Show existing materialized views. Show the details of the system parameters.
- Display system current workload. Show existing schemas. Shows all sinks. Show existing sources. Show all subscription cursors in the current session. Show existing tables. Show existing views. Start a transaction. Modify existing rows in a table.
+ Display the current system workload. Show existing schemas. Show all sinks. Show existing sources. Show all subscription cursors in the current session. Show existing tables. Show existing views. Start a transaction. Modify existing rows in a table.
diff --git a/sql/commands/sql-as-changelog.mdx b/sql/commands/sql-as-changelog.mdx
index 373a0cf0..5c5eacb3 100644
--- a/sql/commands/sql-as-changelog.mdx
+++ b/sql/commands/sql-as-changelog.mdx
@@ -3,7 +3,7 @@ title: "AS CHANGELOG"
description: "Use the `AS CHANGELOG` clause to convert a changelog operation in a stream into a column."
---
-This can be used to create materialized views and sinks. See the practice in [Sink data with upsert in Snowflake](/docs/current/sink-to-snowflake/#sink-data-with-upsert).
+This can be used to create materialized views and sinks. See the practice in [Sink data with upsert in Snowflake](/integrations/destinations/snowflake#sink-data-with-upsert).
## Syntax
diff --git a/sql/commands/sql-begin.mdx b/sql/commands/sql-begin.mdx
index 04266814..43e71e70 100644
--- a/sql/commands/sql-begin.mdx
+++ b/sql/commands/sql-begin.mdx
@@ -25,6 +25,6 @@ BEGIN
-
+
diff --git a/sql/commands/sql-commit.mdx b/sql/commands/sql-commit.mdx
index 00012fec..83fc1067 100644
--- a/sql/commands/sql-commit.mdx
+++ b/sql/commands/sql-commit.mdx
@@ -42,7 +42,7 @@ COMMIT
title="START TRANSACTION"
icon="play"
iconType="solid"
- href="/docs/current/sql-start-transaction/"
+ href="/sql/commands/sql-start-transaction"
horizontal
/>
diff --git a/sql/commands/sql-create-mv.mdx b/sql/commands/sql-create-mv.mdx
index 37323b4f..4c392590 100644
--- a/sql/commands/sql-create-mv.mdx
+++ b/sql/commands/sql-create-mv.mdx
@@ -12,7 +12,7 @@ CREATE MATERIALIZED VIEW [IF NOT EXISTS] mv_name AS select_query;
`CREATE MATERIALIZED VIEW` will first **backfill** historical data from the referenced relations, and completion time varies based on the volume of data to be backfilled.
-To perform the operations in the background, you can execute `SET BACKGROUND_DDL=true;` before running the `CREATE MATERIALIZED VIEW` statement. See details in [SET BACKGROUND\_DDL](/docs/current/sql-set-background-ddl/).
+To perform the operations in the background, you can execute `SET BACKGROUND_DDL=true;` before running the `CREATE MATERIALIZED VIEW` statement. See details in [SET BACKGROUND\_DDL](/sql/commands/sql-set-background-ddl).
## Parameters
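A quick illustration of the background DDL flow described above; `mv_order_counts` and `orders` are placeholder names:

```sql
-- Return immediately and let the backfill run in the background
SET BACKGROUND_DDL = true;

CREATE MATERIALIZED VIEW mv_order_counts AS
SELECT customer_id, COUNT(*) AS order_cnt
FROM orders
GROUP BY customer_id;

SHOW JOBS;  -- check backfill progress
```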
diff --git a/sql/commands/sql-create-secret.mdx b/sql/commands/sql-create-secret.mdx
index 8849c3d6..69504a94 100644
--- a/sql/commands/sql-create-secret.mdx
+++ b/sql/commands/sql-create-secret.mdx
@@ -52,7 +52,7 @@ SHOW CREATE SOURCE mysql_source;
title="Manage secrets"
icon="key"
icontype="solid"
- href="/docs/current/manage-secrets/"
+ href="/operate/manage-secrets"
>
A comprehensive guide for secret management operations
diff --git a/sql/commands/sql-create-sink.mdx b/sql/commands/sql-create-sink.mdx
index 97aef14d..f9761e33 100644
--- a/sql/commands/sql-create-sink.mdx
+++ b/sql/commands/sql-create-sink.mdx
@@ -3,7 +3,7 @@ title: "CREATE SINK"
description: "Use the `CREATE SINK` command to create a sink. A sink is an external target where you can send data processed in RisingWave. You can create a sink from a table or a materialized view."
---
-If your goal is to create an append-only sink, you can use the emit-on-window-close policy when creating the materialized view that you want to sink data from. For details about the policy, see [Emit on window close](/docs/current/emit-on-window-close/).
+If your goal is to create an append-only sink, you can use the emit-on-window-close policy when creating the materialized view that you want to sink data from. For details about the policy, see [Emit on window close](/processing/emit-on-window-close).
## Syntax
@@ -41,19 +41,19 @@ Please distinguish between the parameters set in the FORMAT and ENCODE options a
Click a sink name to see the SQL syntax, options, and sample statement of sinking data from RisingWave to the sink.
* [Apache Doris](/docs/current/sink-to-doris/)
-* [Apache Iceberg](/docs/current/sink-to-iceberg/)
-* [AWS Kinesis](/docs/current/sink-to-aws-kinesis/)
-* [Cassandra or ScyllaDB](/docs/current/sink-to-cassandra/)
+* [Apache Iceberg](/integrations/destinations/apache-iceberg)
+* [AWS Kinesis](/integrations/destinations/aws-kinesis)
+* [Cassandra or ScyllaDB](/integrations/destinations/cassandra-or-scylladb)
* [ClickHouse](/docs/current/sink-to-clickhouse/)
* [CockroachDB](/docs/current/sink-to-cockroach/)
* [Delta Lake](/docs/current/sink-to-delta-lake/)
-* [Elasticsearch](/docs/current/sink-to-elasticsearch/)
-* [Google BigQuery](/docs/current/sink-to-bigquery/)
+* [Elasticsearch](/integrations/destinations/elasticsearch)
+* [Google BigQuery](/integrations/destinations/bigquery)
* [Kafka](/docs/current/create-sink-kafka/) (Supports versions 3.1.0 or later)
* [MySQL](/docs/current/sink-to-mysql-with-jdbc/) (Supports versions 5.7 and 8.0.x)
-* [NATS](/docs/current/sink-to-nats/)
+* [NATS](/integrations/destinations/nats-and-nats-jetstream)
* [PostgreSQL](/docs/current/sink-to-postgres/)
-* [Pulsar](/docs/current/sink-to-pulsar/)
+* [Pulsar](/integrations/destinations/apache-pulsar)
* [Redis](/docs/current/sink-to-redis/)
* [StarRocks](/docs/current/sink-to-starrocks/)
* [TiDB](/docs/current/sink-to-tidb/)
@@ -65,7 +65,7 @@ Click a sink name to see the SQL syntax, options, and sample statement of sinkin
title="Overview of data delivery"
icon="truck"
icontype="solid"
- href="/docs/current/data-delivery/"
+ href="/delivery/overview"
/>
A comprehensive guide for secret management operations, including creation, usage, and deletion.
diff --git a/sql/data-types/overview.mdx b/sql/data-types/overview.mdx
index 2a01050d..5c3ea00a 100644
--- a/sql/data-types/overview.mdx
+++ b/sql/data-types/overview.mdx
@@ -23,7 +23,7 @@ sidebarTitle: Overview
| interval | | Time span. Input in string format. Units include: second/seconds/s, minute/minutes/min/m, hour/hours/hr/h, day/days/d, month/months/mon, and year/years/yr/y. Units smaller than second can only be specified in a numerical format. | Examples: `interval '4 hour'` → `04:00:00` `interval '3 day'` → `3 days 00:00:00` `interval '04:00:00.1234'` → `04:00:00.1234` |
| struct | | A struct is a column that contains nested data. | For syntax and examples, see [Struct](/docs/current/data-type-struct/). |
| array | | An array is an ordered list of zero or more elements that share the same data type. | For syntax and examples, see [Array](/docs/current/data-type-array/). |
-| map | | A map contains key-value pairs. | For syntax and examples, see [Map](/docs/current/data-type-map/). |
+| map | | A map contains key-value pairs. | For syntax and examples, see [Map](/sql/data-types/map-type). |
| JSONB | | A (binary) JSON value that ignores semantically-insignificant whitespaces or order of object keys. | For syntax and examples, see [JSONB](/docs/current/data-type-jsonb/). |
diff --git a/sql/functions/window-functions.mdx b/sql/functions/window-functions.mdx
index 5b3b386e..2a4392b6 100644
--- a/sql/functions/window-functions.mdx
+++ b/sql/functions/window-functions.mdx
@@ -83,4 +83,4 @@ last_value ( value anyelement ) → anyelement
All aggregate functions, including builtin ones such as `sum()` and `min()`, user-defined ones and `AGGREGATE:`-prefixed scalar functions, can be used as window functions.
-For the complete list of builtin aggregate functions and their usage, see [Aggregate functions](/docs/current/sql-function-aggregate/).
\ No newline at end of file
+For the complete list of builtin aggregate functions and their usage, see [Aggregate functions](/sql/functions/aggregate).
\ No newline at end of file
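A small illustration of an aggregate used as a window function, with `orders` as a placeholder relation; note that `PARTITION BY` is currently required:

```sql
SELECT
    user_id,
    ts,
    sum(amount) OVER (PARTITION BY user_id ORDER BY ts) AS running_total
FROM orders;
```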
diff --git a/sql/query-syntax/generated-columns.mdx b/sql/query-syntax/generated-columns.mdx
index 11e38c55..56c56119 100644
--- a/sql/query-syntax/generated-columns.mdx
+++ b/sql/query-syntax/generated-columns.mdx
@@ -4,7 +4,7 @@ description: "A generated column is a special column that is always computed fro
---
-To create a generated column, use the `AS ` clause in [CREATE TABLE](/docs/current/sql-create-table/) or [CREATE SOURCE](/docs/current/sql-create-source/) statements, for example:
+To create a generated column, use the `AS <expr>` clause in [CREATE TABLE](/sql/commands/sql-create-table) or [CREATE SOURCE](/docs/current/sql-create-source/) statements, for example:
```sql
CREATE TABLE t1 (v1 int AS v2-1, v2 int, v3 int AS v2+1);
diff --git a/sql/query-syntax/group-by-clause.mdx b/sql/query-syntax/group-by-clause.mdx
index ae877dd4..5d120ed6 100644
--- a/sql/query-syntax/group-by-clause.mdx
+++ b/sql/query-syntax/group-by-clause.mdx
@@ -17,7 +17,7 @@ GROUP BY column1, column2....columnN
ORDER BY column1, column2....columnN
```
-If your goal is to generate windowed calculation results strictly as append-only output, you can utilize the emit-on-window-close policy. This approach helps avoid unnecessary computations. For more information on the emit-on-window-close policy, please refer to [Emit on window close](/docs/current/emit-on-window-close/).
+If your goal is to generate windowed calculation results strictly as append-only output, you can utilize the emit-on-window-close policy. This approach helps avoid unnecessary computations. For more information on the emit-on-window-close policy, please refer to [Emit on window close](/processing/emit-on-window-close).
You can use more than one column in the `GROUP BY` clause.
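A sketch of the emit-on-window-close policy applied to a windowed `GROUP BY`, assuming an `events` relation with a watermark defined on `event_time`:

```sql
CREATE MATERIALIZED VIEW hourly_counts AS
SELECT window_start, COUNT(*) AS cnt
FROM TUMBLE(events, event_time, INTERVAL '1 hour')
GROUP BY window_start
EMIT ON WINDOW CLOSE;  -- results are emitted once the watermark closes each window
```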
diff --git a/sql/query-syntax/value-exp.mdx b/sql/query-syntax/value-exp.mdx
index baa4079d..e1bdfcf2 100644
--- a/sql/query-syntax/value-exp.mdx
+++ b/sql/query-syntax/value-exp.mdx
@@ -18,7 +18,7 @@ aggregate_name ( * ) [ FILTER ( WHERE filter_clause ) ]
aggregate_name ( [ expression [ , ... ] ] ) WITHIN GROUP ( order_by_clause ) [ FILTER ( WHERE filter_clause ) ]
```
-`aggregate_name` is one of the aggregation functions listed on [Aggregate functions](/docs/current/sql-function-aggregate/), and `expression` is a value expression that does not contain an aggregate expression or a window function call.
+`aggregate_name` is one of the aggregation functions listed on [Aggregate functions](/sql/functions/aggregate), and `expression` is a value expression that does not contain an aggregate expression or a window function call.
The `DISTINCT` keyword, which is only available in the second form, cannot be used together with an `ORDER BY` or `WITHIN GROUP` clause. Additionally, it's important to note that the `order_by_clause` is positioned differently in the first and fourth forms.
@@ -55,7 +55,7 @@ Currently, the `PARTITION BY` clause is required. If you do not want to partitio
For ranking window functions like `row_number`, `rank` and `dense_rank`, `ORDER BY` clause is required.
-When operating in the [Emit on window close](/docs/current/emit-on-window-close/) mode for a streaming query, `ORDER BY` clause is required for all window functions. Please ensure that you specify exactly one column in the `ORDER BY` clause. This column, generally a timestamp column, must have a watermark defined for it. It's important to note that when using the timestamp column from this streaming query in another streaming query, the watermark information associated with the column is not retained.
+When operating in the [Emit on window close](/processing/emit-on-window-close) mode for a streaming query, `ORDER BY` clause is required for all window functions. Please ensure that you specify exactly one column in the `ORDER BY` clause. This column, generally a timestamp column, must have a watermark defined for it. It's important to note that when using the timestamp column from this streaming query in another streaming query, the watermark information associated with the column is not retained.
`window_function_name` is one of the window functions listed on [Window functions](/docs/current/sql-function-window-functions/).
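A small illustration of the first aggregate form above, using the `FILTER` clause; `orders` and `amount` are placeholders:

```sql
SELECT
    count(*) AS all_orders,
    count(*) FILTER (WHERE amount > 100) AS large_orders
FROM orders;
```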
diff --git a/sql/system-catalogs/rw-catalog.mdx b/sql/system-catalogs/rw-catalog.mdx
index 574e40b3..6487dd68 100644
--- a/sql/system-catalogs/rw-catalog.mdx
+++ b/sql/system-catalogs/rw-catalog.mdx
@@ -109,7 +109,7 @@ SELECT name, initialized_at, created_at FROM rw_sources;
| rw\_relation\_info | Contains low-level relation information about tables, sources, materialized views, and indexes that are available in the database. |
| rw\_relations | Contains information about relations in the database, including their unique IDs, names, types, schema IDs, and owners. |
| rw\_schemas | Contains information about schemas that are available in the database, including their names, unique IDs, owner IDs, and more. |
-| rw\_secrets | Contains information about the ID, name, owner, and access control of secret objects. For more details about secrets, see [Manage secrets](/docs/current/manage-secrets/). |
+| rw\_secrets | Contains information about the ID, name, owner, and access control of secret objects. For more details about secrets, see [Manage secrets](/operate/manage-secrets). |
| rw\_sinks | Contains information about sinks that are available in the database, including their unique IDs, names, schema IDs, owner IDs, connector types, sink types, connection IDs, definitions, and more. |
| rw\_sources | Contains information about sources that are available in the database, including their unique IDs, names, schema IDs, owner IDs, connector types, column definitions, row formats, append-only flags, connection IDs, and more. |
| rw\_streaming\_parallelism | Contains information about the streaming parallelism configuration for streaming jobs, including their IDs, names, relation types, and parallelism. |
diff --git a/sql/udfs/use-udfs-in-python.mdx b/sql/udfs/use-udfs-in-python.mdx
index 0621f010..0af44948 100644
--- a/sql/udfs/use-udfs-in-python.mdx
+++ b/sql/udfs/use-udfs-in-python.mdx
@@ -7,7 +7,7 @@ sidebarTitle: Python
## Prerequisites
* Ensure that you have [Python](https://www.python.org/downloads/) (3.8 or later) installed on your computer.
-* Ensure that you have [started and connected to RisingWave](/docs/current/get-started/#run-risingwave).
+* Ensure that you have [started and connected to RisingWave](/get-started/quickstart#run-risingwave).
## 1\. Install the RisingWave UDF API for Python
From 5ad708a838c33181c9de2d49e5173e91dcdfc24d Mon Sep 17 00:00:00 2001
From: WanYixian
Date: Mon, 25 Nov 2024 18:13:51 +0800
Subject: [PATCH 06/10] save work
---
cloud/create-a-connection.mdx | 2 +-
cloud/manage-sinks.mdx | 2 +-
delivery/overview.mdx | 30 ++++-----
delivery/risingwave-as-postgres-fdw.mdx | 2 +-
delivery/subscription.mdx | 2 +-
demos/clickstream-analysis.mdx | 4 +-
demos/overview.mdx | 10 +--
.../server-performance-anomaly-detection.mdx | 2 +-
...singwave-to-monitor-risingwave-metrics.mdx | 2 +-
deploy/hardware-requirements.mdx | 2 +-
deploy/k8s-cluster-scaling.mdx | 66 ++++++++++---------
get-started/rw-premium-edition-intro.mdx | 2 +-
integrations/destinations/amazon-dynamodb.mdx | 2 +-
integrations/destinations/apache-iceberg.mdx | 2 +-
integrations/destinations/apache-pulsar.mdx | 2 +-
integrations/destinations/aws-kinesis.mdx | 2 +-
integrations/destinations/azure-blob.mdx | 2 +-
integrations/destinations/bigquery.mdx | 2 +-
.../destinations/cassandra-or-scylladb.mdx | 2 +-
integrations/destinations/cockroachdb.mdx | 2 +-
integrations/destinations/elasticsearch.mdx | 2 +-
integrations/destinations/mongodb.mdx | 2 +-
.../destinations/nats-and-nats-jetstream.mdx | 2 +-
integrations/destinations/postgresql.mdx | 2 +-
integrations/destinations/snowflake.mdx | 2 +-
integrations/destinations/supabase.mdx | 2 +-
integrations/destinations/tidb.mdx | 2 +-
integrations/sources/apache-iceberg.mdx | 2 +-
integrations/sources/citus-cdc.mdx | 2 +-
integrations/sources/postgresql-cdc.mdx | 2 +-
performance/performance-best-practices.mdx | 2 +-
processing/emit-on-window-close.mdx | 2 +-
processing/sql/joins.mdx | 4 +-
processing/time-travel-queries.mdx | 2 +-
reference/key-concepts.mdx | 2 +-
sql/commands/overview.mdx | 2 +-
sql/commands/sql-cancel-jobs.mdx | 2 +-
sql/commands/sql-create-connection.mdx | 2 +-
sql/commands/sql-create-sink-into.mdx | 2 +-
sql/commands/sql-create-sink.mdx | 20 +++---
sql/commands/sql-create-source.mdx | 4 +-
sql/commands/sql-create-table.mdx | 6 +-
sql/commands/sql-drop-sink.mdx | 4 +-
sql/commands/sql-set-background-ddl.mdx | 4 +-
sql/commands/sql-show-create-sink.mdx | 2 +-
sql/commands/sql-show-jobs.mdx | 2 +-
sql/commands/sql-start-transaction.mdx | 2 +-
sql/data-types/map-type.mdx | 2 +-
sql/functions/aggregate.mdx | 2 +-
sql/query-syntax/generated-columns.mdx | 2 +-
sql/system-catalogs/rw-catalog.mdx | 2 +-
51 files changed, 119 insertions(+), 115 deletions(-)
diff --git a/cloud/create-a-connection.mdx b/cloud/create-a-connection.mdx
index a9fbda09..eafbd83b 100644
--- a/cloud/create-a-connection.mdx
+++ b/cloud/create-a-connection.mdx
@@ -63,4 +63,4 @@ We aim to automate this process in the future to make it even easier.
Now, you can create a source or sink with the PrivateLink connection using SQL.
-For details on how to use the VPC endpoint to create a source with the PrivateLink connection, see [Create source with PrivateLink connection](/docs/current/ingest-from-kafka/#create-source-with-privatelink-connection); for creating a sink, see [Create sink with PrivateLink connection](/docs/current/create-sink-kafka/#create-sink-with-privatelink-connection).
+For details on how to use the VPC endpoint to create a source with the PrivateLink connection, see [Create source with PrivateLink connection](/docs/current/ingest-from-kafka/#create-source-with-privatelink-connection); for creating a sink, see [Create sink with PrivateLink connection](/integrations/destinations/apache-kafka#create-sink-with-privatelink-connection).
diff --git a/cloud/manage-sinks.mdx b/cloud/manage-sinks.mdx
index d902d906..31edf09e 100644
--- a/cloud/manage-sinks.mdx
+++ b/cloud/manage-sinks.mdx
@@ -9,7 +9,7 @@ For the complete list of supported sink connectors and data formats, see [Data d
You can create a sink using SQL command to deliver processed data to an external target.
-Refer to [CREATE SINK](/docs/current/sql-create-sink/) in the RisingWave Database documentation.
+Refer to [CREATE SINK](/sql/commands/sql-create-sink) in the RisingWave Database documentation.
## Check a sink
diff --git a/delivery/overview.mdx b/delivery/overview.mdx
index 2e8ee9cd..e5eff912 100644
--- a/delivery/overview.mdx
+++ b/delivery/overview.mdx
@@ -4,14 +4,14 @@ description: "RisingWave supports delivering data to downstream systems via its
sidebarTitle: Overview
---
-To stream data out of RisingWave, you must create a sink. A sink is an external target that you can send data to. Use the [CREATE SINK](/docs/current/sql-create-sink/) statement to create a sink. You need to specify what data to be exported, the format, and the sink parameters.
+To stream data out of RisingWave, you must create a sink. A sink is an external target that you can send data to. Use the [CREATE SINK](/sql/commands/sql-create-sink) statement to create a sink. You need to specify the data to be exported, the format, and the sink parameters.
-Sinks become visible right after you create them, regardless of the backfilling status. Therefore, it's important to understand that the data in the sinks may not immediately reflect the latest state of their upstream sources due to the latency of the sink, connector, and backfilling process. To determine whether the process is complete and the data in the sink is consistent, refer to [Monitor statement progress](/docs/current/monitor-statement-progress/).
+Sinks become visible right after you create them, regardless of the backfilling status. Therefore, it's important to understand that the data in the sinks may not immediately reflect the latest state of their upstream sources due to the latency of the sink, connector, and backfilling process. To determine whether the process is complete and the data in the sink is consistent, refer to [Monitor statement progress](/operate/monitor-statement-progress).
Currently, RisingWave supports the following sink connectors:
* Apache Doris sink connector (`connector = 'doris'`)
-With this connector, you can sink data from RisingWave to Apache Doris. For details about the syntax and parameters, see [Sink data to Apache Doris](/docs/current/sink-to-doris/).
+With this connector, you can sink data from RisingWave to Apache Doris. For details about the syntax and parameters, see [Sink data to Apache Doris](/integrations/destinations/apache-doris).
* Apache Iceberg sink connector (`connector = 'iceberg'`)
With this connector, you can sink data from RisingWave to Apache Iceberg. For details about the syntax and parameters, see [Sink data to Apache Iceberg](/integrations/destinations/apache-iceberg).
* AWS Kinesis sink connector (`connector = 'kinesis'`)
@@ -19,35 +19,35 @@ With this connector, you can sink data from RisingWave to AWS Kinesis. For detai
* Cassandra and ScyllaDB sink connector (`connector = 'cassandra'`)
With this connector, you can sink data from RisingWave to Cassandra or ScyllaDB. For details about the syntax and parameters, see [Sink data to Cassandra or ScyllaDB](/integrations/destinations/cassandra-or-scylladb).
* ClickHouse sink connector (`connector = 'clickhouse'`)
-With this connector, you can sink data from RisingWave to ClickHouse. For details about the syntax and parameters, see [Sink data to ClickHouse](/docs/current/sink-to-clickhouse/).
+With this connector, you can sink data from RisingWave to ClickHouse. For details about the syntax and parameters, see [Sink data to ClickHouse](/integrations/destinations/clickhouse).
* CockroachDB sink connector (`connector = 'jdbc'`)
-With this connector, you can sink data from RisingWave to CockroachDB. For details about the syntax and parameters, see [Sink data to CockroachDB](/docs/current/sink-to-cockroach/).
+With this connector, you can sink data from RisingWave to CockroachDB. For details about the syntax and parameters, see [Sink data to CockroachDB](/integrations/destinations/cockroachdb).
* Delta Lake sink connector (`connector = 'deltalake'`)
-With this connector, you can sink data from RisingWave to Delta Lake. For details about the syntax and parameters, see [Sink data to Delta Lake](/docs/current/sink-to-delta-lake/).
+With this connector, you can sink data from RisingWave to Delta Lake. For details about the syntax and parameters, see [Sink data to Delta Lake](/integrations/destinations/delta-lake).
* Elasticsearch sink connector (`connector = 'elasticsearch'`)
With this connector, you can sink data from RisingWave to Elasticsearch. For details about the syntax and parameters, see [Sink data to Elasticsearch](/integrations/destinations/elasticsearch).
* Google BigQuery sink connector (`connector = 'bigquery'`)
With this connector, you can sink data from RisingWave to Google BigQuery. For details about the syntax and parameters, see [Sink data to Google BigQuery](/integrations/destinations/bigquery).
* Google Pub/Sub sink connector (`connector = 'google_pubsub'`)
-With this connector, you can sink data from RisingWave to Google Pub/Sub. For details about the syntax and parameters, see [Sink data to Google Pub/Sub](/docs/current/sink-to-google-pubsub/).
+With this connector, you can sink data from RisingWave to Google Pub/Sub. For details about the syntax and parameters, see [Sink data to Google Pub/Sub](/integrations/destinations/google-pub-sub).
* JDBC sink connector for MySQL, PostgreSQL, or TiDB (`connector = 'jdbc'`)
-With this connector, you can sink data from RisingWave to JDBC-available databases, such as MySQL, PostgreSQL, or TiDB. When sinking to a database with a JDBC driver, ensure that the corresponding table created in RisingWave has the same schema as the table in the database you are sinking to. For details about the syntax and parameters, see [Sink to MySQL](/docs/current/sink-to-mysql-with-jdbc/), [Sink to PostgreSQL](/docs/current/sink-to-postgres/), or [Sink to TiDB](/docs/current/sink-to-tidb/).
+With this connector, you can sink data from RisingWave to JDBC-available databases, such as MySQL, PostgreSQL, or TiDB. When sinking to a database with a JDBC driver, ensure that the corresponding table created in RisingWave has the same schema as the table in the database you are sinking to. For details about the syntax and parameters, see [Sink to MySQL](/integrations/destinations/mysql), [Sink to PostgreSQL](/integrations/destinations/postgresql), or [Sink to TiDB](/integrations/destinations/tidb).
* Kafka sink connector (`connector = 'kafka'`)
-With this connector, you can sink data from RisingWave to Kafka topics. For details about the syntax and parameters, see [Sink data to Kafka](/docs/current/create-sink-kafka/).
+With this connector, you can sink data from RisingWave to Kafka topics. For details about the syntax and parameters, see [Sink data to Kafka](/integrations/destinations/apache-kafka).
* MQTT sink connector (`connector = 'mqtt'`)
-With this connector, you can sink data from RisingWave to MQTT topics. For details about the syntax and parameters, see [Sink data to MQTT](/docs/current/sink-to-mqtt/).
+With this connector, you can sink data from RisingWave to MQTT topics. For details about the syntax and parameters, see [Sink data to MQTT](/integrations/destinations/mqtt).
* NATS sink connector (`connector = 'nats'`)
With this connector, you can sink data from RisingWave to NATS. For details about the syntax and parameters, see [Sink data to NATS](/integrations/destinations/nats-and-nats-jetstream).
* Pulsar sink connector (`connector = 'pulsar'`)
With this connector, you can sink data from RisingWave to Pulsar. For details about the syntax and parameters, see [Sink data to Pulsar](/integrations/destinations/apache-pulsar).
* Redis sink connector (`connector = 'redis'`)
-With this connector, you can sink data from RisingWave to Redis. For details about the syntax and parameters, see [Sink data to Redis](/docs/current/sink-to-redis/).
+With this connector, you can sink data from RisingWave to Redis. For details about the syntax and parameters, see [Sink data to Redis](/integrations/destinations/redis).
* Snowflake sink connector (`connector = 'snowflake'`)
With this connector, you can sink data from RisingWave to Snowflake. For details about the syntax and parameters, see [Sink data to Snowflake](/integrations/destinations/snowflake).
* StarRocks sink connector (`connector = 'starrocks'`)
-With this connector, you can sink data from RisingWave to StarRocks. For details about the syntax and parameters, see [Sink data to StarRocks](/docs/current/sink-to-starrocks/).
+With this connector, you can sink data from RisingWave to StarRocks. For details about the syntax and parameters, see [Sink data to StarRocks](/integrations/destinations/starrocks).
* Microsoft SQL Server sink connector (`connector = 'sqlserver'`)
-With this connector, you can sink data from RisingWave to Microsoft SQL Server. For details about the syntax and parameters, see [Sink data to SQL Server](/docs/current/sink-to-sqlserver/).
+With this connector, you can sink data from RisingWave to Microsoft SQL Server. For details about the syntax and parameters, see [Sink data to SQL Server](/integrations/destinations/sql-server).
## Sink decoupling
@@ -58,7 +58,7 @@ Sink decoupling introduces a buffering queue between a RisingWave sink and the d
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
The `sink_decouple` session variable can be specified to enable or disable sink decoupling. The default value for the session variable is `default`.
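A minimal sketch of toggling the session variable just described:

```sql
-- Applies to sinks created later in this session
SET sink_decouple = true;

-- Revert to the connector's default behavior
SET sink_decouple = default;
```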
@@ -105,7 +105,7 @@ When creating an `upsert` sink, note whether or not you need to specify the prim
**PUBLIC PREVIEW**
-Sink data in parquet encode is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+Sink data in parquet encode is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
RisingWave supports sinking data in Parquet or JSON encode to file systems including S3, Google Cloud Storage (GCS), Azure Blob Storage, and WebHDFS. This eliminates the need for complex data lake setups. Once the data is saved, the files can be queried using the batch processing engine of RisingWave through the `file_scan` API. You can also leverage third-party OLAP query engines for further data processing.
diff --git a/delivery/risingwave-as-postgres-fdw.mdx b/delivery/risingwave-as-postgres-fdw.mdx
index 5b2ff019..5017adc5 100644
--- a/delivery/risingwave-as-postgres-fdw.mdx
+++ b/delivery/risingwave-as-postgres-fdw.mdx
@@ -7,7 +7,7 @@ description: "A foreign data wrapper in PostgreSQL allows you to directly virtua
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Prerequisites
diff --git a/delivery/subscription.mdx b/delivery/subscription.mdx
index d22bc6ca..7a5c8df0 100644
--- a/delivery/subscription.mdx
+++ b/delivery/subscription.mdx
@@ -10,7 +10,7 @@ This feature allows you to monitor all data changes without relying on external
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Manage subscription
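As a hedged sketch of the subscription workflow this page covers (`sub`, `mv`, and the retention value are placeholders):

```sql
-- Retain changes for one day so cursors can replay them
CREATE SUBSCRIPTION sub FROM mv WITH (retention = '1D');

-- Open a cursor on the subscription and fetch the next change
DECLARE cur SUBSCRIPTION CURSOR FOR sub;
FETCH NEXT FROM cur;
```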
diff --git a/demos/clickstream-analysis.mdx b/demos/clickstream-analysis.mdx
index 02eb359c..eb6c3547 100644
--- a/demos/clickstream-analysis.mdx
+++ b/demos/clickstream-analysis.mdx
@@ -80,7 +80,7 @@ First, the `tumble()` function will map each event into a 10-minute window to cr
Next, the `hop()` function will create 24-hour time windows every 10 minutes. Each event will be mapped to corresponding windows. Finally, they will be grouped by `target_id` and `window_time` to calculate the total number of clicks of each thread within 24 hours.
-Please refer to [Time window functions](/docs/current/sql-function-time-window/) for an explanation of the tumble and hop functions and aggregations.
+Please refer to [Time window functions](/processing/sql/time-windows) for an explanation of the tumble and hop functions and aggregations.
```sql
CREATE MATERIALIZED VIEW thread_view_count AS WITH t AS (
@@ -142,7 +142,7 @@ The result may look like this:
(5 rows)
```
-We can also query results by specifying a time interval. To learn more about data and time functions and operators, see [Date and time](/docs/current/sql-function-datetime/).
+We can also query results by specifying a time interval. To learn more about date and time functions and operators, see [Date and time](/sql/functions/datetime).
```sql
SELECT * FROM thread_view_count
diff --git a/demos/overview.mdx b/demos/overview.mdx
index e6862074..5376f991 100644
--- a/demos/overview.mdx
+++ b/demos/overview.mdx
@@ -11,10 +11,10 @@ Try out the following runnable demos in these different industries:
## Capital markets
-
+
Transform raw market data in real-time to provide insights into market trends, asset health, and trade opportunities.
-
+
Detect suspicious patterns, compliance breaches, and anomalies from trading activities in real-time.
@@ -22,10 +22,10 @@ Detect suspicious patterns, compliance breaches, and anomalies from trading acti
## Sports betting
-
+
Manage your sports betting positions in real-time by using RisingWave to monitor exposure and risk.
-
+
Identify high-risk and high-value users by analyzing and identifying trends in user betting patterns.
@@ -33,7 +33,7 @@ Identify high-risk and high-value users by analyzing and identifying trends in u
## Logistics
-
+
Track inventory levels and forecast demand to prevent shortages and optimize restocking schedules.
\ No newline at end of file
diff --git a/demos/server-performance-anomaly-detection.mdx b/demos/server-performance-anomaly-detection.mdx
index f5479dbd..b8248307 100644
--- a/demos/server-performance-anomaly-detection.mdx
+++ b/demos/server-performance-anomaly-detection.mdx
@@ -95,7 +95,7 @@ In this tutorial, we will create a few different materialized views. The first v
First, we will create the materialized view that contains all relevant TCP values. We use the tumble function to map all events into 1-minute windows and calculate the average metric value for each device within each time window. Next, the average TCP and NIC metrics are calculated separately before joining on device names and time windows. We will keep the records that measure the volume of bytes transferred by the interface and whose average utilization is greater than or equal to 50.
-Please refer to this [guide](/docs/current/sql-function-time-window/) for an explanation of the tumble function and aggregations.
+Please refer to this [guide](/processing/sql/time-windows) for an explanation of the tumble function and aggregations.
```sql
CREATE MATERIALIZED VIEW high_util_tcp_metrics AS
diff --git a/demos/use-risingwave-to-monitor-risingwave-metrics.mdx b/demos/use-risingwave-to-monitor-risingwave-metrics.mdx
index ee5b3688..fd94332f 100644
--- a/demos/use-risingwave-to-monitor-risingwave-metrics.mdx
+++ b/demos/use-risingwave-to-monitor-risingwave-metrics.mdx
@@ -92,7 +92,7 @@ We have connected RisingWave to the streams, but RisingWave has not started to c
## Step 3: Create a materialized view
-Now, create a materialized view that tracks the average metric values every 30 seconds. We will split the stream into 30 seconds windows and calculate the average metric value over each window. Here we use the [tumble window](/docs/current/sql-function-time-window/) functionality to support window slicing.
+Now, create a materialized view that tracks the average metric values every 30 seconds. We will split the stream into 30-second windows and calculate the average metric value over each window. Here we use the [tumble window](/processing/sql/time-windows) functionality to support window slicing.
```sql
CREATE MATERIALIZED VIEW metric_avg_30s AS
diff --git a/deploy/hardware-requirements.mdx b/deploy/hardware-requirements.mdx
index 083a2db4..3cdebca7 100644
--- a/deploy/hardware-requirements.mdx
+++ b/deploy/hardware-requirements.mdx
@@ -46,6 +46,6 @@ Meta nodes manage metadata and coordinate the cluster. It is advisable to deploy
## Storage
-RisingWave offers support for multiple storage systems as storage backends. For the complete list of supported storage systems for Kubernetes deployments, see [Set up a RisingWave cluster in Kubernetes](/docs/current/risingwave-kubernetes/#deploy-a-risingwave-instance).
+RisingWave offers support for multiple storage systems as storage backends. For the complete list of supported storage systems for Kubernetes deployments, see [Set up a RisingWave cluster in Kubernetes](/deploy/risingwave-kubernetes#deploy-a-risingwave-instance).
Please note that storage performance can **significantly** impact RisingWave's performance. We recommend using high-performance cloud storage systems such as AWS S3. For self-managed storage systems such as MinIO or a local file system, be sure to use high-performance SSD disks.
diff --git a/deploy/k8s-cluster-scaling.mdx b/deploy/k8s-cluster-scaling.mdx
index 455531ee..ecb35f7c 100644
--- a/deploy/k8s-cluster-scaling.mdx
+++ b/deploy/k8s-cluster-scaling.mdx
@@ -7,26 +7,28 @@ description: "This article describes adaptive parallelism as the default scaling
RisingWave supports adaptive and fixed parallelism for each streaming job, including materialized views, sinks, and tables.
-* Adaptive parallelism
-Adaptive parallelism is the **default** setting for newly created streaming jobs since v1.7\. In this mode, RisingWave automatically adjusts parallelism to utilize all CPU cores across the compute nodes in the cluster. When nodes are added or removed, parallelism adjusts accordingly based on the current number of CPU cores.
-To modify the scaling policy to adaptive parallelism, use the SQL command:
+- Adaptive parallelism
-```sql
-ALTER TABLE t SET PARALLELISM = adaptive;
-```
-To modify on a materialized view:
+ Adaptive parallelism is the **default** setting for newly created streaming jobs since v1.7. In this mode, RisingWave automatically adjusts parallelism to utilize all CPU cores across the compute nodes in the cluster. When nodes are added or removed, parallelism adjusts accordingly based on the current number of CPU cores.
+ To modify the scaling policy to adaptive parallelism, use the SQL command:
-```sql
-ALTER MATERIALIZED VIEW mv SET PARALLELISM = adaptive;
-```
+ ```sql
+ ALTER TABLE t SET PARALLELISM = adaptive;
+ ```
+ To modify on a materialized view:
-* Fixed parallelism
-Fixed parallelism is the advanced mode that allows manually specifying a parallelism number that remains constant as the cluster resizes. It’s commonly used to throttle stream bandwidth and ensures predictable resource allocation. For example:
+ ```sql
+ ALTER MATERIALIZED VIEW mv SET PARALLELISM = adaptive;
+ ```
-```sql
-ALTER TABLE t SET PARALLELISM = 16; -- Replace 16 with the desired parallelism
-```
-When there are many streaming jobs running in the cluster, it’s recommended to use fixed parallelism to avoid overloading the system.
+- Fixed parallelism
+
+ Fixed parallelism is an advanced mode that lets you manually specify a parallelism number that remains constant as the cluster resizes. It’s commonly used to throttle stream bandwidth and to ensure predictable resource allocation. For example:
+
+ ```sql
+ ALTER TABLE t SET PARALLELISM = 16; -- Replace 16 with the desired parallelism
+ ```
+ When there are many streaming jobs running in the cluster, it’s recommended to use fixed parallelism to avoid overloading the system.
RisingWave distributes its computation across lightweight threads called "streaming actors," which run simultaneously on CPU cores. By spreading these streaming actors across cores, RisingWave achieves parallel computation, resulting in improved performance, scalability, and throughput.
@@ -37,13 +39,15 @@ In both scaling modes, streaming actors will redistribute across the cluster to
Scale-out here refers to the process of adding more **compute nodes** to the cluster. For frontend nodes, you can simply scale out/in by adding more frontend nodes to the cluster, because they are stateless and can be automatically discovered by the meta nodes.
1. First, add more compute nodes with `kubectl`.
-```bash
-# If you are using risingwave-operator
-kubectl apply -f .yaml # or kubectl edit RisingWave/
-# If you are not using risingwave-operator
-kubectl scale statefulset/risingwave-compute --replicas=
-```
-Then wait until new compute nodes start.
+
+ ```bash
+ # If you are using risingwave-operator
+ kubectl apply -f .yaml # or kubectl edit RisingWave/
+ # If you are not using risingwave-operator
+ kubectl scale statefulset/risingwave-compute --replicas=
+ ```
+ Then wait until new compute nodes start.
+
2. If you are using fixed parallelism, you may need to manually adjust the parallelism of the streaming jobs to utilize the new compute nodes. For adaptive parallelism, the system will automatically adjust the parallelism to utilize the new compute nodes.
## Scale-in
@@ -52,15 +56,15 @@ Scale-in here refers to the process of decreasing **compute nodes** from the clu
1. Since v2.0, to trigger an immediate scale-in, apply the following yaml files to decrease the number of compute nodes:
-```bash
-# If you are using risingwave-operator
-kubectl apply -f .yaml # or kubectl edit RisingWave/
+ ```bash
+ # If you are using risingwave-operator
+ kubectl apply -f .yaml # or kubectl edit RisingWave/
-# If you are not using risingwave-operator
-kubectl scale statefulset/risingwave-compute --replicas=
-```
+ # If you are not using risingwave-operator
+ kubectl scale statefulset/risingwave-compute --replicas=
+ ```
-1. If you are using fixed parallelism, you may need to manually adjust the parallelism of the streaming jobs. For adaptive parallelism, the system will automatically adjust the streaming jobs to use less parallelism.
+2. If you are using fixed parallelism, you may need to manually adjust the parallelism of the streaming jobs. For adaptive parallelism, the system will automatically adjust the streaming jobs to use less parallelism.
## Upgrade to v1.7
@@ -90,4 +94,4 @@ dev=> SELECT * FROM rw_fragment_parallelism WHERE name = 't';
1001 | t | table | 1 | HASH | {1001} | {2} | {MVIEW} | 4
```
-To understand the output of the query, you may need to know about these two concepts: [streaming actors](/docs/current/key-concepts/#streaming-actors) and [fragments](/docs/current/key-concepts/#fragments).
+To understand the output of the query, you may need to know about these two concepts: [streaming actors](/reference/key-concepts#streaming-actors) and [fragments](/reference/key-concepts#fragments).
diff --git a/get-started/rw-premium-edition-intro.mdx b/get-started/rw-premium-edition-intro.mdx
index 69fa5f69..bef2e689 100644
--- a/get-started/rw-premium-edition-intro.mdx
+++ b/get-started/rw-premium-edition-intro.mdx
@@ -28,7 +28,7 @@ RisingWave Premium 1.0 is the first major release of this new edition with sever
### Connectors
-
+
For users who are already using these features in 1.9.x or earlier versions, rest assured that the functionality of these features will remain intact if you stay on that version. If you choose to upgrade to v2.0 or a later version, an error will appear indicating that you need a license to use these features.
diff --git a/integrations/destinations/amazon-dynamodb.mdx b/integrations/destinations/amazon-dynamodb.mdx
index 811f9f93..d26b87f8 100644
--- a/integrations/destinations/amazon-dynamodb.mdx
+++ b/integrations/destinations/amazon-dynamodb.mdx
@@ -15,7 +15,7 @@ This feature is only available in the premium edition of RisingWave. The premium
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Syntax
diff --git a/integrations/destinations/apache-iceberg.mdx b/integrations/destinations/apache-iceberg.mdx
index 02e7ee5e..fd247618 100644
--- a/integrations/destinations/apache-iceberg.mdx
+++ b/integrations/destinations/apache-iceberg.mdx
@@ -7,7 +7,7 @@ sidebarTitle: Apache Iceberg
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Prerequisites
diff --git a/integrations/destinations/apache-pulsar.mdx b/integrations/destinations/apache-pulsar.mdx
index 47031d8f..a6abb69d 100644
--- a/integrations/destinations/apache-pulsar.mdx
+++ b/integrations/destinations/apache-pulsar.mdx
@@ -9,7 +9,7 @@ sidebarTitle: Apache Pulsar
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Prerequisites
diff --git a/integrations/destinations/aws-kinesis.mdx b/integrations/destinations/aws-kinesis.mdx
index 96e87eb5..c0cb65b2 100644
--- a/integrations/destinations/aws-kinesis.mdx
+++ b/integrations/destinations/aws-kinesis.mdx
@@ -7,7 +7,7 @@ sidebarTitle: AWS Kinesis
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Syntax
diff --git a/integrations/destinations/azure-blob.mdx b/integrations/destinations/azure-blob.mdx
index d681410c..32903bfd 100644
--- a/integrations/destinations/azure-blob.mdx
+++ b/integrations/destinations/azure-blob.mdx
@@ -11,7 +11,7 @@ description: This guide describes how to sink data from RisingWave to Azure Blob
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Syntax
diff --git a/integrations/destinations/bigquery.mdx b/integrations/destinations/bigquery.mdx
index 73d88f6e..2e4532dd 100644
--- a/integrations/destinations/bigquery.mdx
+++ b/integrations/destinations/bigquery.mdx
@@ -17,7 +17,7 @@ This feature is only available in the premium edition of RisingWave. The premium
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Prerequisites
diff --git a/integrations/destinations/cassandra-or-scylladb.mdx b/integrations/destinations/cassandra-or-scylladb.mdx
index b3506b7e..87d6e7bc 100644
--- a/integrations/destinations/cassandra-or-scylladb.mdx
+++ b/integrations/destinations/cassandra-or-scylladb.mdx
@@ -9,7 +9,7 @@ This guide describes how to sink data from RisingWave to Cassandra or ScyllaDB u
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Prerequisites
diff --git a/integrations/destinations/cockroachdb.mdx b/integrations/destinations/cockroachdb.mdx
index 5574d413..bbc27f7d 100644
--- a/integrations/destinations/cockroachdb.mdx
+++ b/integrations/destinations/cockroachdb.mdx
@@ -13,7 +13,7 @@ You can test out this process on your own device by using the `cockroach-sink` d
* If you are running RisingWave locally from binaries and intend to use the native CDC source connectors or the JDBC sink connector, make sure you have [JDK 11](https://openjdk.org/projects/jdk/11/) or a later version installed in your environment.
## Syntax and parameters
-Since CockroachDB is PostgreSQL-compatible, the syntax and parameters are the same as creating a PostgreSQL sink. For the syntax and parameters, see [Create a sink](/docs/current/sink-to-postgres/#create-a-sink%E2%80%8B).
+Since CockroachDB is PostgreSQL-compatible, the syntax and parameters are the same as creating a PostgreSQL sink. For the syntax and parameters, see [Create a sink](/integrations/destinations/postgresql#create-a-sink%E2%80%8B).
## Examples
diff --git a/integrations/destinations/elasticsearch.mdx b/integrations/destinations/elasticsearch.mdx
index a44be2cf..92b175f1 100644
--- a/integrations/destinations/elasticsearch.mdx
+++ b/integrations/destinations/elasticsearch.mdx
@@ -19,7 +19,7 @@ The Elasticsearch sink connector in RisingWave provides at-least-once delivery s
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Prerequisites
diff --git a/integrations/destinations/mongodb.mdx b/integrations/destinations/mongodb.mdx
index c0671d29..540324a6 100644
--- a/integrations/destinations/mongodb.mdx
+++ b/integrations/destinations/mongodb.mdx
@@ -7,7 +7,7 @@ description: "This guide describes how to sink data from RisingWave to MongoDB.
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Syntax
diff --git a/integrations/destinations/nats-and-nats-jetstream.mdx b/integrations/destinations/nats-and-nats-jetstream.mdx
index eb6c398a..fa9477ab 100644
--- a/integrations/destinations/nats-and-nats-jetstream.mdx
+++ b/integrations/destinations/nats-and-nats-jetstream.mdx
@@ -10,7 +10,7 @@ description: "This guide describes how to sink data from RisingWave to NATS subj
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Prerequisites
diff --git a/integrations/destinations/postgresql.mdx b/integrations/destinations/postgresql.mdx
index ec5cc673..2ffb5404 100644
--- a/integrations/destinations/postgresql.mdx
+++ b/integrations/destinations/postgresql.mdx
@@ -146,7 +146,7 @@ GROUP BY
```
### Sink from RisingWave
-Use the following query to sink data from the materialized view to the target table in PostgreSQL. Ensure that the `jdbc_url` is accurate and reflects the PostgreSQL database that you are connecting to. See [CREATE SINK](/docs/current/sql-create-sink/) for more details.
+Use the following query to sink data from the materialized view to the target table in PostgreSQL. Ensure that the `jdbc_url` is accurate and reflects the PostgreSQL database that you are connecting to. See [CREATE SINK](/sql/commands/sql-create-sink) for more details.
```sql
CREATE SINK target_count_postgres_sink FROM target_count WITH (
diff --git a/integrations/destinations/snowflake.mdx b/integrations/destinations/snowflake.mdx
index 3b0e6fd6..fa410e0c 100644
--- a/integrations/destinations/snowflake.mdx
+++ b/integrations/destinations/snowflake.mdx
@@ -17,7 +17,7 @@ This feature is only available in the premium edition of RisingWave. The premium
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Prerequisite
diff --git a/integrations/destinations/supabase.mdx b/integrations/destinations/supabase.mdx
index 7be8f617..ba5b807e 100644
--- a/integrations/destinations/supabase.mdx
+++ b/integrations/destinations/supabase.mdx
@@ -12,7 +12,7 @@ Before creating a sink in RisingWave, create a project and target table to sink
## Sink data to Supabase
-As Supabase is compatible with PostgreSQL, you can sink data to Supabase the same way you would sink data to PostgreSQL with the JDBC connector. For the syntax, parameters, and examples, see [Sink data from RisingWave to PostgreSQL](/docs/current/sink-to-postgres/).
+As Supabase is compatible with PostgreSQL, you can sink data to Supabase the same way you would sink data to PostgreSQL with the JDBC connector. For the syntax, parameters, and examples, see [Sink data from RisingWave to PostgreSQL](/integrations/destinations/postgresql).
The following SQL command creates a sink, `promotion_update`, that sinks data from the materialized view `product_calc_mv` in RisingWave to the `promotions` table in Supabase. The columns of the materialized view must match the columns of the table in Supabase.
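The exact statement appears in the full guide; as a rough sketch, assuming JDBC connection details and a hypothetical `product_id` primary key, it could look like this:

```sql
CREATE SINK promotion_update FROM product_calc_mv WITH (
   connector = 'jdbc',
   -- Placeholder connection string; substitute your Supabase project's host and credentials.
   jdbc.url = 'jdbc:postgresql://<supabase_host>:5432/postgres?user=postgres&password=<password>',
   table.name = 'promotions',
   type = 'upsert',
   -- Hypothetical key column; match it to the actual primary key of the promotions table.
   primary_key = 'product_id'
);
```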
diff --git a/integrations/destinations/tidb.mdx b/integrations/destinations/tidb.mdx
index 62f7100a..d218ca4e 100644
--- a/integrations/destinations/tidb.mdx
+++ b/integrations/destinations/tidb.mdx
@@ -4,7 +4,7 @@ description: "As TiDB is compatible with MySQL, you can sink data to TiDB the sa
sidebarTitle: TiDB
---
-For the syntax, settings, and examples, see [Sink data from RisingWave to MySQL with the JDBC connector](/docs/current/sink-to-mysql-with-jdbc/).
+For the syntax, settings, and examples, see [Sink data from RisingWave to MySQL with the JDBC connector](/integrations/destinations/mysql).
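As a minimal sketch, a TiDB sink through the JDBC connector could look like the following; the host, database, table, and key names are placeholders:

```sql
CREATE SINK tidb_sink FROM my_mv WITH (
   connector = 'jdbc',
   -- Placeholder URL; TiDB listens on port 4000 by default.
   jdbc.url = 'jdbc:mysql://<tidb_host>:4000/<database>?user=<user>&password=<password>',
   table.name = 'target_table',
   type = 'upsert',
   primary_key = 'id' -- hypothetical key column
);
```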
### Data type mapping
diff --git a/integrations/sources/apache-iceberg.mdx b/integrations/sources/apache-iceberg.mdx
index 0d362d20..5b007873 100644
--- a/integrations/sources/apache-iceberg.mdx
+++ b/integrations/sources/apache-iceberg.mdx
@@ -7,7 +7,7 @@ sidebarTitle: Apache Iceberg
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Syntax
diff --git a/integrations/sources/citus-cdc.mdx b/integrations/sources/citus-cdc.mdx
index 44a95929..4b7d2cbe 100644
--- a/integrations/sources/citus-cdc.mdx
+++ b/integrations/sources/citus-cdc.mdx
@@ -9,7 +9,7 @@ Citus database is an extension to PostgreSQL that transforms PostgreSQL into a d
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Set up Citus
diff --git a/integrations/sources/postgresql-cdc.mdx b/integrations/sources/postgresql-cdc.mdx
index 40502ada..f0b5072a 100644
--- a/integrations/sources/postgresql-cdc.mdx
+++ b/integrations/sources/postgresql-cdc.mdx
@@ -416,7 +416,7 @@ And this is the output of `DESCRIBE supplier;`
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
RisingWave supports ingesting data from a partitioned table. To configure a publication for your CDC stream, note that PostgreSQL, by default, creates publications with `publish_via_partition_root = false`. This setting causes replication slot events to contain separate events for each partition, rather than for the root partitioned table.
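To have replication events attributed to the root partitioned table instead, you can create the publication with the setting enabled on the upstream PostgreSQL database. A sketch with hypothetical publication and table names:

```sql
-- Run on the upstream PostgreSQL database, not in RisingWave.
CREATE PUBLICATION rw_publication FOR TABLE my_partitioned_table
    WITH (publish_via_partition_root = true);
```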
diff --git a/performance/performance-best-practices.mdx b/performance/performance-best-practices.mdx
index b4548453..a583ac49 100644
--- a/performance/performance-best-practices.mdx
+++ b/performance/performance-best-practices.mdx
@@ -78,7 +78,7 @@ This append-only versus non-append-only difference can make an impact in a few u
2. Deduplication.
3. Join.
-This is an advanced feature that is still in the [public preview stage](/product-lifecycle/#features-in-the-public-preview-stage), which may or may not exist in the future version of RisingWave. Feel free to raise the question in RisingWave’s Slack channel before making a decision.
+This is an advanced feature that is still in the [public preview stage](/changelog/product-lifecycle#features-in-the-public-preview-stage), which may or may not exist in future versions of RisingWave. Feel free to raise questions in RisingWave’s Slack channel before making a decision.
## How to monitor the progress of direct CDC
diff --git a/processing/emit-on-window-close.mdx b/processing/emit-on-window-close.mdx
index e385c055..07c56877 100644
--- a/processing/emit-on-window-close.mdx
+++ b/processing/emit-on-window-close.mdx
@@ -5,7 +5,7 @@ title: "Emit on window close"
**Public Preview**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
In streaming systems, there are typically two types of triggering policies for window calculations:
diff --git a/processing/sql/joins.mdx b/processing/sql/joins.mdx
index 61513613..7fa2fd9e 100644
--- a/processing/sql/joins.mdx
+++ b/processing/sql/joins.mdx
@@ -65,7 +65,7 @@ A full outer join (or simply, full join) returns all rows when there is a match
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
An ASOF join returns the nearest record in a reference table based on the event time or any ordered properties.
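As a sketch, an ASOF join that matches each trade with the latest quote at or before its trade time could look like this; the table and column names are hypothetical:

```sql
SELECT t.symbol, t.trade_time, q.quote_price
FROM trades t
ASOF JOIN quotes q
  ON t.symbol = q.symbol             -- equality condition
 AND t.trade_time >= q.quote_time;   -- picks the nearest quote at or before the trade
```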
@@ -172,7 +172,7 @@ The syntax of a window join is:
JOIN ON ;
```
-One of the `join_conditions` must be an equality condition based on the watermarks of the two table expressions. For the syntax of ``, see [Time window functions](/docs/current/sql-function-time-window/).
+One of the `join_conditions` must be an equality condition based on the watermarks of the two table expressions. For the syntax of ``, see [Time window functions](/processing/sql/time-windows).
For example, suppose you have these two sources:
diff --git a/processing/time-travel-queries.mdx b/processing/time-travel-queries.mdx
index aff430fe..c69b2b3f 100644
--- a/processing/time-travel-queries.mdx
+++ b/processing/time-travel-queries.mdx
@@ -12,7 +12,7 @@ This feature is exclusive to RisingWave Premium Edition that offers advanced cap
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Prerequisites
diff --git a/reference/key-concepts.mdx b/reference/key-concepts.mdx
index a132a3a0..d23adf71 100644
--- a/reference/key-concepts.mdx
+++ b/reference/key-concepts.mdx
@@ -47,7 +47,7 @@ Parallelism refers to the technique of simultaneously executing multiple databas
### Sinks[](#sinks "Direct link to Sinks")
-A sink is an external target to which you can send data. RisingWave now supports exporting data to Kafka topics. Before you stream data out of RisingWave to a sink, you need to create a sink using the [CREATE SINK](/docs/current/sql-create-sink/) statement to establish the connection.
+A sink is an external target to which you can send data. RisingWave now supports exporting data to Kafka topics. Before you stream data out of RisingWave to a sink, you need to create a sink using the [CREATE SINK](/sql/commands/sql-create-sink) statement to establish the connection.
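For illustration, a minimal Kafka sink could be created as follows; the broker address, topic, and object names are placeholders:

```sql
CREATE SINK my_sink FROM my_materialized_view WITH (
   connector = 'kafka',
   properties.bootstrap.server = 'broker1:9092', -- placeholder broker address
   topic = 'output_topic'
) FORMAT PLAIN ENCODE JSON;
```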
### Sources[](#sources "Direct link to Sources")
diff --git a/sql/commands/overview.mdx b/sql/commands/overview.mdx
index ad57faa9..4c8f285d 100644
--- a/sql/commands/overview.mdx
+++ b/sql/commands/overview.mdx
@@ -55,7 +55,7 @@ sidebarTitle: Overview
>
Modify the properties of a schema.
- Modify the properties of a sink. Modify the properties of a source. Modify a server configuration parameter. Modify the properties of a table. Modify the properties of a user. Modify the properties of a view. Convert stream into an append-only changelog. Start a transaction. Cancel specific streaming jobs. Add comments on tables or columns. Commit the current transaction. Create a user-defined aggregate function. Create a connection between VPCs. Create a new database. Create a user-defined function. Create an index on a column of a table or a materialized view to speed up data retrieval. Create a materialized view. Create a new schema. Create a secret to store credentials. Create a sink into RisingWave's table. Create a sink. Supported data sources and how to connect RisingWave to the sources. Create a table. Create a new user account. Create a non-materialized view.
+ Modify the properties of a sink. Modify the properties of a source. Modify a server configuration parameter. Modify the properties of a table. Modify the properties of a user. Modify the properties of a view. Convert stream into an append-only changelog. Start a transaction. Cancel specific streaming jobs. Add comments on tables or columns. Commit the current transaction. Create a user-defined aggregate function. Create a connection between VPCs. Create a new database. Create a user-defined function. Create an index on a column of a table or a materialized view to speed up data retrieval. Create a materialized view. Create a new schema. Create a secret to store credentials. Create a sink into RisingWave's table. Create a sink. Supported data sources and how to connect RisingWave to the sources. Create a table. Create a new user account. Create a non-materialized view. Remove rows from a table. Get information about the columns in a table, source, sink, view, or materialized view. Discard session state. Drop a user-defined aggregate function. Remove a connection. Remove a database. Drop a user-defined function. Remove an index. Remove a materialized view. Remove a schema. Drop a secret. Remove a sink. Remove a source. Remove a table. Remove a user. Drop a view. Show the execution plan of a statement. Commit pending data changes and persists updated data to storage. Grant a user privileges. Insert new rows of data into a table. Trigger recovery manually. Revoke privileges from a user. Retrieve data from a table or a materialized view. Run Data Definition Language (DDL) operations in the background. Enable or disable implicit flushes after batch operations. Set time zone. Change a run-time parameter. Show the details of your RisingWave cluster. Show columns in a table, source, sink, view or materialized view. Show existing connections. Show the query used to create the specified index. Show the query used to create the specified materialized view. Show the query used to create the specified sink. Show the query used to create the specified source. Show the query used to create the specified table. Show the query used to create the specified view. Show all cursors in the current session. Show existing databases. Show all user-defined functions. Show existing indexes from a particular table. Show internal tables to learn about the existing internal states. Show all streaming jobs. Show existing materialized views. Show the details of the system parameters. Display system current workload. Show existing schemas. Shows all sinks. Show existing sources. Show all subscription cursors in the current session. Show existing tables. Show existing views. Start a transaction. Modify existing rows in a table.
diff --git a/sql/commands/sql-cancel-jobs.mdx b/sql/commands/sql-cancel-jobs.mdx
index 3932b32d..aa96bce2 100644
--- a/sql/commands/sql-cancel-jobs.mdx
+++ b/sql/commands/sql-cancel-jobs.mdx
@@ -44,7 +44,7 @@ Id
diff --git a/sql/commands/sql-create-connection.mdx b/sql/commands/sql-create-connection.mdx
index e9193aaf..0b5ebcd4 100644
--- a/sql/commands/sql-create-connection.mdx
+++ b/sql/commands/sql-create-connection.mdx
@@ -63,4 +63,4 @@ CREATE CONNECTION connection_name WITH (
```
7. Create a source or sink with AWS PrivateLink connection.
* Use the `CREATE SOURCE/TABLE` command to create a Kafka source with PrivateLink connection. For more details, see [Create source with AWS PrivateLink connection](/docs/current/ingest-from-kafka/#create-source-with-vpc-connection).
- * Use the `CREATE SINK` command to create a Kafka sink with PrivateLink connection. For more details, see [Create sink with AWS PrivateLink connection](/docs/current/create-sink-kafka/#create-sink-with-vpc-connection).
+ * Use the `CREATE SINK` command to create a Kafka sink with PrivateLink connection. For more details, see [Create sink with AWS PrivateLink connection](/integrations/destinations/apache-kafka#create-sink-with-vpc-connection).
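As a sketch, a Kafka sink that routes through the PrivateLink connection created above could look like the following; broker addresses, ports, and names are placeholders:

```sql
CREATE SINK sink1 FROM mv1 WITH (
   connector = 'kafka',
   properties.bootstrap.server = 'broker1:9093,broker2:9094', -- placeholder brokers
   topic = 'sink_topic',
   privatelink.targets = '[{"port": 9093}, {"port": 9094}]',
   connection.name = 'connection_name'
) FORMAT PLAIN ENCODE JSON;
```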
diff --git a/sql/commands/sql-create-sink-into.mdx b/sql/commands/sql-create-sink-into.mdx
index a8f4a085..170a7929 100644
--- a/sql/commands/sql-create-sink-into.mdx
+++ b/sql/commands/sql-create-sink-into.mdx
@@ -76,7 +76,7 @@ DROP SINK orders_sink0;
title="CREATE SINK"
icon="plus"
icontype="solid"
- href="/docs/current/sql-create-sink/"
+ href="/sql/commands/sql-create-sink"
>
Create a sink into an external target
diff --git a/sql/commands/sql-create-sink.mdx b/sql/commands/sql-create-sink.mdx
index f9761e33..4b788380 100644
--- a/sql/commands/sql-create-sink.mdx
+++ b/sql/commands/sql-create-sink.mdx
@@ -40,23 +40,23 @@ Please distinguish between the parameters set in the FORMAT and ENCODE options a
Click a sink name to see the SQL syntax, options, and sample statement of sinking data from RisingWave to the sink.
-* [Apache Doris](/docs/current/sink-to-doris/)
+* [Apache Doris](/integrations/destinations/apache-doris)
* [Apache Iceberg](/integrations/destinations/apache-iceberg)
* [AWS Kinesis](/integrations/destinations/aws-kinesis)
* [Cassandra or ScyllaDB](/integrations/destinations/cassandra-or-scylladb)
-* [ClickHouse](/docs/current/sink-to-clickhouse/)
-* [CockroachDB](/docs/current/sink-to-cockroach/)
-* [Delta Lake](/docs/current/sink-to-delta-lake/)
+* [ClickHouse](/integrations/destinations/clickhouse)
+* [CockroachDB](/integrations/destinations/cockroachdb)
+* [Delta Lake](/integrations/destinations/delta-lake)
* [Elasticsearch](/integrations/destinations/elasticsearch)
* [Google BigQuery](/integrations/destinations/bigquery)
-* [Kafka](/docs/current/create-sink-kafka/) (Supports versions 3.1.0 or later)
-* [MySQL](/docs/current/sink-to-mysql-with-jdbc/) (Supports versions 5.7 and 8.0.x)
+* [Kafka](/integrations/destinations/apache-kafka) (Supports versions 3.1.0 or later)
+* [MySQL](/integrations/destinations/mysql) (Supports versions 5.7 and 8.0.x)
* [NATS](/integrations/destinations/nats-and-nats-jetstream)
-* [PostgreSQL](/docs/current/sink-to-postgres/)
+* [PostgreSQL](/integrations/destinations/postgresql)
* [Pulsar](/integrations/destinations/apache-pulsar)
-* [Redis](/docs/current/sink-to-redis/)
-* [StarRocks](/docs/current/sink-to-starrocks/)
-* [TiDB](/docs/current/sink-to-tidb/)
+* [Redis](/integrations/destinations/redis)
+* [StarRocks](/integrations/destinations/starrocks)
+* [TiDB](/integrations/destinations/tidb)
## See also
diff --git a/sql/commands/sql-create-source.mdx b/sql/commands/sql-create-source.mdx
index a1131b4e..577146ef 100644
--- a/sql/commands/sql-create-source.mdx
+++ b/sql/commands/sql-create-source.mdx
@@ -33,7 +33,7 @@ A [generated column](/docs/current/query-syntax-generated-columns/) is defined w
Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/docs/current/sql-identifiers/).
-To know when a data record is loaded to RisingWave, you can define a column that is generated based on the processing time (` timestamptz AS proctime()`) when creating the table or source. See also [proctime()](/docs/current/sql-function-datetime/#proctime).
+To know when a data record is loaded to RisingWave, you can define a column that is generated based on the processing time (` timestamptz AS proctime()`) when creating the table or source. See also [proctime()](/sql/functions/datetime#proctime).
For a source with schema from an external connector, use `*` to represent all columns from the external connector first, so that you can define a generated column on the source with an external connector. See the example below.
@@ -95,7 +95,7 @@ Shared source improves resource utilization and data consistency when working wi
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
### Configure
diff --git a/sql/commands/sql-create-table.mdx b/sql/commands/sql-create-table.mdx
index 0a7676b5..9844a045 100644
--- a/sql/commands/sql-create-table.mdx
+++ b/sql/commands/sql-create-table.mdx
@@ -42,7 +42,7 @@ CREATE TABLE [ IF NOT EXISTS ] table_name (
- The syntax for creating a table with connector settings and the supported connectors are the same as for creating a source. See [CREATE SOURCE](/docs/current/sql-create-source/) for a full list of supported connectors and data formats.
-- To know when a data record is loaded to RisingWave, you can define a column that is generated based on the processing time (` timestamptz AS proctime()`) when creating the table or source. See also [proctime()](/docs/current/sql-function-datetime/#proctime).
+- To know when a data record is loaded to RisingWave, you can define a column that is generated based on the processing time (` timestamptz AS proctime()`) when creating the table or source. See also [proctime()](/sql/functions/datetime#proctime).
- For a table with schema from an external connector, use `*` to represent all columns from the external connector first, so that you can define a generated column on a table with an external connector. See the example below.
@@ -74,7 +74,7 @@ CREATE TABLE [ IF NOT EXISTS ] table_name (
| DEFAULT | The DEFAULT clause allows you to assign a default value to a column. This default value is used when a new row is inserted, and no explicit value is provided for that column. default\_expr is any constant value or variable-free expression that does not reference other columns in the current table or involve subqueries. The data type of default\_expr must match the data type of the column. |
| generation\_expression | The expression for the generated column. For details about generated columns, see [Generated columns](/docs/current/query-syntax-generated-columns/). |
| watermark\_clause | A clause that defines the watermark for a timestamp column. The syntax is WATERMARK FOR column\_name as expr. For the watermark clause to be valid, the table must be an append-only table. That is, the APPEND ONLY option must be specified. This restriction only applies to a table. For details about watermarks, refer to [Watermarks](/docs/current/watermarks/). |
-| APPEND ONLY | When this option is specified, the table will be created as an append-only table. An append-only table cannot have primary keys. UPDATE and DELETE statements are not valid for append-only tables. Note that append-only tables is in the [public preview stage](/product-lifecycle/#features-in-the-public-preview-stage). |
+| APPEND ONLY | When this option is specified, the table will be created as an append-only table. An append-only table cannot have primary keys. UPDATE and DELETE statements are not valid for append-only tables. Note that append-only tables are in the [public preview stage](/changelog/product-lifecycle#features-in-the-public-preview-stage). |
| ON CONFLICT | Specify the alternative action when the newly inserted record brings a violation of PRIMARY KEY constraint on the table. See [PK conflict behavior](#pk-conflict-behavior) below for more information. |
| **INCLUDE** clause | Extract fields not included in the payload as separate columns. For more details on its usage, see [INCLUDE clause](/docs/current/include-clause/). |
| **WITH** clause | Specify the connector settings here if trying to store all the source data. See the [Data ingestion](/docs/current/data-ingestion/) page for the full list of supported sources as well as links to specific connector pages detailing the syntax for each source. |
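Putting several of these clauses together, a sketch of an append-only table with a watermark could look like this; the names and the 5-second delay are illustrative:

```sql
CREATE TABLE IF NOT EXISTS sensor_events (
   device_id varchar,
   reading double precision,
   event_time timestamptz,
   -- The watermark clause requires the table to be append-only.
   WATERMARK FOR event_time AS event_time - INTERVAL '5' SECOND
) APPEND ONLY;
```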
@@ -101,7 +101,7 @@ The action could be one of the following. A column not in the primary key can be sp
**PUBLIC PREVIEW**
-`VERSION COLUMN` is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+`VERSION COLUMN` is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
diff --git a/sql/commands/sql-drop-sink.mdx b/sql/commands/sql-drop-sink.mdx
index 5bdcaaa8..abdcfe35 100644
--- a/sql/commands/sql-drop-sink.mdx
+++ b/sql/commands/sql-drop-sink.mdx
@@ -1,6 +1,6 @@
---
title: "DROP SINK"
-description: "Use the `DROP SINK` command to remove a [sink](/docs/current/sql-create-sink/) if you no longer need to deliver data to the sink."
+description: "Use the `DROP SINK` command to remove a [sink](/sql/commands/sql-create-sink) if you no longer need to deliver data to the sink."
---
## Syntax
@@ -37,7 +37,7 @@ DROP SINK IF EXISTS rw_schema.rw_sink;
title="CREATE SINK"
icon="database"
iconType="solid"
- href="/docs/current/sql-create-sink/"
+ href="/sql/commands/sql-create-sink"
>
Create a sink
diff --git a/sql/commands/sql-set-background-ddl.mdx b/sql/commands/sql-set-background-ddl.mdx
index 5e139215..08f3c2c9 100644
--- a/sql/commands/sql-set-background-ddl.mdx
+++ b/sql/commands/sql-set-background-ddl.mdx
@@ -6,7 +6,7 @@ description: "Data Definition Language (DDL) commands, such as creating material
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
You can use the `SET BACKGROUND_DDL` command to run DDL commands in the background.
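For example, a session could enable the setting and then issue a long-running DDL statement; the table and view names here are hypothetical:

```sql
SET BACKGROUND_DDL = true;
-- Returns immediately; the materialized view is backfilled in the background.
CREATE MATERIALIZED VIEW mv1 AS SELECT v1, count(*) FROM t1 GROUP BY v1;
-- Use SHOW JOBS; to check the progress of the background job.
```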
@@ -34,7 +34,7 @@ SET BACKGROUND_DDL = { true | false };
title="CREATE SINK"
icon="database"
iconType="solid"
- href="/docs/current/sql-create-sink/"
+ href="/sql/commands/sql-create-sink"
horizontal
/>
diff --git a/sql/commands/sql-show-create-sink.mdx b/sql/commands/sql-show-create-sink.mdx
index fd3cbd45..8bf55e06 100644
--- a/sql/commands/sql-show-create-sink.mdx
+++ b/sql/commands/sql-show-create-sink.mdx
@@ -22,7 +22,7 @@ SHOW CREATE SINK sink_name;
title="CREATE SINK"
icon="database"
iconType="solid"
- href="/docs/current/sql-create-sink/"
+ href="/sql/commands/sql-create-sink"
>
Create a sink
diff --git a/sql/commands/sql-show-jobs.mdx b/sql/commands/sql-show-jobs.mdx
index 772fe428..fac02295 100644
--- a/sql/commands/sql-show-jobs.mdx
+++ b/sql/commands/sql-show-jobs.mdx
@@ -34,7 +34,7 @@ SHOW JOBS;
title="Monitor statement progress"
icon="chart-line"
iconType="solid"
- href="/docs/current/monitor-statement-progress/"
+ href="/operate/monitor-statement-progress"
/>
**PUBLIC PREVIEW**
-Read-only transactions is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+Read-only transactions are in the public preview stage, meaning the feature is nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Syntax
diff --git a/sql/data-types/map-type.mdx b/sql/data-types/map-type.mdx
index 01b5503f..9cec42cb 100644
--- a/sql/data-types/map-type.mdx
+++ b/sql/data-types/map-type.mdx
@@ -10,7 +10,7 @@ title: "Map type"
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Define a map
diff --git a/sql/functions/aggregate.mdx b/sql/functions/aggregate.mdx
index 8570fc90..baf9348f 100644
--- a/sql/functions/aggregate.mdx
+++ b/sql/functions/aggregate.mdx
@@ -252,7 +252,7 @@ If NULL is provided, the function will not calculate a specific percentile and r
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/product-lifecycle/#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
Returns an approximate value of the specified percentile from a numeric column.
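A sketch of a median query with this function, assuming the percentile comes first and the relative error second (verify the argument order against the syntax above; the table and column are hypothetical):

```sql
SELECT approx_percentile(0.5, 0.01) WITHIN GROUP (ORDER BY amount)
FROM orders; -- approximate median of the amount column
```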
diff --git a/sql/query-syntax/generated-columns.mdx b/sql/query-syntax/generated-columns.mdx
index 56c56119..c5f2f383 100644
--- a/sql/query-syntax/generated-columns.mdx
+++ b/sql/query-syntax/generated-columns.mdx
@@ -26,4 +26,4 @@ To create a generated column as the processing time of a message, use the `proct
CREATE TABLE t1 (v1 int, proc_time timestamptz as proctime());
```
-See also [proctime()](/docs/current/sql-function-datetime/#proctime).
+See also [proctime()](/sql/functions/datetime#proctime).
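For comparison, a sketch of a generated column derived from other columns rather than from `proctime()`; the table and column names are hypothetical:

```sql
-- total is computed from price and quantity on every insert.
CREATE TABLE orders (
    price double precision,
    quantity int,
    total double precision AS price * quantity
);
```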
diff --git a/sql/system-catalogs/rw-catalog.mdx b/sql/system-catalogs/rw-catalog.mdx
index 6487dd68..5f8eab26 100644
--- a/sql/system-catalogs/rw-catalog.mdx
+++ b/sql/system-catalogs/rw-catalog.mdx
@@ -82,7 +82,7 @@ SELECT name, initialized_at, created_at FROM rw_sources;
| rw\_connections | Contains details about the connections available in the database, such as their IDs, names, owners, types, and more. |
| rw\_databases | Contains information about the databases available in the database, such as the IDs, names, and owners. |
| rw\_depend | Contains the dependency relationships between tables, indexes, views, materialized views, sources, and sinks. |
-| rw\_ddl\_progress | Contains the progress of running DDL statements. You can use this relation to view the progress of running DDL statements. For details, see [Monitor statement progress](/docs/current/monitor-statement-progress/). |
+| rw\_ddl\_progress | Contains the progress of running DDL statements. For details, see [Monitor statement progress](/operate/monitor-statement-progress). |
| rw\_description | Contains optional descriptions (comments) for each database object. Descriptions can be added with the [COMMENT ON](/docs/current/sql-comment-on/) command and viewed with DESCRIBE or SHOW COLUMNS FROM command. |
| rw\_event\_logs | Contains information about events, including event IDs, timestamps, event types, and additional information if available. |
| rw\_fragment\_id\_to\_ddl | Contains information about the database schema change operations (DDL) and their corresponding fragment\_id identifiers. The outputs include fragment IDs, job IDs, schema IDs, DDL types, and names of the affected object. |
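For instance, to inspect `rw_ddl_progress` directly; `rw_catalog` is on the default search path, so the schema prefix is optional:

```sql
-- Show the progress of all DDL statements currently running.
SELECT * FROM rw_ddl_progress;
```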
From 0e5c509bfb677254ee1d0531d3b36fe2cc13ab45 Mon Sep 17 00:00:00 2001
From: WanYixian
Date: Tue, 26 Nov 2024 12:50:57 +0800
Subject: [PATCH 07/10] save work
---
cloud/connect-to-a-project.mdx | 2 +-
cloud/create-a-connection.mdx | 2 +-
cloud/develop-overview.mdx | 8 ++++----
demos/fast-twitter-events-processing.mdx | 2 +-
deploy/migrate-to-sql-backend.mdx | 2 +-
deploy/node-specific-configurations.mdx | 6 +++---
deploy/risingwave-docker-compose.mdx | 4 ++--
deploy/risingwave-k8s-helm.mdx | 6 +++---
deploy/risingwave-kubernetes.mdx | 4 ++--
faq/faq-using-risingwave.mdx | 4 ++--
faq/risingwave-flink-comparison.mdx | 6 +++---
get-started/quickstart.mdx | 8 ++++----
get-started/rw-premium-edition-intro.mdx | 4 ++--
get-started/use-cases.mdx | 2 +-
ingestion/change-data-capture-with-risingwave.mdx | 2 +-
ingestion/format-and-encode-parameters.mdx | 2 +-
ingestion/generate-test-data.mdx | 6 +++---
ingestion/modify-source-or-table-schemas.mdx | 14 +++++++-------
ingestion/overview.mdx | 6 +++---
ingestion/supported-sources-and-formats.mdx | 8 ++++----
integrations/destinations/apache-doris.mdx | 4 ++--
integrations/destinations/snowflake.mdx | 2 +-
integrations/destinations/sql-server.mdx | 2 +-
integrations/destinations/starrocks.mdx | 2 +-
integrations/destinations/tidb.mdx | 2 +-
integrations/sources/amazon-msk.mdx | 2 +-
integrations/sources/confluent-cloud.mdx | 2 +-
integrations/sources/mysql-cdc.mdx | 2 +-
integrations/sources/overview.mdx | 2 +-
integrations/sources/postgresql-cdc.mdx | 2 +-
integrations/sources/redhat-amq-streams.mdx | 2 +-
integrations/sources/redpanda.mdx | 2 +-
integrations/sources/sql-server-cdc.mdx | 2 +-
integrations/visualization/overview.mdx | 2 +-
operate/alter-streaming.mdx | 2 +-
.../manage-a-large-number-of-streaming-jobs.mdx | 4 ++--
operate/meta-backup.mdx | 2 +-
processing/time-travel-queries.mdx | 4 ++--
processing/watermarks.mdx | 2 +-
reference/key-concepts.mdx | 2 +-
sql/commands/overview.mdx | 4 ++--
sql/commands/sql-alter-source.mdx | 4 ++--
sql/commands/sql-alter-system.mdx | 2 +-
sql/commands/sql-create-aggregate.mdx | 2 +-
sql/commands/sql-create-connection.mdx | 2 +-
sql/commands/sql-create-function.mdx | 2 +-
sql/commands/sql-create-source.mdx | 8 ++++----
sql/commands/sql-drop-aggregate.mdx | 2 +-
sql/commands/sql-drop-function.mdx | 4 ++--
sql/commands/sql-show-functions.mdx | 2 +-
sql/commands/sql-show-parameters.mdx | 2 +-
sql/query-syntax/value-exp.mdx | 2 +-
sql/system-catalogs/rw-catalog.mdx | 2 +-
troubleshoot/meta-failure.mdx | 7 +++----
troubleshoot/node-failure.mdx | 2 +-
troubleshoot/overview.mdx | 2 +-
troubleshoot/troubleshoot-oom.mdx | 2 +-
troubleshoot/troubleshoot-recovery-failure.mdx | 4 ++--
58 files changed, 99 insertions(+), 100 deletions(-)
diff --git a/cloud/connect-to-a-project.mdx b/cloud/connect-to-a-project.mdx
index 0b6cd9d1..f9d165c6 100644
--- a/cloud/connect-to-a-project.mdx
+++ b/cloud/connect-to-a-project.mdx
@@ -26,7 +26,7 @@ To connect with any local clients, follow the steps below:
* Alternatively, you can create a new user. RisingWave Cloud offers `psql`, `Connection String`, `Parameters Only`, `Java`, `Node.js`, `Python`, and `Golang` as connection options.
-To connect via `psql`, you need to [Install psql](/docs/current/install-psql-without-postgresql/) in your environment. `psql` is a command-line interface for interacting with PostgreSQL databases, including RisingWave.
+To connect via `psql`, you need to [Install psql](/deploy/install-psql-without-postgresql) in your environment. `psql` is a command-line interface for interacting with PostgreSQL databases, including RisingWave.
3. You may need to set up a CA certificate to enable SSL connections. See the instructions displayed on the portal for more details.
diff --git a/cloud/create-a-connection.mdx b/cloud/create-a-connection.mdx
index eafbd83b..374a255a 100644
--- a/cloud/create-a-connection.mdx
+++ b/cloud/create-a-connection.mdx
@@ -63,4 +63,4 @@ We aim to automate this process in the future to make it even easier.
Now, you can create a source or sink with the PrivateLink connection using SQL.
-For details on how to use the VPC endpoint to create a source with the PrivateLink connection, see [Create source with PrivateLink connection](/docs/current/ingest-from-kafka/#create-source-with-privatelink-connection); for creating a sink, see [Create sink with PrivateLink connection](/integrations/destinations/apache-kafka#create-sink-with-privatelink-connection).
+For details on how to use the VPC endpoint to create a source with the PrivateLink connection, see [Create source with PrivateLink connection](/integrations/sources/kafka#create-source-with-privatelink-connection); for creating a sink, see [Create sink with PrivateLink connection](/integrations/destinations/apache-kafka#create-sink-with-privatelink-connection).
diff --git a/cloud/develop-overview.mdx b/cloud/develop-overview.mdx
index 9efd8e46..3288f42c 100644
--- a/cloud/develop-overview.mdx
+++ b/cloud/develop-overview.mdx
@@ -48,7 +48,7 @@ Select the version of the corresponding docs when using the RisingWave user docs
### Ecosystem
- See how RisingWave can integrate with your existing data stack. Vote for your favorite data tools and streaming services to help us prioritize the integration development.
+ See how RisingWave can integrate with your existing data stack. Vote for your favorite data tools and streaming services to help us prioritize integration development. Connect to and ingest data from external sources such as databases and message brokers. See supported data sources. Stream processed data out of RisingWave to message brokers and databases. See supported data destinations.
@@ -90,14 +90,14 @@ Select the version of the corresponding docs when using the RisingWave user docs
RisingWave offers support for popular PostgreSQL drivers, enabling seamless integration with your applications for interacting with it.
Go
diff --git a/demos/fast-twitter-events-processing.mdx b/demos/fast-twitter-events-processing.mdx
index 3e1d0487..89d9b816 100644
--- a/demos/fast-twitter-events-processing.mdx
+++ b/demos/fast-twitter-events-processing.mdx
@@ -92,7 +92,7 @@ CREATE SOURCE twitter (
) FORMAT PLAIN ENCODE JSON;
```
-Note that the SQL statement uses the STRUCT data type. For details about the STRUCT data type, please see [Data types](/docs/current/sql-data-types/).
+Note that the SQL statement uses the STRUCT data type. For details about the STRUCT data type, please see [Data types](/sql/data-types/overview).
## Step 3: Define a materialized view and analyze data
diff --git a/deploy/migrate-to-sql-backend.mdx b/deploy/migrate-to-sql-backend.mdx
index cbda6add..2a607050 100644
--- a/deploy/migrate-to-sql-backend.mdx
+++ b/deploy/migrate-to-sql-backend.mdx
@@ -32,7 +32,7 @@ Make sure the SQL backend service is operational and you have the necessary cred
### Back up etcd data
-The migration process from etcd to a SQL backend is performed offline, so we recommend taking a backup of your current etcd data to avoid any data loss before the migration. Refer to the [meta-backup](/docs/current/meta-backup/) for detailed instructions.
+The migration process from etcd to a SQL backend is performed offline, so we recommend taking a backup of your current etcd data to avoid any data loss before the migration. Refer to the [meta backup guide](/operate/meta-backup) for detailed instructions.
## Procedure
diff --git a/deploy/node-specific-configurations.mdx b/deploy/node-specific-configurations.mdx
index 16c46e97..bc682bf1 100644
--- a/deploy/node-specific-configurations.mdx
+++ b/deploy/node-specific-configurations.mdx
@@ -59,7 +59,7 @@ recent_filter_rotate_interval_ms = 10000
When setting up configurations, please be extra careful with those items prefixed by `unsafe_`. Typically these configurations can cause system or data damage if wrongly configured. You may want to contact our technical support before changing the `unsafe_` prefixed configurations.
### System configurations
-System configurations are used to **initialize** the [system parameters](/docs/current/view-configure-system-parameters/) at the first startup. Once the system has started, the system parameters are managed by Meta service and can be altered using the `ALTER SYSTEM SET` command.
+System configurations are used to **initialize** the [system parameters](/operate/view-configure-system-parameters) at the first startup. Once the system has started, the system parameters are managed by Meta service and can be altered using the `ALTER SYSTEM SET` command.
Example for the system configuration section:
@@ -73,7 +73,7 @@ backup_storage_url = "minio://hummockadmin:hummockadmin@127.0.0.1:9301/hummock00
backup_storage_directory = "hummock_001/backup"
```
-For more information on system parameters, please refer to [View and configure system parameters](/docs/current/view-configure-system-parameters/).
+For more information on system parameters, please refer to [View and configure system parameters](/operate/view-configure-system-parameters).
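As a small sketch of altering a parameter after startup, using `barrier_interval_ms` as an example parameter; the value shown is illustrative only:

```sql
-- Change a system parameter at runtime; the change applies cluster-wide.
ALTER SYSTEM SET barrier_interval_ms = 1000;
-- Verify the current values.
SHOW PARAMETERS;
```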
### Streaming configurations
@@ -158,4 +158,4 @@ Below is an example of the cache refill configuration for your reference.
#### Other storage configurations
-Except for the above, RisingWave also provides some other storage configurations to help control the overall buffer and cache limits. Please see [Dedicated compute node](/docs/current/dedicated-compute-node/) for more.
+In addition to the above, RisingWave also provides some other storage configurations to help control the overall buffer and cache limits. Please see [Dedicated compute node](/operate/dedicated-compute-node) for more details.
diff --git a/deploy/risingwave-docker-compose.mdx b/deploy/risingwave-docker-compose.mdx
index cdbe31ed..f4bd30dc 100644
--- a/deploy/risingwave-docker-compose.mdx
+++ b/deploy/risingwave-docker-compose.mdx
@@ -6,7 +6,7 @@ description: This topic describes how to start RisingWave using Docker Compose o
In this option, RisingWave functions as an all-in-one service. All components of RisingWave, including the compute node, meta node, and compactor node, are put into a single process. They are executed in different threads, eliminating the need to start each component as a separate process.
-However, please be aware that certain critical features, such as failover and resource management, are not implemented in this mode. Therefore, this option is not recommended for production deployments. For production deployments, please consider [RisingWave Cloud](/docs/current/risingwave-cloud/), [Kubernetes with Helm](/docs/current/risingwave-k8s-helm/), or [Kubernetes with Operator](/docs/current/risingwave-kubernetes/).
+However, please be aware that certain critical features, such as failover and resource management, are not implemented in this mode. Therefore, this option is not recommended for production deployments. For production deployments, please consider [RisingWave Cloud](/deploy/risingwave-cloud), [Kubernetes with Helm](/deploy/risingwave-k8s-helm), or [Kubernetes with Operator](/deploy/risingwave-kubernetes).
This option uses a pre-defined Docker Compose configuration file to set up a RisingWave cluster.
@@ -167,7 +167,7 @@ Remember to replace the `docker-compose-with-storage_backend_name.yml` with the
## Connect to RisingWave
-After RisingWave is up and running, you need to connect to it via the Postgres interactive terminal `psql` so that you can issue queries to RisingWave and see the query results. If you don't have `psql` installed, [install psql](/docs/current/install-psql-without-postgresql/) first.
+After RisingWave is up and running, you need to connect to it via the Postgres interactive terminal `psql` so that you can issue queries to RisingWave and see the query results. If you don't have `psql` installed, [install psql](/deploy/install-psql-without-postgresql) first.
```bash
psql -h localhost -p 4566 -d dev -U root
diff --git a/deploy/risingwave-k8s-helm.mdx b/deploy/risingwave-k8s-helm.mdx
index d4cba9e2..a1cec17f 100644
--- a/deploy/risingwave-k8s-helm.mdx
+++ b/deploy/risingwave-k8s-helm.mdx
@@ -8,7 +8,7 @@ sidebarTitle: Kubernetes with Helm
* Ensure you have Helm 3.7+ installed in your environment. For details about how to install Helm, see the [Helm documentation](https://helm.sh/docs/intro/install/).
* Ensure you have [Kubernetes](https://kubernetes.io/) 1.24 or higher installed in your environment.
-* Ensure you allocate enough resources for the deployment. For details, see [Hardware requirements](/docs/current/hardware-requirements/).
+* Ensure you allocate enough resources for the deployment. For details, see [Hardware requirements](/deploy/hardware-requirements).
## Step 1: Start Kubernetes
@@ -112,7 +112,7 @@ psql -h localhost -p 4567 -d dev -U root
## Step 4: Monitor performance
-You can monitor the RisingWave cluster using the monitoring stack. For details, see [Monitoring a RisingWave cluster](/docs/current/monitor-risingwave-cluster/).
+You can monitor the RisingWave cluster using the monitoring stack. For details, see [Monitoring a RisingWave cluster](/operate/monitor-risingwave-cluster).
## Optional: Resize a node
@@ -134,4 +134,4 @@ compactorComponent:
memory: 64Mi
```
-Please note that increasing the CPU resource will not automatically increase the parallelism of existing materialized views. When scaling up (adding more CPU cores) a compute node, you should perform the scaling by following the instructions in [Cluster scaling](/docs/current/k8s-cluster-scaling/).
+Please note that increasing the CPU resource will not automatically increase the parallelism of existing materialized views. When scaling up (adding more CPU cores) a compute node, you should perform the scaling by following the instructions in [Cluster scaling](/deploy/k8s-cluster-scaling).
diff --git a/deploy/risingwave-kubernetes.mdx b/deploy/risingwave-kubernetes.mdx
index 2d7855b5..c45e0597 100644
--- a/deploy/risingwave-kubernetes.mdx
+++ b/deploy/risingwave-kubernetes.mdx
@@ -10,11 +10,11 @@ The Operator is a deployment and management system for RisingWave. It runs on to
* **[Install kubectl](http://pwittrock.github.io/docs/tasks/tools/install-kubectl/)**
Ensure that the Kubernetes command-line tool [kubectl](https://kubernetes.io/docs/reference/kubectl/) is installed in your environment.
-* **[Install psql](/docs/current/install-psql-without-postgresql/)**
+* **[Install psql](/deploy/install-psql-without-postgresql)**
Ensure that the PostgreSQL interactive terminal [psql](https://www.postgresql.org/docs/current/app-psql.html) is installed in your environment.
* **[Install and run Docker](https://docs.docker.com/get-docker/)**
Ensure that [Docker](https://docs.docker.com/desktop/) is installed in your environment and running.
-* Ensure you allocate enough resources for the deployment. For details, see [Hardware requirements](/docs/current/hardware-requirements/).
+* Ensure you allocate enough resources for the deployment. For details, see [Hardware requirements](/deploy/hardware-requirements).
## Create a Kubernetes cluster
diff --git a/faq/faq-using-risingwave.mdx b/faq/faq-using-risingwave.mdx
index 44d0156d..b18188a9 100644
--- a/faq/faq-using-risingwave.mdx
+++ b/faq/faq-using-risingwave.mdx
@@ -7,7 +7,7 @@ mode: wide
Don't worry, this is by design. RisingWave uses memory for the in-memory caches of streaming queries, such as hash tables and other data structures, to optimize streaming computation performance. By default, RisingWave will utilize all available memory (unless specifically configured through `RW_TOTAL_MEMORY_BYTES`/`--total-memory-bytes`). This is why setting memory limits is required in Kubernetes/Docker deployments.
-During the instance running, RisingWave will keep memory usage below this limit. If you encounter unexpected issues like OOM (Out-of-memory), please refer to [Troubleshoot out-of-memory](/docs/current/troubleshoot-oom/) for assistance.
+While the instance is running, RisingWave keeps memory usage below this limit. If you encounter unexpected issues like OOM (out-of-memory) errors, please refer to [Troubleshoot out-of-memory](/troubleshoot/troubleshoot-oom) for assistance.
As part of its design, RisingWave allocates part of the total memory in the compute node as reserved memory. This reserved memory is specifically set aside for system usage, such as the stack and code segment of processes, allocation overhead, and network buffer.
@@ -48,7 +48,7 @@ By continuously improving the reserved memory feature, we strive to offer a more
The execution time for the `CREATE MATERIALIZED VIEW` statement can vary based on several factors. Here are two common reasons:
1. **Backfilling of historical data**: RisingWave ensures consistent snapshots across materialized views (MVs). So when a new MV is created, it backfills all historical data from the upstream MV or tables and computes over them, which takes some time, and the `CREATE` statement only returns when the backfill ends. You can run `SHOW JOBS;` in SQL to check the DDL progress. If you want the create statement to return without waiting for the backfill to finish and without blocking the session, you can execute `SET BACKGROUND_DDL=true;` before running the `CREATE MATERIALIZED VIEW` statement, as sketched after this list. See details in [SET BACKGROUND\_DDL](/sql/commands/sql-set-background-ddl). But please note that when `BACKGROUND_DDL=true`, the newly created MV is still invisible in the catalog until the backfill ends.
-2. **High cluster latency**: If the cluster experiences high latency, it may take longer to apply changes to the streaming graph. If the `Progress` in the `SHOW JOBS;` result stays at 0.0%, high latency could be the cause. See details in [Troubleshoot high latency](/docs/current/troubleshoot-high-latency/)
+2. **High cluster latency**: If the cluster experiences high latency, it may take longer to apply changes to the streaming graph. If the `Progress` in the `SHOW JOBS;` result stays at 0.0%, high latency could be the cause. See details in [Troubleshoot high latency](/troubleshoot/troubleshoot-high-latency).
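A minimal sketch of the workflow described in point 1, with placeholder table and view names:

```sql
-- Let CREATE MATERIALIZED VIEW return immediately instead of blocking.
SET BACKGROUND_DDL = true;
CREATE MATERIALIZED VIEW mv1 AS SELECT v1, count(*) FROM t1 GROUP BY v1;
-- Check the backfill progress of the background job.
SHOW JOBS;
```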
Memory usage is divided into the following components:
diff --git a/faq/risingwave-flink-comparison.mdx b/faq/risingwave-flink-comparison.mdx
index 4b207455..c14021d8 100644
--- a/faq/risingwave-flink-comparison.mdx
+++ b/faq/risingwave-flink-comparison.mdx
@@ -8,7 +8,7 @@ We periodically update this article to keep up with the rapidly evolving landsca
## Summary
-| Apache Flink | RisingWave | |
+| | Apache Flink | RisingWave |
| :------------------------------- | :-------------------------------------------------------------------- | :------------------------------------------------------------------------- |
| Version | 1.17 | Latest version |
| License | Apache License 2.0 | Apache License 2.0 |
@@ -82,7 +82,7 @@ RisingWave is a SQL streaming database that offers PostgreSQL-style SQL to its u
Apache Flink is a programming framework that does not support any language clients. To use Apache Flink, users must either write Java/Scala/Python programs or use Flink’s own SQL client.
-RisingWave is compatible with the PostgreSQL wire protocol and can work with the majority of PostgreSQL's client libraries. This means that RisingWave can communicate in any programming language that is supported by the [PostgreSQL driver](https://wiki.postgresql.org/wiki/Client%5FLibraries), such as [Java](/docs/current/java-client-libraries/), [Python](/docs/current/python-client-libraries/), and [Node.js](/docs/current/nodejs-client-libraries/). Additionally, users can interact with RisingWave using `psql`, the official PostgreSQL terminal.
+RisingWave is compatible with the PostgreSQL wire protocol and can work with the majority of PostgreSQL's client libraries. This means that RisingWave can communicate in any programming language that is supported by the [PostgreSQL driver](https://wiki.postgresql.org/wiki/Client%5FLibraries), such as [Java](/client-libraries/java), [Python](/client-libraries/python), and [Node.js](/client-libraries/nodejs). Additionally, users can interact with RisingWave using `psql`, the official PostgreSQL terminal.
## State management
@@ -127,7 +127,7 @@ RisingWave differs from Apache Flink in that it was specifically designed for th
RisingWave can function as both a stream processing system and a database system. As a database system, it is compatible with PostgreSQL clients, making it a natural fit for the PostgreSQL ecosystem. Users can program in different languages such as Python, Java, and Node.js using existing libraries. Additionally, users can easily find tools that work with RisingWave, such as DBeaver.
-For a complete list of RisingWave integrations, see [Integrations](/docs/current/rw-integration-summary/).
+For a complete list of RisingWave integrations, see [Integrations](/integrations/overview).
## Learning curve
diff --git a/get-started/quickstart.mdx b/get-started/quickstart.mdx
index f801e8cb..2a6688e8 100644
--- a/get-started/quickstart.mdx
+++ b/get-started/quickstart.mdx
@@ -8,7 +8,7 @@ description: "This guide aims to provide a quick and easy way to get started wit
The following options start RisingWave in the standalone mode. In this mode, data is stored in the file system and the metadata is stored in the embedded SQLite database. See [About RisingWave standalone mode](#about-risingwave-standalone-mode) for more details.
-For extensive testing or single-machine deployment, consider [starting RisingWave via Docker Compose](/docs/current/risingwave-docker-compose/). For production environments, consider [RisingWave Cloud](/docs/current/risingwave-cloud/), our fully managed service, or [deployment on Kubernetes using the Operator](/docs/current/risingwave-kubernetes/) or [Helm Chart](/docs/current/risingwave-k8s-helm/).
+For extensive testing or single-machine deployment, consider [starting RisingWave via Docker Compose](/deploy/risingwave-docker-compose). For production environments, consider [RisingWave Cloud](/deploy/risingwave-cloud), our fully managed service, or [deployment on Kubernetes using the Operator](/deploy/risingwave-kubernetes) or [Helm Chart](/deploy/risingwave-k8s-helm).
### Script installation
@@ -44,7 +44,7 @@ risingwave
## Step 2: Connect to RisingWave
-Ensure you have `psql` installed in your environment. To learn about how to install it, see [Install psql without PostgreSQL](/docs/current/install-psql-without-postgresql/).
+Ensure you have `psql` installed in your environment. To learn about how to install it, see [Install psql without PostgreSQL](/deploy/install-psql-without-postgresql).
Open a new terminal window and run:
@@ -133,13 +133,13 @@ SELECT * FROM average_exam_scores;
RisingWave standalone mode is a simplified deployment mode for RisingWave. It is designed to be minimal and easy to install and configure.
-Unlike other deployment modes, for instance [Docker Compose](/docs/current/risingwave-docker-compose/) or [Kubernetes](/docs/current/risingwave-kubernetes/), RisingWave standalone mode starts the cluster as a single process. This means that services like `compactor`, `frontend`, `compute` and `meta` are all embedded in this process.
+Unlike other deployment modes, for instance [Docker Compose](/deploy/risingwave-docker-compose) or [Kubernetes](/deploy/risingwave-kubernetes), RisingWave standalone mode starts the cluster as a single process. This means that services like `compactor`, `frontend`, `compute` and `meta` are all embedded in this process.
For state store, we will use the embedded `LocalFs` Object Store, eliminating the need for an external service like `minio` or `s3`; for meta store, we will use the embedded `SQLite` database, eliminating the need for an external service like `etcd`.
By default, the RisingWave standalone mode will store its data in `~/risingwave`, which includes both `Metadata` and `State Data`.
-For a batteries-included setup, with `monitoring` tools and external services like `kafka` fully included, you can use [Docker Compose](/docs/current/risingwave-docker-compose/) instead. If you would like to set up these external services manually, you may check out RisingWave's [Docker Compose](https://github.com/risingwavelabs/risingwave/blob/main/docker/docker-compose.yml), and run these services using the same configurations.
+For a batteries-included setup, with `monitoring` tools and external services like `kafka` fully included, you can use [Docker Compose](/deploy/risingwave-docker-compose) instead. If you would like to set up these external services manually, you may check out RisingWave's [Docker Compose](https://github.com/risingwavelabs/risingwave/blob/main/docker/docker-compose.yml), and run these services using the same configurations.
## Configure RisingWave standalone mode
diff --git a/get-started/rw-premium-edition-intro.mdx b/get-started/rw-premium-edition-intro.mdx
index bef2e689..ac345763 100644
--- a/get-started/rw-premium-edition-intro.mdx
+++ b/get-started/rw-premium-edition-intro.mdx
@@ -24,11 +24,11 @@ RisingWave Premium 1.0 is the first major release of this new edition with sever
* Automatic schema mapping to the source tables for [PostgreSQL CDC](/integrations/sources/postgresql-cdc#automatically-map-upstream-table-schema) and [MySQL CDC](/integrations/sources/mysql-cdc#automatically-map-upstream-table-schema)
* [Automatic schema change for MySQL CDC](/integrations/sources/mysql-cdc#automatically-change-schema)
-* [AWS Glue Schema Registry](/docs/current/ingest-from-kafka/#read-schemas-from-aws-glue-schema-registry)
+* [AWS Glue Schema Registry](/integrations/sources/kafka#read-schemas-from-aws-glue-schema-registry)
### Connectors
-
+
For users who are already using these features in 1.9.x or earlier versions, rest assured that these features will remain intact if you stay on that version. If you choose to upgrade to v2.0 or later versions, an error will show up to indicate you need a license to use the features.
diff --git a/get-started/use-cases.mdx b/get-started/use-cases.mdx
index 6b87ad28..e3af57f8 100644
--- a/get-started/use-cases.mdx
+++ b/get-started/use-cases.mdx
@@ -238,7 +238,7 @@ SELECT * FROM bidding_feature_vectors WHERE ad_id = 'specific_ad_id';
4. Real-time inference
-As new bidding data arrives, you can continuously update your feature vectors and use them for real-time inference, ensuring your bids are always informed by the most recent data. For instance, you can create a [User-defined function](/docs/current/user-defined-functions/), `PREDICT_BID`, that predicts the next bid given the most recent data.
+As new bidding data arrives, you can continuously update your feature vectors and use them for real-time inference, ensuring your bids are always informed by the most recent data. For instance, you can create a [User-defined function](/sql/udfs/user-defined-functions), `PREDICT_BID`, that predicts the next bid given the most recent data.
```sql
CREATE MATERIALIZED VIEW live_predictions AS
diff --git a/ingestion/change-data-capture-with-risingwave.mdx b/ingestion/change-data-capture-with-risingwave.mdx
index 83f5bb22..d9f44475 100644
--- a/ingestion/change-data-capture-with-risingwave.mdx
+++ b/ingestion/change-data-capture-with-risingwave.mdx
@@ -9,4 +9,4 @@ You can use event streaming systems like Apache Kafka, Pulsar, or Kinesis to str
RisingWave also provides native MySQL and PostgreSQL CDC connectors. With these CDC connectors, you can ingest CDC data from these databases directly, without setting up additional services like Kafka. For complete step-by-step guides about using the native CDC connector to ingest MySQL and PostgreSQL data, see [Ingest data from MySQL](/integrations/sources/mysql-cdc) and [Ingest data from PostgreSQL](/integrations/sources/postgresql-cdc). This topic only describes the configurations for using RisingWave to ingest CDC data from an event streaming system.
-For the supported sources and corresponding formats, see [Supported sources and formats](/docs/current/supported-sources-and-formats/).
+For the supported sources and corresponding formats, see [Supported sources and formats](/ingestion/supported-sources-and-formats).
diff --git a/ingestion/format-and-encode-parameters.mdx b/ingestion/format-and-encode-parameters.mdx
index f87b46de..e83a1555 100644
--- a/ingestion/format-and-encode-parameters.mdx
+++ b/ingestion/format-and-encode-parameters.mdx
@@ -1,6 +1,6 @@
---
title: "FORMAT and ENCODE parameters"
-description: "When creating a source or table using a connector, you need to specify the `FORMAT` and `ENCODE` section of the [CREATE SOURCE](/docs/current/sql-create-source/) or [CREATE TABLE](/docs/current/sql-create-source/) statement. This topic provides an overview of the formats and encoding options. For the complete list of formats we support, see [Supported sources and formats](/docs/current/supported-sources-and-formats/)"
+description: "When creating a source or table using a connector, you need to specify the `FORMAT` and `ENCODE` section of the [CREATE SOURCE](/docs/current/sql-create-source/) or [CREATE TABLE](/docs/current/sql-create-source/) statement. This topic provides an overview of the formats and encoding options. For the complete list of formats we support, see [Supported sources and formats](/ingestion/supported-sources-and-formats)"
sidebarTitle: Formats and encoding
mode: wide
---
diff --git a/ingestion/generate-test-data.mdx b/ingestion/generate-test-data.mdx
index 5c8b70be..b34631cc 100644
--- a/ingestion/generate-test-data.mdx
+++ b/ingestion/generate-test-data.mdx
@@ -68,7 +68,7 @@ Specify the following fields for every column in the source you are creating.
| column\_parameter | Description | Value | Required? |
| :---------------- | :--------------- | :------------- | :----------------- |
| kind | Generator type. | Set to `random`. | False. Default: `random` |
-| max\_past | Specify the maximum deviation from the baseline timestamp or timestamptz to determine the earliest possible timestamp or timestamptz that can be generated. | An [interval](/docs/current/sql-data-types/). Example: `2h 37min` | False. Default: `1 day` |
+| max\_past | Specify the maximum deviation from the baseline timestamp or timestamptz to determine the earliest possible timestamp or timestamptz that can be generated. | An [interval](/sql/data-types/overview). Example: `2h 37min` | False. Default: `1 day` |
| max\_past\_mode | Specify the baseline timestamp or timestamptz. The range for generated timestamps or timestamptzs is \[base time - `max_past`, base time\] | `absolute` — The base time is set to the execution time of the generator. The base time is fixed for each generation. `relative` — The base time is the system time obtained each time a new record is generated. | False. Default: `absolute` |
| basetime | If set, the generator will ignore max\_past\_mode and use the specified time as the base time. | A [date and time string](https://docs.rs/chrono/latest/chrono/struct.DateTime.html#method.parse%5Ffrom%5Frfc3339). Example: `2023-04-01T16:39:57-08:00` | False. Default: generator execution time |
| seed | A seed number that initializes the random load generator. The sequence of the generated timestamps or timestamptzs is determined by the seed value. If given the same seed number, the generator will produce the same sequence of timestamps or timestamptzs. | A positive integer. Example: `3` | False. If not specified, a fixed sequence of timestamps or timestamptzs will be generated (if the system time is constant). |
@@ -89,7 +89,7 @@ Specify the following fields for every column in the source you are creating.
-The generator supports generating data in a [struct](/docs/current/data-type-struct/). A column of `struct` type can contain multiple nested columns of different types.
+The generator supports generating data in a [struct](/sql/data-types/struct). A column of `struct` type can contain multiple nested columns of different types.
The following statement creates a load generator source which contains one column, `v1`. `v1` consists of two nested columns `v2` and `v3`.
@@ -114,7 +114,7 @@ When you configure a nested column, use `column.nested_column` to specify it. Fo
-The generator supports generating data in an [array](/docs/current/data-type-array/). An array is a list of elements of the same type. Append `[]` to the data type of the column when creating the source.
+The generator supports generating data in an [array](/sql/data-types/array-type). An array is a list of elements of the same type. Append `[]` to the data type of the column when creating the source.
The following statement creates a load generator source which contains one column, `c1`. `c1` is an array of `varchar`.
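The full statement is not shown in this hunk; as a sketch consistent with the column parameters described above (source name and values are illustrative):

```sql
CREATE SOURCE s1 (c1 varchar []) WITH (
    connector = 'datagen',
    fields.c1.length = '3',         -- three elements per array
    fields.c1._.kind = 'random',    -- generator kind for each element
    fields.c1._.length = '8',       -- length of each varchar element
    datagen.rows.per.second = '10'
) FORMAT PLAIN ENCODE JSON;
```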
diff --git a/ingestion/modify-source-or-table-schemas.mdx b/ingestion/modify-source-or-table-schemas.mdx
index ffcb5831..a7e2256d 100644
--- a/ingestion/modify-source-or-table-schemas.mdx
+++ b/ingestion/modify-source-or-table-schemas.mdx
@@ -21,7 +21,7 @@ Similarly, to add a column to a table, use this command:
ALTER TABLE table_name ADD COLUMN column_name data_type;
```
-For details about these two commands, see [ALTER SOURCE](/docs/current/sql-alter-source/) and [ALTER TABLE](/docs/current/sql-alter-table/).
+For details about these two commands, see [ALTER SOURCE](/sql/commands/sql-alter-source) and [ALTER TABLE](/sql/commands/sql-alter-table).
Note that you cannot add a primary key column to a source or table in RisingWave. To modify the primary key of a source or table, you need to recreate the table.
@@ -59,7 +59,7 @@ ALTER TABLE table_name DROP COLUMN column_name;
### Source
-At present, combined with the [ALTER SOURCE command](/docs/current/sql-alter-source/#format-and-encode-options), you can refresh the schema registry of a source by refilling its [FORMAT and ENCODE options](/docs/current/formats-and-encode-parameters/). The syntax is:
+At present, combined with the [ALTER SOURCE command](/sql/commands/sql-alter-source#format-and-encode-options), you can refresh the schema registry of a source by refilling its [FORMAT and ENCODE options](/ingestion/format-and-encode-parameters). The syntax is:
```sql
ALTER SOURCE source_name FORMAT data_format ENCODE data_encode [ (
@@ -95,7 +95,7 @@ ALTER SOURCE src_user FORMAT PLAIN ENCODE PROTOBUF(
Currently, it is not supported to modify the `data_format` and `data_encode`. Furthermore, when refreshing the schema registry of a source, it is not allowed to drop columns or change types.
-In addition, when the [FORMAT and ENCODE options](/docs/current/formats-and-encode-parameters/) are not changed, the `REFRESH SCHEMA` clause of `ALTER SOURCE` can also be used to refresh the schema of a source.
+In addition, when the [FORMAT and ENCODE options](/ingestion/format-and-encode-parameters) are not changed, the `REFRESH SCHEMA` clause of `ALTER SOURCE` can also be used to refresh the schema of a source.
```sql
ALTER SOURCE source_name REFRESH SCHEMA;
@@ -130,7 +130,7 @@ For more details about this example, see our [test file](https://github.com/risi
### Table
-Similarly, you can use the following statement to refresh the schema of a table with connectors. For more details, see [ALTER TABLE](/docs/current/sql-alter-table/#refresh-schema).
+Similarly, you can use the following statement to refresh the schema of a table with connectors. For more details, see [ALTER TABLE](/sql/commands/sql-alter-table#refresh-schema).
Refresh schema of table
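The statement referred to here presumably has the following shape (`table_name` is a placeholder):

```sql
-- Re-fetch the schema from the upstream registry and apply it to the table.
ALTER TABLE table_name REFRESH SCHEMA;
```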
@@ -144,6 +144,6 @@ If a downstream fragment references a column that is either missing or has under
## See also
-* [ALTER SOURCE command](/docs/current/sql-alter-source/)
-* [ALTER TABLE command](/docs/current/sql-alter-table/)
-* [ALTER SCHEMA command](/docs/current/sql-alter-schema/)
+* [ALTER SOURCE command](/sql/commands/sql-alter-source)
+* [ALTER TABLE command](/sql/commands/sql-alter-table)
+* [ALTER SCHEMA command](/sql/commands/sql-alter-schema)
diff --git a/ingestion/overview.mdx b/ingestion/overview.mdx
index 0a5e29cf..e530459b 100644
--- a/ingestion/overview.mdx
+++ b/ingestion/overview.mdx
@@ -42,7 +42,7 @@ SELECT *
FROM kafka_source;
```
-* Also, queries can be executed directly on the source, and **ad-hoc ingestion** will happen during the query's processing, see more information in [directly query Kafka](/docs/current/ingest-from-kafka/#query-kafka-timestamp).
+* Also, queries can be executed directly on the source, and **ad-hoc ingestion** will happen during the query's processing; see more information in [directly query Kafka](/integrations/sources/kafka#query-kafka-timestamp).
```sql
SELECT * FROM source_name
WHERE _rw_kafka_timestamp > now() - interval '10 minute';
@@ -125,8 +125,8 @@ INSERT INTO t1 SELECT * FROM source_iceberg_t1;
The information presented above provides a brief overview of the data ingestion process in RisingWave. To gain a more comprehensive understanding of this process, the following topics in this section will delve more deeply into the subject matter. Here is a brief introduction to what you can expect to find in each topic:
* Among different types of sources, we have abstracted a series of common syntax and features.
- * For more detailed information about the types, formats, and encoding options of sources, see [Formats and encoding](/docs/current/formats-and-encode-parameters/).
- * For the complete list of the sources and formats supported in RisingWave, see [Supported sources and formats](/docs/current/supported-sources-and-formats/).
+ * For more detailed information about the types, formats, and encoding options of sources, see [Formats and encoding](/ingestion/format-and-encode-parameters).
+ * For the complete list of the sources and formats supported in RisingWave, see [Supported sources and formats](/ingestion/supported-sources-and-formats).
* To learn about how to manage schemas and ingest additional fields from sources:
* [Modify source or table schemas](/docs/current/modify-schemas/)
* [Ingest additional fields with INCLUDE clause](/docs/current/include-clause/)
diff --git a/ingestion/supported-sources-and-formats.mdx b/ingestion/supported-sources-and-formats.mdx
index a6a644e0..6cd846ae 100644
--- a/ingestion/supported-sources-and-formats.mdx
+++ b/ingestion/supported-sources-and-formats.mdx
@@ -12,7 +12,7 @@ To ingest data in formats marked with "T", you need to create tables (with conne
| Connector | Version | Format |
| :------------ | :------------ | :------------------- |
-| [Kafka](/docs/current/ingest-from-kafka/) | 3.1.0 or later versions | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Debezium AVRO](#debezium-avro) (T), [DEBEZIUM\_MONGO\_JSON](#debezium-mongo-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T), [Upsert JSON](#upsert-json) (T), [Upsert AVRO](#upsert-avro) (T), [Bytes](#bytes) |
+| [Kafka](/integrations/sources/kafka) | 3.1.0 or later versions | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Debezium AVRO](#debezium-avro) (T), [DEBEZIUM\_MONGO\_JSON](#debezium-mongo-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T), [Upsert JSON](#upsert-json) (T), [Upsert AVRO](#upsert-avro) (T), [Bytes](#bytes) |
| [Redpanda](/docs/current/ingest-from-redpanda/) | Latest | [Avro](#avro), [JSON](#json), [protobuf](#protobuf) |
| [Pulsar](/integrations/sources/pulsar) | 2.8.0 or later versions | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T) |
| [Kinesis](/docs/current/ingest-from-kinesis/) | Latest | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T) |
@@ -34,7 +34,7 @@ When creating a source, you need to specify the data and encoding formats in the
### Avro
-For data in Avro format, you must specify a message and a schema registry. For Kafka data in Avro, you need to provide a Confluent Schema Registry that RisingWave can get the schema from. For more details about using Schema Registry for Kafka data, see [Read schema from Schema Registry](/docs/current/ingest-from-kafka/#read-schemas-from-schema-registry).
+For data in Avro format, you must specify a message and a schema registry. For Kafka data in Avro, you need to provide a Confluent Schema Registry that RisingWave can get the schema from. For more details about using Schema Registry for Kafka data, see [Read schema from Schema Registry](/integrations/sources/kafka#read-schemas-from-confluent-schema-registry).
`schema.registry` can accept multiple addresses. RisingWave will send requests to all URLs and return the first successful result.
@@ -65,7 +65,7 @@ Note that for `map.handling.mode = 'jsonb'`, the value types can only be: `null`
### Debezium AVRO
-When creating a source from streams in with Debezium AVRO, the schema of the source does not need to be defined in the `CREATE TABLE` statement as it can be inferred from the `SCHEMA REGISTRY`. This means that the schema file location must be specified. The schema file location can be an actual Web location, which is in `http://...`, `https://...`, or `S3://...` format, or a Confluent Schema Registry. For more details about using Schema Registry for Kafka data, see [Read schema from Schema Registry](/docs/current/ingest-from-kafka/#read-schemas-from-schema-registry).
+When creating a source from streams with Debezium AVRO, the schema of the source does not need to be defined in the `CREATE TABLE` statement as it can be inferred from the `SCHEMA REGISTRY`. This means that the schema file location must be specified. The schema file location can be an actual Web location, which is in `http://...`, `https://...`, or `S3://...` format, or a Confluent Schema Registry. For more details about using Schema Registry for Kafka data, see [Read schema from Schema Registry](/integrations/sources/kafka#read-schemas-from-confluent-schema-registry).
`schema.registry` can accept multiple addresses. RisingWave will send requests to all URLs and return the first successful result.
@@ -183,7 +183,7 @@ ENCODE JSON [ (
### Protobuf
-For data in protobuf format, you must specify a message (fully qualified by package path) and a schema location. The schema location can be an actual Web location that is in `http://...`, `https://...`, or `S3://...` format. For Kafka data in protobuf, instead of providing a schema location, you can provide a Confluent Schema Registry that RisingWave can get the schema from. For more details about using Schema Registry for Kafka data, see [Read schema from Schema Registry](/docs/current/ingest-from-kafka/#read-schemas-from-schema-registry).
+For data in protobuf format, you must specify a message (fully qualified by package path) and a schema location. The schema location can be an actual Web location that is in `http://...`, `https://...`, or `S3://...` format. For Kafka data in protobuf, instead of providing a schema location, you can provide a Confluent Schema Registry that RisingWave can get the schema from. For more details about using Schema Registry for Kafka data, see [Read schema from Schema Registry](/integrations/sources/kafka#read-schemas-from-confluent-schema-registry).
`schema.registry` can accept multiple addresses. RisingWave will send requests to all URLs and return the first successful result.
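Tying the above together, a sketch of a Kafka source that reads its schema from a Confluent Schema Registry; the topic, broker, and registry addresses are placeholders:

```sql
CREATE SOURCE user_activity WITH (
    connector = 'kafka',
    topic = 'user_activity',
    properties.bootstrap.server = 'broker1:9092'
) FORMAT PLAIN ENCODE AVRO (
    -- Multiple addresses are allowed; the first successful response wins.
    schema.registry = 'http://registry-1:8081,http://registry-2:8081'
);
```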
diff --git a/integrations/destinations/apache-doris.mdx b/integrations/destinations/apache-doris.mdx
index 92d2116f..60cd8bdc 100644
--- a/integrations/destinations/apache-doris.mdx
+++ b/integrations/destinations/apache-doris.mdx
@@ -8,7 +8,7 @@ description: "This guide describes how to sink data from RisingWave to Apache Do
* Ensure that RisingWave can access the network where the Doris backend and frontend are located. For more details, see [Synchronize Data Through External Table](https://doris.apache.org/docs/dev/data-operate/import/import-scenes/external-table-load/).
* Ensure you have an upstream materialized view or source that you can sink data from. For more details, see [CREATE SOURCE](/docs/current/sql-create-source/) or [CREATE MATERIALIZED VIEW](/docs/current/sql-create-mv/).
-* Ensure that for `struct` elements, the name and type are the same in Doris and RisingWave. If they are not the same, the values will be set to `NULL` or to default values. For more details on the `struct` data type, see [Struct](/docs/current/data-type-struct/).
+* Ensure that for `struct` elements, the name and type are the same in Doris and RisingWave. If they are not the same, the values will be set to `NULL` or to default values. For more details on the `struct` data type, see [Struct](/sql/data-types/struct).
## Syntax
@@ -75,7 +75,7 @@ WITH (
## Data type mapping
-The following table shows the corresponding data types between RisingWave and Doris that should be specified when creating a sink. For details on native RisingWave data types, see [Overview of data types](/docs/current/sql-data-types/).
+The following table shows the corresponding data types between RisingWave and Doris that should be specified when creating a sink. For details on native RisingWave data types, see [Overview of data types](/sql/data-types/overview).
Regarding `decimal` types, RisingWave will round to the nearest decimal place to ensure that its precision matches that of Doris. Ensure that the length of decimal types being imported into Doris does not exceed Doris's decimal length; otherwise, the import will fail.
diff --git a/integrations/destinations/snowflake.mdx b/integrations/destinations/snowflake.mdx
index fa410e0c..f0c23376 100644
--- a/integrations/destinations/snowflake.mdx
+++ b/integrations/destinations/snowflake.mdx
@@ -58,7 +58,7 @@ All parameters are required unless specified otherwise.
## Data type mapping
-The following table shows the corresponding data types between RisingWave and Snowflake. For details on native RisingWave data types, see [Overview of data types](/docs/current/sql-data-types/).
+The following table shows the corresponding data types between RisingWave and Snowflake. For details on native RisingWave data types, see [Overview of data types](/sql/data-types/overview).
| RisingWave type | Snowflake type |
| :-------------- | :---------------------------------------------------------------- |
diff --git a/integrations/destinations/sql-server.mdx b/integrations/destinations/sql-server.mdx
index 2e8cc291..873bf9c7 100644
--- a/integrations/destinations/sql-server.mdx
+++ b/integrations/destinations/sql-server.mdx
@@ -46,7 +46,7 @@ WITH (
## Data type mapping
-The following table shows the corresponding data types between RisingWave and SQL Server that should be specified when creating a sink. For details on native RisingWave data types, see [Overview of data types](/docs/current/sql-data-types/).
+The following table shows the corresponding data types between RisingWave and SQL Server that should be specified when creating a sink. For details on native RisingWave data types, see [Overview of data types](/sql/data-types/overview).
| SQL Server type | RisingWave type |
| :-------------- | :-------------------------- |
diff --git a/integrations/destinations/starrocks.mdx b/integrations/destinations/starrocks.mdx
index 1e072696..85ea9b36 100644
--- a/integrations/destinations/starrocks.mdx
+++ b/integrations/destinations/starrocks.mdx
@@ -66,7 +66,7 @@ FROM bhv_mv WITH (
```
## Data type mapping
-The following table shows the corresponding data type in RisingWave that should be specified when creating a sink. For details on native RisingWave data types, see [Overview of data types](/docs/current/sql-data-types/).
+The following table shows the corresponding data type in RisingWave that should be specified when creating a sink. For details on native RisingWave data types, see [Overview of data types](/sql/data-types/overview).
| StarRocks type | RisingWave type |
| :------------- | :---------------------------------- |
diff --git a/integrations/destinations/tidb.mdx b/integrations/destinations/tidb.mdx
index d218ca4e..1c4325b6 100644
--- a/integrations/destinations/tidb.mdx
+++ b/integrations/destinations/tidb.mdx
@@ -8,7 +8,7 @@ For the syntax, settings, and examples, see [Sink data from RisingWave to MySQL
### Data type mapping
-The following table shows the corresponding data types between RisingWave and TiDB. For details on native RisingWave data types, see [Overview of data types](/docs/current/sql-data-types/).
+The following table shows the corresponding data types between RisingWave and TiDB. For details on native RisingWave data types, see [Overview of data types](/sql/data-types/overview).
| RisingWave type | TiDB type |
| :-------------- | :------------------------------------------------- |
diff --git a/integrations/sources/amazon-msk.mdx b/integrations/sources/amazon-msk.mdx
index 1e005647..7b5d8d58 100644
--- a/integrations/sources/amazon-msk.mdx
+++ b/integrations/sources/amazon-msk.mdx
@@ -163,7 +163,7 @@ psql -h localhost -p 4566 -d dev -U root
### Create a source in RisingWave
-To learn about the specific syntax used to consume data from a Kafka topic, see [Ingest data from Kafka](/docs/current/ingest-from-kafka/).
+To learn about the specific syntax used to consume data from a Kafka topic, see [Ingest data from Kafka](/integrations/sources/kafka).
For example, the following query creates a table that consumes data from an MSK topic connected to Kafka.
diff --git a/integrations/sources/confluent-cloud.mdx b/integrations/sources/confluent-cloud.mdx
index 05ca26d3..a7f0d5fd 100644
--- a/integrations/sources/confluent-cloud.mdx
+++ b/integrations/sources/confluent-cloud.mdx
@@ -54,7 +54,7 @@ Create a table in RisingWave to ingest data from the Kafka topic created in Conf
The following query will create a table that connects to the data generator created in Confluent. Remember to fill in the authentication parameters accordingly.
-See the [Ingest data from Kafka](/docs/current/ingest-from-kafka/) topic for more details on the syntax and connection parameters.
+See the [Ingest data from Kafka](/integrations/sources/kafka) topic for more details on the syntax and connection parameters.
```sql
CREATE TABLE s (
diff --git a/integrations/sources/mysql-cdc.mdx b/integrations/sources/mysql-cdc.mdx
index 4aa58ba4..7ba04220 100644
--- a/integrations/sources/mysql-cdc.mdx
+++ b/integrations/sources/mysql-cdc.mdx
@@ -275,7 +275,7 @@ cdc_offset | {"MySql": {"filename": "binlog.000005", "position": 60946679
## Data type mapping
-The following table shows the corresponding data type in RisingWave that should be specified when creating a source. For details on native RisingWave data types, see [Overview of data types](/docs/current/sql-data-types/).
+The following table shows the corresponding data type in RisingWave that should be specified when creating a source. For details on native RisingWave data types, see [Overview of data types](/sql/data-types/overview).
RisingWave data types marked with an asterisk indicate that while there is no corresponding RisingWave data type, the ingested data can still be consumed as the listed type.
diff --git a/integrations/sources/overview.mdx b/integrations/sources/overview.mdx
index 7fd667fc..a471e311 100644
--- a/integrations/sources/overview.mdx
+++ b/integrations/sources/overview.mdx
@@ -5,4 +5,4 @@ mode: wide
sidebarTitle: Overview
---
- 6 items 5 items 1 item 3 items 3 item
+ 6 items 5 items 1 item 3 items 3 item
diff --git a/integrations/sources/postgresql-cdc.mdx b/integrations/sources/postgresql-cdc.mdx
index f0b5072a..035c5d20 100644
--- a/integrations/sources/postgresql-cdc.mdx
+++ b/integrations/sources/postgresql-cdc.mdx
@@ -290,7 +290,7 @@ To check the progress of backfilling historical data, find the corresponding int
## Data type mapping
-The following table shows the corresponding data type in RisingWave that should be specified when creating a source. For details on native RisingWave data types, see [Overview of data types](/docs/current/sql-data-types/).
+The following table shows the corresponding data type in RisingWave that should be specified when creating a source. For details on native RisingWave data types, see [Overview of data types](/sql/data-types/overview).
RisingWave data types marked with an asterisk indicate that while there is no corresponding RisingWave data type, the ingested data can still be consumed as the listed type.
diff --git a/integrations/sources/redhat-amq-streams.mdx b/integrations/sources/redhat-amq-streams.mdx
index f3510dff..ac19e394 100644
--- a/integrations/sources/redhat-amq-streams.mdx
+++ b/integrations/sources/redhat-amq-streams.mdx
@@ -14,7 +14,7 @@ Before ingesting data from RedHat AMQ Streams into RisingWave, please ensure the
* Create the AMQ Streams topic from which you want to ingest data.
* Ensure that your RisingWave cluster is running.
-For example, we create a topic named `financial-transactions` with the following sample data from various financial transactions data, formatted as JSON. Each sample represents a unique transaction with distinct transaction IDs, sender and receiver accounts, amounts, currencies, and timestamps. Hence AMQ Streams is compatible with Apache Kafka. For more information, refer to [Apache Kafka](https://docs.risingwave.com/docs/current/ingest-from-kafka/).
+For example, we create a topic named `financial-transactions` with the following sample data from various financial transactions, formatted as JSON. Each sample represents a unique transaction with distinct transaction IDs, sender and receiver accounts, amounts, currencies, and timestamps. Note that AMQ Streams is compatible with Apache Kafka. For more information, refer to [Apache Kafka](https://docs.risingwave.com/integrations/sources/kafka).
```bash
{"tx_id": "TX1004", "sender_account": "ACC1004", "receiver_account": "ACC2004", "amount": 2000.00, "currency": "USD", "tx_timestamp": "2024-03-29T12:36:00Z"}
diff --git a/integrations/sources/redpanda.mdx b/integrations/sources/redpanda.mdx
index f1cf34cd..5910ab81 100644
--- a/integrations/sources/redpanda.mdx
+++ b/integrations/sources/redpanda.mdx
@@ -5,4 +5,4 @@ mode: wide
sidebarTitle: Redpanda
---
-For the syntax, settings, and examples, see [Ingest data from Kafka](/docs/current/ingest-from-kafka/).
+For the syntax, settings, and examples, see [Ingest data from Kafka](/integrations/sources/kafka).
diff --git a/integrations/sources/sql-server-cdc.mdx b/integrations/sources/sql-server-cdc.mdx
index 6647339b..93333c9d 100644
--- a/integrations/sources/sql-server-cdc.mdx
+++ b/integrations/sources/sql-server-cdc.mdx
@@ -223,7 +223,7 @@ To check the progress of backfilling historical data, find the corresponding int
## Data type mapping
-The following table shows the corresponding data type in RisingWave that should be specified when creating a CDC table. For details on native RisingWave data types, see [Overview of data types](/docs/current/sql-data-types/).
+The following table shows the corresponding data type in RisingWave that should be specified when creating a CDC table. For details on native RisingWave data types, see [Overview of data types](/sql/data-types/overview).
RisingWave data types marked with an asterisk indicate that while there is no corresponding RisingWave data type, the ingested data can still be consumed as the listed type.
diff --git a/integrations/visualization/overview.mdx b/integrations/visualization/overview.mdx
index 4babcf58..00495113 100644
--- a/integrations/visualization/overview.mdx
+++ b/integrations/visualization/overview.mdx
@@ -4,4 +4,4 @@ description: "You can use a variety of visualization tools, such as [Apache Supe
mode: wide
---
-If the visualization tool you are using is not listed in the official RisingWave documentation, you may attempt to connect to RisingWave directly using the PostgreSQL driver. You can let us know your interest in a particular system by clicking the thumb-up button on [Integrations](/docs/current/rw-integration-summary/).
+If the visualization tool you are using is not listed in the official RisingWave documentation, you may attempt to connect to RisingWave directly using the PostgreSQL driver. You can let us know your interest in a particular system by clicking the thumb-up button on [Integrations](/integrations/overview).
diff --git a/operate/alter-streaming.mdx b/operate/alter-streaming.mdx
index b556d19d..bb289b4e 100644
--- a/operate/alter-streaming.mdx
+++ b/operate/alter-streaming.mdx
@@ -5,7 +5,7 @@ description: "This document explains how to modify the logic in streaming pipeli
## Alter a table or source
-To add or drop columns from a table or source, simply use the [ALTER TABLE](/docs/current/sql-alter-table/) or [ALTER SOURCE](/docs/current/sql-alter-source/) command. For example:
+To add or drop columns from a table or source, simply use the [ALTER TABLE](/sql/commands/sql-alter-table) or [ALTER SOURCE](/sql/commands/sql-alter-source) command. For example:
```sql
ALTER TABLE customers ADD COLUMN birth_date date;
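-- Hedged companion sketches (names are illustrative); tables also support
-- dropping columns, and sources accept the same ADD COLUMN form:
ALTER TABLE customers DROP COLUMN birth_date;
ALTER SOURCE customer_events ADD COLUMN trace_id varchar;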
diff --git a/operate/manage-a-large-number-of-streaming-jobs.mdx b/operate/manage-a-large-number-of-streaming-jobs.mdx
index 7809f40b..9ce034e8 100644
--- a/operate/manage-a-large-number-of-streaming-jobs.mdx
+++ b/operate/manage-a-large-number-of-streaming-jobs.mdx
@@ -25,7 +25,7 @@ The adaptive parallelism feature in version 1.7.0 ensures that every streaming j
### Limit the concurrency of creating stream jobs
-If you want to create multiple streaming jobs at once using scripts or tools such as DBT, the [system parameter](/docs/current/view-configure-system-parameters/) `max_concurrent_creating_streaming_jobs` is helpful. It controls the maximum number of streaming jobs created concurrently. However, please do not set it too high, as it may introduce excessive pressure on the cluster.
+If you want to create multiple streaming jobs at once using scripts or tools such as dbt, the [system parameter](/operate/view-configure-system-parameters) `max_concurrent_creating_streaming_jobs` is helpful. It controls the maximum number of streaming jobs created concurrently. Do not set it too high, as that may put excessive pressure on the cluster.
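A minimal sketch of adjusting it, using the `ALTER SYSTEM` syntax (the value `4` is illustrative):

```sql
ALTER SYSTEM SET max_concurrent_creating_streaming_jobs TO 4;
```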
## Tuning an existing cluster
@@ -39,7 +39,7 @@ If the number exceeds 50000, please pay close attention and check the following
### Decrease the parallelism
-When the total number of actors in the cluster is large, excessive parallelism can be counterproductive. After v1.7.0, you can check the parallelism number of the running streaming jobs in the system table `rw_fragment_parallelism`, and you can alter the streaming jobs's parallelism with the `ALTER` statement. For more information, refer to [Cluster scaling](/docs/current/k8s-cluster-scaling/).
+When the total number of actors in the cluster is large, excessive parallelism can be counterproductive. Since v1.7.0, you can check the parallelism of the running streaming jobs in the system table `rw_fragment_parallelism`, and you can alter a streaming job's parallelism with the `ALTER` statement. For more information, refer to [Cluster scaling](/deploy/k8s-cluster-scaling).
Here is an example of how to adjust the parallelism.
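A hedged sketch, assuming the `SET PARALLELISM` clause documented for `ALTER TABLE` and `ALTER MATERIALIZED VIEW` (the object name and value are illustrative):

```sql
-- Inspect current parallelism, then lower it for one streaming job.
SELECT * FROM rw_fragment_parallelism;
ALTER MATERIALIZED VIEW mv1 SET PARALLELISM TO 4;
```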
diff --git a/operate/meta-backup.mdx b/operate/meta-backup.mdx
index 8c087959..a6ebb728 100644
--- a/operate/meta-backup.mdx
+++ b/operate/meta-backup.mdx
@@ -13,7 +13,7 @@ Before you can create a meta snapshot, you need to set the `backup_storage_url`
Be careful not to change `backup_storage_url` and `backup_storage_directory` while snapshots exist. Doing so is not strictly forbidden, but any snapshots taken before the change will be invalidated and can no longer be used for restoration.
-To learn about how to configure system parameters, see [How to configure system parameters](/docs/current/view-configure-system-parameters/#how-to-configure-system-parameters).
+To learn about how to configure system parameters, see [How to configure system parameters](/operate/view-configure-system-parameters#how-to-configure-system-parameters).
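For instance, a hedged sketch using the parameters named above (the URL and directory values are placeholders):

```sql
ALTER SYSTEM SET backup_storage_url TO 's3://your-bucket/meta-backup';
ALTER SYSTEM SET backup_storage_directory TO 'backup';
```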
## Create a meta snapshot
diff --git a/processing/time-travel-queries.mdx b/processing/time-travel-queries.mdx
index c69b2b3f..87afaf18 100644
--- a/processing/time-travel-queries.mdx
+++ b/processing/time-travel-queries.mdx
@@ -17,9 +17,9 @@ This feature is in the public preview stage, meaning it's nearing the final prod
## Prerequisites
-Time travel requires the meta store type to be [SQL-compatible](/docs/current/risingwave-docker-compose/introduction#customize-meta-store). We recommend reserving at least 50 GB of disk space for the meta store.
+Time travel requires the meta store type to be [SQL-compatible](/deploy/risingwave-docker-compose#customize-meta-store). We recommend reserving at least 50 GB of disk space for the meta store.
-The system parameter `time_travel_retention_ms` controls time travel functionality. By default, it's set to `0`, which means time travel is turned off. To enable time travel, you need to [alter this system parameter](/docs/current/view-configure-system-parameters/#how-to-configure-system-parameters) to a non-zero value.
+The system parameter `time_travel_retention_ms` controls time travel functionality. By default, it's set to `0`, which means time travel is turned off. To enable time travel, you need to [alter this system parameter](/operate/view-configure-system-parameters#how-to-configure-system-parameters) to a non-zero value.
For example, you can set `time_travel_retention_ms` to `86400000` (1 day). Then historical data older than this period will be deleted and no longer accessible.
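As a sketch of that setting (the value matches the 1-day example above):

```sql
ALTER SYSTEM SET time_travel_retention_ms TO 86400000; -- 1 day
-- Setting it back to 0 turns time travel off again.
```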
diff --git a/processing/watermarks.mdx b/processing/watermarks.mdx
index 52b2e4d7..d57cc80d 100644
--- a/processing/watermarks.mdx
+++ b/processing/watermarks.mdx
@@ -47,7 +47,7 @@ WATERMARK FOR time_col as time_col
```sql
WATERMARK FOR time_col as time_col - INTERVAL 'string' time_unit
```
-Supported `time_unit` values include: second, minute, hour, day, month, and year. For more details, see the `interval` data type under [Overview of data types](/docs/current/sql-data-types/).
+Supported `time_unit` values include: second, minute, hour, day, month, and year. For more details, see the `interval` data type under [Overview of data types](/sql/data-types/overview).
Currently, RisingWave only supports using one of the columns from the table as the watermark column. To use nested fields (e.g., fields in `STRUCT`), or perform expression evaluation on the input rows (e.g., casting data types), please refer to [generated columns](/docs/current/query-syntax-generated-columns/).
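A hedged sketch combining the pieces above in a `CREATE SOURCE` statement (all connector settings are placeholders):

```sql
CREATE SOURCE events (
    event_id int,
    time_col timestamp,
    WATERMARK FOR time_col AS time_col - INTERVAL '5' SECOND
) WITH (
    connector = 'kafka',
    topic = 'events',
    properties.bootstrap.server = 'broker:9092'
) FORMAT PLAIN ENCODE JSON;
```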
diff --git a/reference/key-concepts.mdx b/reference/key-concepts.mdx
index d23adf71..08292f80 100644
--- a/reference/key-concepts.mdx
+++ b/reference/key-concepts.mdx
@@ -43,7 +43,7 @@ A node is a logical collection of IT resources that handles specific workloads b
### Parallelism[](#parallelism "Direct link to Parallelism")
-Parallelism refers to the technique of simultaneously executing multiple database operations or queries to improve performance and increase efficiency. It involves dividing a database workload into smaller tasks and executing them concurrently on multiple processors or machines. In RisingWave, you can set the parallelism of streaming jobs, like [tables](/docs/current/sql-alter-table/#set-parallelism), [materialized views](/docs/current/sql-alter-materialized-view/#set-parallelism), and [sinks](/docs/current/sql-alter-sink/#set-parallelism).
+Parallelism refers to the technique of simultaneously executing multiple database operations or queries to improve performance and increase efficiency. It involves dividing a database workload into smaller tasks and executing them concurrently on multiple processors or machines. In RisingWave, you can set the parallelism of streaming jobs, like [tables](/sql/commands/sql-alter-table#set-parallelism), [materialized views](/sql/commands/sql-alter-materialized-view#set-parallelism), and [sinks](/sql/commands/sql-alter-sink#set-parallelism).
### Sinks[](#sinks "Direct link to Sinks")
diff --git a/sql/commands/overview.mdx b/sql/commands/overview.mdx
index 4c8f285d..1db8b32b 100644
--- a/sql/commands/overview.mdx
+++ b/sql/commands/overview.mdx
@@ -51,11 +51,11 @@ sidebarTitle: Overview
title="ALTER SCHEMA"
icon="diagram-project"
iconType="solid"
- href="/docs/current/sql-alter-schema/"
+ href="/sql/commands/sql-alter-schema"
>
Modify the properties of a schema.
- Modify the properties of a sink. Modify the properties of a source. Modify a server configuration parameter. Modify the properties of a table. Modify the properties of a user. Modify the properties of a view. Convert stream into an append-only changelog. Start a transaction. Cancel specific streaming jobs. Add comments on tables or columns. Commit the current transaction. Create a user-defined aggregate function. Create a connection between VPCs. Create a new database. Create a user-defined function. Create an index on a column of a table or a materialized view to speed up data retrieval. Create a materialized view. Create a new schema. Create a secret to store credentials. Create a sink into RisingWave's table. Create a sink. Supported data sources and how to connect RisingWave to the sources. Create a table. Create a new user account. Create a non-materialized view.
+ Modify the properties of a sink. Modify the properties of a source. Modify a server configuration parameter. Modify the properties of a table. Modify the properties of a user. Modify the properties of a view. Convert stream into an append-only changelog. Start a transaction. Cancel specific streaming jobs. Add comments on tables or columns. Commit the current transaction. Create a user-defined aggregate function. Create a connection between VPCs. Create a new database. Create a user-defined function. Create an index on a column of a table or a materialized view to speed up data retrieval. Create a materialized view. Create a new schema. Create a secret to store credentials. Create a sink into RisingWave's table. Create a sink. Supported data sources and how to connect RisingWave to the sources. Create a table. Create a new user account. Create a non-materialized view. Remove rows from a table. Get information about the columns in a table, source, sink, view, or materialized view. Discard session state. Drop a user-defined aggregate function. Remove a connection. Remove a database. Drop a user-defined function. Remove an index. Remove a materialized view. Remove a schema. Drop a secret. Remove a sink. Remove a source. Remove a table. Remove a user. Drop a view. Show the execution plan of a statement. Commit pending data changes and persists updated data to storage. Grant a user privileges. Insert new rows of data into a table. Trigger recovery manually. Revoke privileges from a user. Retrieve data from a table or a materialized view. Run Data Definition Language (DDL) operations in the background. Enable or disable implicit flushes after batch operations. Set time zone. Change a run-time parameter. Show the details of your RisingWave cluster. Show columns in a table, source, sink, view or materialized view. Show existing connections. Show the query used to create the specified index. Show the query used to create the specified materialized view. Show the query used to create the specified sink. Show the query used to create the specified source. Show the query used to create the specified table. Show the query used to create the specified view. Show all cursors in the current session. Show existing databases. Show all user-defined functions. Show existing indexes from a particular table. Show internal tables to learn about the existing internal states. Show all streaming jobs. Show existing materialized views. Show the details of the system parameters. Display system current workload. Show existing schemas. Shows all sinks. Show existing sources. Show all subscription cursors in the current session. Show existing tables. Show existing views. Start a transaction. Modify existing rows in a table.
diff --git a/sql/commands/sql-alter-source.mdx b/sql/commands/sql-alter-source.mdx
index 3a479ef9..05216969 100644
--- a/sql/commands/sql-alter-source.mdx
+++ b/sql/commands/sql-alter-source.mdx
@@ -34,7 +34,7 @@ ALTER SOURCE src1
```
-* To alter columns in a source created with a schema registry, see [FORMAT and ENCODE options](/docs/current/sql-alter-source/#format-and-encode-options).
+* To alter columns in a source created with a schema registry, see [FORMAT and ENCODE options](/sql/commands/sql-alter-source#format-and-encode-options).
* You cannot add a primary key column to a source or table in RisingWave. To modify the primary key of a source or table, you need to recreate the table.
* You cannot remove a column from a source in RisingWave. If you intend to remove a column from a source, you'll need to drop the source and create the source again.
@@ -93,7 +93,7 @@ ALTER SOURCE test_source SET SCHEMA test_schema;
### `FORMAT and ENCODE options`
-At present, combined with the `ALTER SOURCE` command, you can refresh the schema registry of a source by refilling the FORMAT and ENCODE options. For more details about these options, see [FORMAT and ENCODE parameters](/docs/current/formats-and-encode-parameters/).
+At present, you can use the `ALTER SOURCE` command to refresh a source's schema from the schema registry by re-specifying the FORMAT and ENCODE options. For more details about these options, see [FORMAT and ENCODE parameters](/ingestion/format-and-encode-parameters).
```sql
ALTER SOURCE source_name FORMAT data_format ENCODE data_encode [ (
diff --git a/sql/commands/sql-alter-system.mdx b/sql/commands/sql-alter-system.mdx
index 42dacf5e..057336d0 100644
--- a/sql/commands/sql-alter-system.mdx
+++ b/sql/commands/sql-alter-system.mdx
@@ -3,7 +3,7 @@ title: "ALTER SYSTEM"
description: "The `ALTER SYSTEM` command modifies the value of a server configuration parameter."
---
-You can use this command to configure some parameters, like the [system parameters](/docs/current/view-configure-system-parameters/#how-to-configure-system-parameters) and [runtime parameters](/docs/current/view-configure-runtime-parameters/#how-to-configure-runtime-parameters).
+You can use this command to configure some parameters, like the [system parameters](/operate/view-configure-system-parameters#how-to-configure-system-parameters) and [runtime parameters](/operate/view-configure-runtime-parameters#how-to-configure-runtime-parameters).
```sql Syntax
ALTER SYSTEM SET configuration_parameter { TO | = } { value [, ...] | DEFAULT }
diff --git a/sql/commands/sql-create-aggregate.mdx b/sql/commands/sql-create-aggregate.mdx
index a6188e7e..acbcb127 100644
--- a/sql/commands/sql-create-aggregate.mdx
+++ b/sql/commands/sql-create-aggregate.mdx
@@ -1,6 +1,6 @@
---
title: "CREATE AGGREGATE"
-description: "The `CREATE AGGREGATE` command can be used to create [user-defined aggregate functions](/docs/current/user-defined-functions/) (UDAFs). Currently, UDAFs are only supported in Python and JavaScript as embedded UDFs."
+description: "The `CREATE AGGREGATE` command can be used to create [user-defined aggregate functions](/sql/udfs/user-defined-functions) (UDAFs). Currently, UDAFs are only supported in Python and JavaScript as embedded UDFs."
---
## Syntax
diff --git a/sql/commands/sql-create-connection.mdx b/sql/commands/sql-create-connection.mdx
index 0b5ebcd4..77ca441b 100644
--- a/sql/commands/sql-create-connection.mdx
+++ b/sql/commands/sql-create-connection.mdx
@@ -62,5 +62,5 @@ CREATE CONNECTION connection_name WITH (
);
```
7. Create a source or sink with AWS PrivateLink connection.
- * Use the `CREATE SOURCE/TABLE` command to create a Kafka source with PrivateLink connection. For more details, see [Create source with AWS PrivateLink connection](/docs/current/ingest-from-kafka/#create-source-with-vpc-connection).
+ * Use the `CREATE SOURCE/TABLE` command to create a Kafka source with PrivateLink connection. For more details, see [Create source with AWS PrivateLink connection](/integrations/sources/kafka#create-source-with-privatelink-connection).
* Use the `CREATE SINK` command to create a Kafka sink with PrivateLink connection. For more details, see [Create sink with AWS PrivateLink connection](/integrations/destinations/apache-kafka#create-sink-with-vpc-connection).
diff --git a/sql/commands/sql-create-function.mdx b/sql/commands/sql-create-function.mdx
index 97f86a2a..6ea8f64f 100644
--- a/sql/commands/sql-create-function.mdx
+++ b/sql/commands/sql-create-function.mdx
@@ -1,6 +1,6 @@
---
title: "CREATE FUNCTION"
-description: "The `CREATE FUNCTION` command can be used to create [user-defined functions](/docs/current/user-defined-functions/) (UDFs)."
+description: "The `CREATE FUNCTION` command can be used to create [user-defined functions](/sql/udfs/user-defined-functions) (UDFs)."
---
There are three ways to create UDFs in RisingWave: UDFs as external functions, embedded UDFs, and SQL UDFs. `CREATE FUNCTION` is used for all three, with different syntax.
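For instance, a minimal sketch of the SQL-UDF form (the function name and body are illustrative):

```sql
CREATE FUNCTION add_one(int) RETURNS int
LANGUAGE sql AS 'SELECT $1 + 1';
```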
diff --git a/sql/commands/sql-create-source.mdx b/sql/commands/sql-create-source.mdx
index 577146ef..58afc3d8 100644
--- a/sql/commands/sql-create-source.mdx
+++ b/sql/commands/sql-create-source.mdx
@@ -3,7 +3,7 @@ title: "CREATE SOURCE"
description: "A source is a resource that RisingWave can read data from. You can create a source in RisingWave using the `CREATE SOURCE` command."
---
-For the full list of the sources we support, see [Supported sources](/docs/current/supported-sources-and-formats/#supported-sources).
+For the full list of the sources we support, see [Supported sources](/ingestion/supported-sources-and-formats#supported-sources).
If you choose to persist the data from the source in RisingWave, use the [CREATE TABLE](/sql/commands/sql-create-table) command with connector settings. Or if you need to create the primary key (which is required by some formats like FORMAT UPSERT/DEBEZIUM), you have to use `CREATE TABLE` too. For more details about the differences between sources and tables, see [here](/docs/current/data-ingestion/#table-with-connectors).
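For example, a hedged sketch of the table-with-connector form for an upsert stream, which requires a primary key (all connector settings are placeholders):

```sql
CREATE TABLE orders (
    order_id int PRIMARY KEY,
    amount double precision
) WITH (
    connector = 'kafka',
    topic = 'orders',
    properties.bootstrap.server = 'broker:9092'
) FORMAT UPSERT ENCODE JSON;
```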
@@ -67,8 +67,8 @@ The generated column is created in RisingWave and will not be accessed through t
| _generation\_expression_ | The expression for the generated column. For details about generated columns, see [Generated columns](/docs/current/query-syntax-generated-columns/). |
| _watermark\_clause_ | A clause that defines the watermark for a timestamp column. The syntax is WATERMARK FOR column\_name as expr. For details about watermarks, refer to [Watermarks](/docs/current/watermarks/). |
| **INCLUDE** clause | Extract fields not included in the payload as separate columns. For more details on its usage, see [INCLUDE clause](/docs/current/include-clause/). |
-| **WITH** clause | Specify the connector settings here if trying to store all the source data. See [Supported sources](/docs/current/supported-sources-and-formats/#supported-sources) for the full list of supported source as well as links to specific connector pages detailing the syntax for each source. |
-| **FORMAT** and **ENCODE** options | Specify the data format and the encoding format of the source data. To learn about the supported data formats, see [Supported formats](/docs/current/supported-sources-and-formats/#supported-formats). |
+| **WITH** clause | Specify the connector settings here if trying to store all the source data. See [Supported sources](/ingestion/supported-sources-and-formats#supported-sources) for the full list of supported sources, as well as links to specific connector pages detailing the syntax for each source. |
+| **FORMAT** and **ENCODE** options | Specify the data format and the encoding format of the source data. To learn about the supported data formats, see [Supported formats](/ingestion/supported-sources-and-formats#supported-formats). |
Please distinguish between the parameters set in the FORMAT and ENCODE options and those set in the WITH clause. Ensure that you place them correctly and avoid any misuse.
@@ -197,7 +197,7 @@ Shared sources do not support `ALTER SOURCE`. Use non-shared sources if you requ
title="ALTER SOURCE"
icon="pen-to-square"
iconType="solid"
- href="/docs/current/sql-alter-source/"
+ href="/sql/commands/sql-alter-source"
>
Modify a source
diff --git a/sql/commands/sql-drop-aggregate.mdx b/sql/commands/sql-drop-aggregate.mdx
index 1c64c6d6..f025743a 100644
--- a/sql/commands/sql-drop-aggregate.mdx
+++ b/sql/commands/sql-drop-aggregate.mdx
@@ -1,6 +1,6 @@
---
title: "DROP AGGREGATE"
-description: "Use the `DROP AGGREGATE` command to remove an existing [user-defined aggregate function (UDAF)](/docs/current/user-defined-functions/). The usage is similar to `DROP FUNCTION`, except that it's for aggregate functions."
+description: "Use the `DROP AGGREGATE` command to remove an existing [user-defined aggregate function (UDAF)](/sql/udfs/user-defined-functions). The usage is similar to `DROP FUNCTION`, except that it's for aggregate functions."
---
## Syntax
diff --git a/sql/commands/sql-drop-function.mdx b/sql/commands/sql-drop-function.mdx
index b74b50e4..ae388af9 100644
--- a/sql/commands/sql-drop-function.mdx
+++ b/sql/commands/sql-drop-function.mdx
@@ -2,7 +2,7 @@
title: "DROP FUNCTION"
---
-Use the `DROP FUNCTION` command to remove an existing [user-defined function (UDF)](/docs/current/user-defined-functions/).
+Use the `DROP FUNCTION` command to remove an existing [user-defined function (UDF)](/sql/udfs/user-defined-functions).
## Syntax
@@ -85,7 +85,7 @@ DROP FUNCTION f1;
title="User-defined functions"
icon="code"
iconType="solid"
- href="/docs/current/user-defined-functions/"
+ href="/sql/udfs/user-defined-functions"
>
A step-by-step guide for using UDFs in RisingWave: installing the RisingWave UDF API, defining functions in a Python file, starting the UDF server, and declaring UDFs in RisingWave.
diff --git a/sql/commands/sql-show-functions.mdx b/sql/commands/sql-show-functions.mdx
index 47a1178d..51bae471 100644
--- a/sql/commands/sql-show-functions.mdx
+++ b/sql/commands/sql-show-functions.mdx
@@ -1,6 +1,6 @@
---
title: "SHOW FUNCTIONS"
-description: "Run `SHOW FUNCTIONS` to get a list of existing [user-defined functions](/docs/current/user-defined-functions/). The returned information includes the name, argument types, return type, language, and server address of each function."
+description: "Run `SHOW FUNCTIONS` to get a list of existing [user-defined functions](/sql/udfs/user-defined-functions). The returned information includes the name, argument types, return type, language, and server address of each function."
---
## Syntax
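The command takes no arguments; a minimal sketch:

```sql
SHOW FUNCTIONS;
```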
diff --git a/sql/commands/sql-show-parameters.mdx b/sql/commands/sql-show-parameters.mdx
index 7f911a75..cc0bd52a 100644
--- a/sql/commands/sql-show-parameters.mdx
+++ b/sql/commands/sql-show-parameters.mdx
@@ -1,6 +1,6 @@
---
title: "SHOW PARAMETERS"
-description: "You can use the `SHOW PARAMETERS` command to view the [system parameters](/docs/current/view-configure-system-parameters/), along with their current values."
+description: "You can use the `SHOW PARAMETERS` command to view the [system parameters](/operate/view-configure-system-parameters), along with their current values."
---
```bash Examples
diff --git a/sql/query-syntax/value-exp.mdx b/sql/query-syntax/value-exp.mdx
index e1bdfcf2..0254b550 100644
--- a/sql/query-syntax/value-exp.mdx
+++ b/sql/query-syntax/value-exp.mdx
@@ -28,7 +28,7 @@ The `DISTINCT` keyword, which is only available in the second form, cannot be us
AGGREGATE:function_name
```
-where the `AGGREGATE:` prefix converts a [builtin array function](/docs/current/sql-function-array/) (e.g. `array_sum`) or an [user-defined function](/docs/current/user-defined-functions/), to an aggregate function. The function being converted must accept exactly one argument of an [array type](/docs/current/data-type-array/). After the conversion, a function like `foo ( array of T ) -> U` becomes an aggregate function like `AGGREGATE:foo ( T ) -> U`.
+where the `AGGREGATE:` prefix converts a [builtin array function](/docs/current/sql-function-array/) (e.g. `array_sum`) or a [user-defined function](/sql/udfs/user-defined-functions) to an aggregate function. The function being converted must accept exactly one argument of an [array type](/sql/data-types/array-type). After the conversion, a function like `foo ( array of T ) -> U` becomes an aggregate function like `AGGREGATE:foo ( T ) -> U`.
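For instance, a sketch under the signature rule above (`array_sum(int[]) -> int` becomes `AGGREGATE:array_sum(int) -> int`; the table is hypothetical):

```sql
-- Sums the int column v across all rows of t.
SELECT AGGREGATE:array_sum(v) FROM t;
```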
## Window function calls
diff --git a/sql/system-catalogs/rw-catalog.mdx b/sql/system-catalogs/rw-catalog.mdx
index 5f8eab26..c8367745 100644
--- a/sql/system-catalogs/rw-catalog.mdx
+++ b/sql/system-catalogs/rw-catalog.mdx
@@ -104,7 +104,7 @@ SELECT name, initialized_at, created_at FROM rw_sources;
| rw\_indexes | Contains information about indexes in the database, including their IDs, names, schema identifiers, definitions, and more. |
| rw\_internal\_tables | Contains information about internal tables in the database. Internal tables are tables that store intermediate results (also known as internal states) of queries. Equivalent to the [SHOW INTERNAL TABLES](/docs/current/sql-show-internal-tables/) command. |
| rw\_materialized\_views | Contains information about materialized views in the database, including their unique IDs, names, schema IDs, owner IDs, definitions, append-only information, access control lists, initialization and creation timestamps, and the cluster version when the materialized view was initialized and created. |
-| rw\_meta\_snapshot | Contains information about existing snapshots of the RisingWave meta service. You can use this relation to get IDs of meta snapshots and then restore the meta service from a snapshot. For details, see [Back up and restore meta service](/docs/current/meta-backup/). |
+| rw\_meta\_snapshot | Contains information about existing snapshots of the RisingWave meta service. You can use this relation to get IDs of meta snapshots and then restore the meta service from a snapshot. For details, see [Back up and restore meta service](/operate/meta-backup). |
| rw\_parallel\_units | Contains information about parallel worker units used for executing database operations, including their unique IDs, worker IDs, and primary keys. |
| rw\_relation\_info | Contains low-level relation information about tables, sources, materialized views, and indexes that are available in the database. |
| rw\_relations | Contains information about relations in the database, including their unique IDs, names, types, schema IDs, and owners. |
diff --git a/troubleshoot/meta-failure.mdx b/troubleshoot/meta-failure.mdx
index 5964c0f9..dec92e9c 100644
--- a/troubleshoot/meta-failure.mdx
+++ b/troubleshoot/meta-failure.mdx
@@ -16,10 +16,9 @@ The observed issue is most likely a result of ETCD experiencing fluctuations, wh
## Solutions
-1. Check the [notes about disks for etcd in our documentation](/docs/current/hardware-requirements/#etcd).
-2. Check etcd configures, whether `-auto-compaction-mode`, `-max-request-bytes` are set properly.
-3. If only one meta node is deployed, you can set the parameter `meta_leader_lease_secs` to `86400` to avoid impact on leader election by the disk performance. For multi-node deployment, you can also increase the value of this parameter.
-4. For better performance and stability of the cluster, it is recommended to use higher-performance disks and configure etcd correctly.
+1. Check the etcd configuration, for example whether `--auto-compaction-mode` and `--max-request-bytes` are set properly.
+2. If only one meta node is deployed, you can set the parameter `meta_leader_lease_secs` to `86400` to prevent disk performance from affecting leader election. For multi-node deployments, you can also increase the value of this parameter.
+3. For better performance and stability of the cluster, use higher-performance disks and configure etcd correctly.
## Further explanation
diff --git a/troubleshoot/node-failure.mdx b/troubleshoot/node-failure.mdx
index bf68281d..9328382e 100644
--- a/troubleshoot/node-failure.mdx
+++ b/troubleshoot/node-failure.mdx
@@ -50,7 +50,7 @@ Since compaction is an append-only operation and does not modify files in place,
RisingWave supports two types of metadata storage backends: etcd and relational databases (Postgres by default).
-etcd is designed to be a highly available and consistent key-value storage solution. However, after equipping etcd in the production environment for a while, we learned that etcd can be quite demanding for the quality of the disk it operates on. You can find more details about [etcd's hardware requirements](/docs/current/hardware-requirements/#etcd) in our documentation.
+etcd is designed to be a highly available and consistent key-value storage solution. However, after running etcd in production environments for a while, we learned that etcd can be quite demanding about the quality of the disk it operates on.
Therefore, we have decided to make RDS the default metadata storage backend starting from version v1.9.0 of RisingWave. Over time, we will gradually deprecate the support for etcd. This decision is based on the following factors:
diff --git a/troubleshoot/overview.mdx b/troubleshoot/overview.mdx
index 4dc578df..3bb46447 100644
--- a/troubleshoot/overview.mdx
+++ b/troubleshoot/overview.mdx
@@ -19,7 +19,7 @@ You can access RisingWave Dashboard at `http://localhost:5691` by default.
You can monitor the performance metrics of a RisingWave cluster, including the usage of resources like CPU, memory, and network, and the status of different nodes.
-RisingWave uses Prometheus for collecting data, and Grafana for visualization and alerting. This monitoring stack requires configuration. To configure the monitoring stack, follow the steps detailed in [Monitor a RisingWave cluster](/docs/current/monitor-risingwave-cluster/).
+RisingWave uses Prometheus for collecting data, and Grafana for visualization and alerting. This monitoring stack requires configuration. To configure the monitoring stack, follow the steps detailed in [Monitor a RisingWave cluster](/operate/monitor-risingwave-cluster).
After you complete the configuration, go to [http://localhost:3000](http://localhost:3000) to access Grafana from a local machine, or `http://<ip_address>:3000` to access Grafana from a different host, where `<ip_address>` is the IP address of the machine running the Grafana service. When prompted, enter the default credentials (username: `admin`; password: `prom-operator`).
diff --git a/troubleshoot/troubleshoot-oom.mdx b/troubleshoot/troubleshoot-oom.mdx
index c19116f3..630ba2e2 100644
--- a/troubleshoot/troubleshoot-oom.mdx
+++ b/troubleshoot/troubleshoot-oom.mdx
@@ -44,7 +44,7 @@ Barrier latency can be observed from Grafana dashboard - Barrier latency panel.
Instead of solely addressing the memory problem, we recommend investigating why the barrier is getting stuck. This issue could be caused by heavy streaming jobs, sudden impact of input traffic, or even some temporary issues.
-Please refer to [High latency](/docs/current/troubleshoot-high-latency/) for more details.
+Please refer to [High latency](/troubleshoot/troubleshoot-high-latency) for more details.
## OOM during prefetching
diff --git a/troubleshoot/troubleshoot-recovery-failure.mdx b/troubleshoot/troubleshoot-recovery-failure.mdx
index d475f046..4085e5a1 100644
--- a/troubleshoot/troubleshoot-recovery-failure.mdx
+++ b/troubleshoot/troubleshoot-recovery-failure.mdx
@@ -19,7 +19,7 @@ It’s important to identify the root cause of the issue. Some common reasons fo
How to identify:
1. When the meta node continues to enter the recovery state or when the actor keeps exiting during the recovery process.
-2. Check if the CN node is continuously restarting due to OOM, refer to: [Out-of-memory](/docs/current/troubleshoot-oom/).
+2. Check if the CN node is continuously restarting due to OOM, refer to: [Out-of-memory](/troubleshoot/troubleshoot-oom).
Two solutions:
@@ -27,7 +27,7 @@ Two solutions:
2. Decrease the parallelism of the running streaming jobs or drop problematic streaming jobs.
1. `alter system set pause_on_next_bootstrap to true;`
2. Reboot the meta service, then the cluster will enter safe mode after recovery.
- 3. Drop the problematic streaming jobs or scale in them using `risectl` , refer to: [Cluster scaling](/docs/current/k8s-cluster-scaling/).
+ 3. Drop the problematic streaming jobs or scale them in using `risectl`; refer to [Cluster scaling](/deploy/k8s-cluster-scaling).
4. Restart the meta node, or resume the cluster by: `risectl meta resume`.
### Unconventional CN scaling down
From 02111a94351fb7994aeabc627bb16626a47df5bd Mon Sep 17 00:00:00 2001
From: WanYixian
Date: Tue, 26 Nov 2024 15:41:21 +0800
Subject: [PATCH 08/10] save work
---
cloud/create-a-database-user.mdx | 2 +-
cloud/develop-overview.mdx | 2 +-
cloud/manage-sinks.mdx | 2 +-
cloud/manage-sources.mdx | 8 +++----
cloud/monitor-materialized-views.mdx | 2 +-
...singwave-to-monitor-risingwave-metrics.mdx | 3 ++-
ingestion/format-and-encode-parameters.mdx | 2 +-
ingestion/overview.mdx | 18 +++++++--------
ingestion/supported-sources-and-formats.mdx | 14 ++++++------
integrations/destinations/apache-doris.mdx | 2 +-
integrations/destinations/apache-iceberg.mdx | 2 +-
integrations/destinations/apache-kafka.mdx | 8 +++----
integrations/destinations/bigquery.mdx | 2 +-
.../destinations/cassandra-or-scylladb.mdx | 2 +-
integrations/destinations/clickhouse.mdx | 2 +-
integrations/destinations/delta-lake.mdx | 2 +-
integrations/destinations/elasticsearch.mdx | 2 +-
integrations/destinations/mysql.mdx | 2 +-
integrations/destinations/opensearch.mdx | 2 +-
integrations/destinations/postgresql.mdx | 2 +-
integrations/destinations/snowflake.mdx | 2 +-
integrations/other/dbt.mdx | 4 ++--
integrations/sources/apache-iceberg.mdx | 4 ++--
integrations/sources/kafka.mdx | 8 +++----
integrations/sources/mongodb-cdc.mdx | 2 +-
integrations/sources/mysql-cdc.mdx | 14 ++++++------
integrations/sources/nats-jetstream.mdx | 2 +-
integrations/sources/overview.mdx | 2 +-
integrations/sources/postgresql-cdc.mdx | 16 +++++++-------
integrations/sources/pulsar.mdx | 2 +-
integrations/sources/sql-server-cdc.mdx | 10 ++++-----
integrations/visualization/grafana.mdx | 8 +++----
integrations/visualization/superset.mdx | 2 +-
operate/access-control.mdx | 6 ++---
operate/dedicated-compute-node.mdx | 2 +-
...anage-a-large-number-of-streaming-jobs.mdx | 2 +-
operate/manage-secrets.mdx | 10 ++++-----
operate/monitor-statement-progress.mdx | 6 ++---
operate/tune-reserved-memory.mdx | 4 ++--
operate/view-configure-runtime-parameters.mdx | 6 ++---
operate/view-configure-system-parameters.mdx | 2 +-
performance/performance-best-practices.mdx | 4 ++--
processing/emit-on-window-close.mdx | 2 +-
processing/indexes.mdx | 14 ++++++------
.../maintain-wide-table-with-table-sinks.mdx | 2 +-
processing/overview.mdx | 4 ++--
processing/sql/time-windows.mdx | 2 +-
processing/time-travel-queries.mdx | 2 +-
processing/watermarks.mdx | 4 ++--
reference/key-concepts.mdx | 12 +++++-----
sql/commands/overview.mdx | 18 +++++++--------
sql/commands/sql-alter-materialized-view.mdx | 2 +-
sql/commands/sql-alter-sink.mdx | 2 +-
sql/commands/sql-alter-system.mdx | 2 +-
sql/commands/sql-alter-table.mdx | 10 ++++-----
sql/commands/sql-alter-user.mdx | 2 +-
sql/commands/sql-begin.mdx | 2 +-
sql/commands/sql-cancel-jobs.mdx | 4 ++--
sql/commands/sql-comment-on.mdx | 2 +-
sql/commands/sql-commit.mdx | 2 +-
sql/commands/sql-create-aggregate.mdx | 4 ++--
sql/commands/sql-create-database.mdx | 2 +-
sql/commands/sql-create-function.mdx | 4 ++--
sql/commands/sql-create-index.mdx | 8 +++----
sql/commands/sql-create-mv.mdx | 6 ++---
sql/commands/sql-create-schema.mdx | 2 +-
sql/commands/sql-create-secret.mdx | 2 +-
sql/commands/sql-create-sink-into.mdx | 4 ++--
sql/commands/sql-create-sink.mdx | 6 ++---
sql/commands/sql-create-source.mdx | 16 +++++++-------
sql/commands/sql-create-table.mdx | 22 +++++++++----------
sql/commands/sql-create-user.mdx | 2 +-
sql/commands/sql-create-view.mdx | 6 ++---
sql/commands/sql-delete.mdx | 2 +-
sql/commands/sql-describe.mdx | 2 +-
sql/commands/sql-drop-aggregate.mdx | 6 ++---
sql/commands/sql-drop-connection.mdx | 2 +-
sql/commands/sql-drop-database.mdx | 4 ++--
sql/commands/sql-drop-function.mdx | 6 ++---
sql/commands/sql-drop-index.mdx | 6 ++---
sql/commands/sql-drop-mv.mdx | 4 ++--
sql/commands/sql-drop-schema.mdx | 4 ++--
sql/commands/sql-drop-secret.mdx | 2 +-
sql/commands/sql-drop-sink.mdx | 2 +-
sql/commands/sql-drop-source.mdx | 8 +++----
sql/commands/sql-drop-table.mdx | 4 ++--
sql/commands/sql-drop-view.mdx | 6 ++---
sql/commands/sql-insert.mdx | 6 ++---
sql/commands/sql-select.mdx | 2 +-
sql/commands/sql-set-background-ddl.mdx | 4 ++--
sql/commands/sql-set-rw-implicit-flush.mdx | 2 +-
sql/commands/sql-show-connections.mdx | 2 +-
sql/commands/sql-show-create-index.mdx | 2 +-
sql/commands/sql-show-create-mv.mdx | 8 +++----
sql/commands/sql-show-create-source.mdx | 2 +-
sql/commands/sql-show-create-table.mdx | 4 ++--
sql/commands/sql-show-create-view.mdx | 4 ++--
sql/commands/sql-show-indexes.mdx | 2 +-
sql/commands/sql-show-internal-tables.mdx | 2 +-
sql/commands/sql-show-jobs.mdx | 4 ++--
sql/commands/sql-show-views.mdx | 4 ++--
sql/commands/sql-start-transaction.mdx | 4 ++--
sql/commands/sql-update.mdx | 2 +-
sql/data-types/casting.mdx | 2 +-
sql/functions/aggregate.mdx | 4 ++--
sql/functions/sys-admin.mdx | 2 +-
sql/functions/window-functions.mdx | 2 +-
sql/query-syntax/generated-columns.mdx | 2 +-
sql/system-catalogs/pg-catalog.mdx | 2 +-
sql/system-catalogs/rw-catalog.mdx | 4 ++--
sql/udfs/embedded-python-udfs.mdx | 4 ++--
sql/udfs/sql-udfs.mdx | 2 +-
sql/udfs/use-udfs-in-java.mdx | 2 +-
sql/udfs/use-udfs-in-javascript.mdx | 4 ++--
sql/udfs/use-udfs-in-python.mdx | 2 +-
sql/udfs/use-udfs-in-rust.mdx | 4 ++--
sql/udfs/user-defined-functions.mdx | 4 ++--
troubleshoot/troubleshoot-oom.mdx | 2 +-
118 files changed, 263 insertions(+), 262 deletions(-)
diff --git a/cloud/create-a-database-user.mdx b/cloud/create-a-database-user.mdx
index 2e09f253..2744a690 100644
--- a/cloud/create-a-database-user.mdx
+++ b/cloud/create-a-database-user.mdx
@@ -7,5 +7,5 @@ sidebarTitle: Create a user
* You can create a database user when [connecting to a project](/cloud/connect-to-a-project/).
* You can click **Create user** in the **Users** tab on the [project details page](/cloud/check-status-and-metrics/#check-project-details) to create a new user.
-* You can run the [CREATE USER](/docs/current/sql-create-user/) command to create a new user after [connecting to a project](/cloud/connect-to-a-project/) using the console or terminal.
+* You can run the [CREATE USER](/sql/commands/sql-create-user) command to create a new user after [connecting to a project](/cloud/connect-to-a-project/) using the console or terminal.
Ensure that you have logged in to the project with a user that has the `CREATEUSER` privilege. A super user has all privileges, including `CREATEUSER`.
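As a hedged sketch of that command (the name and password are placeholders):

```sql
CREATE USER user1 WITH PASSWORD 'p4ssw0rd';
```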
diff --git a/cloud/develop-overview.mdx b/cloud/develop-overview.mdx
index 3288f42c..4b3f52b7 100644
--- a/cloud/develop-overview.mdx
+++ b/cloud/develop-overview.mdx
@@ -115,7 +115,7 @@ RisingWave offers support for popular PostgreSQL drivers, enabling seamless inte
Continue to learn about RisingWave.
diff --git a/cloud/manage-sinks.mdx b/cloud/manage-sinks.mdx
index 31edf09e..d6298a54 100644
--- a/cloud/manage-sinks.mdx
+++ b/cloud/manage-sinks.mdx
@@ -21,4 +21,4 @@ Refer to [CREATE SINK](/sql/commands/sql-create-sink) in the RisingWave Database
If you no longer need to deliver data to a sink, you can drop the sink using SQL command.
-Refer to [DROP SINK](/docs/current/sql-drop-sink/) in the RisingWave Database documentation.
+Refer to [DROP SINK](/sql/commands/sql-drop-sink) in the RisingWave Database documentation.
diff --git a/cloud/manage-sources.mdx b/cloud/manage-sources.mdx
index c8971ea1..5cd9c6c3 100644
--- a/cloud/manage-sources.mdx
+++ b/cloud/manage-sources.mdx
@@ -3,7 +3,7 @@ title: "Manage sources"
description: "To ingest data into RisingWave, you must first create a source. A source refers to an external data feed that RisingWave can read from. You can connect RisingWave to a variety of external sources like databases and message brokers. After a source is connected, you can create materialized views to perform analysis or sinks for data transformations."
---
-For the complete list of supported sources and formats, see [Supported sources and formats](/docs/current/sql-create-source/#supported-sources).
+For the complete list of supported sources and formats, see [Supported sources and formats](/ingestion/supported-sources-and-formats).
## Create a source
@@ -25,7 +25,7 @@ You can create a source with one of the following methods:
### Using SQL command
-Refer to [CREARE SOURCE](/docs/current/sql-create-source/#supported-sources) in the RisingWave documentation. Select a connector to see the SQL syntax, options, and sample statement of connecting RisingWave to the connector.
+Refer to [CREATE SOURCE](/sql/commands/sql-create-source) in the RisingWave documentation. Select a connector to see the SQL syntax, options, and a sample statement for connecting RisingWave to the connector.
## Check a source
@@ -47,5 +47,5 @@ If you no longer require data from a source, drop the source connection with one
### Using SQL command
-* Use the [DROP SOURCE](/docs/current/sql-drop-source/) command to drop a source from the database.
-* Use the [DROP TABLE](/docs/current/sql-drop-table/) command if it's a materialized source.
+* Use the [DROP SOURCE](/sql/commands/sql-drop-source) command to drop a source from the database.
+* Use the [DROP TABLE](/sql/commands/sql-drop-table) command if it's a materialized source.
diff --git a/cloud/monitor-materialized-views.mdx b/cloud/monitor-materialized-views.mdx
index 86ee1e08..23638bb8 100644
--- a/cloud/monitor-materialized-views.mdx
+++ b/cloud/monitor-materialized-views.mdx
@@ -1,6 +1,6 @@
---
title: "Monitor materialized views"
-description: "You can view all [materialized views](/docs/current/key-concepts/#materialized-views) defined in the databases of a project."
+description: "You can view all [materialized views](/reference/key-concepts#materialized-views) defined in the databases of a project."
mode: wide
---
diff --git a/demos/use-risingwave-to-monitor-risingwave-metrics.mdx b/demos/use-risingwave-to-monitor-risingwave-metrics.mdx
index fd94332f..b4e8f6f5 100644
--- a/demos/use-risingwave-to-monitor-risingwave-metrics.mdx
+++ b/demos/use-risingwave-to-monitor-risingwave-metrics.mdx
@@ -1,10 +1,11 @@
---
title: "Use RisingWave to monitor RisingWave metrics"
-description: "RisingWave uses Prometheus to collect the system metrics. Prometheus is a powerful monitoring platform that provides an end-to-end solution from instrumenting applications to querying metrics."
---
## Overview
+RisingWave uses Prometheus to collect the system metrics. Prometheus is a powerful monitoring platform that provides an end-to-end solution from instrumenting applications to querying metrics.
+
However, Prometheus’s local storage is limited to single-node durability and scalability. To replicate data from local storage to remote storage systems, we can use a proxy service that sends data in JSON format to Kafka. Then RisingWave can read, store, and perform complex queries on the data from Kafka.
There are numerous RisingWave system metrics that Prometheus collects. The most convenient method of tracking these metrics would be using a live dashboard. Luckily, since RisingWave is Postgres-compatible, we can use Grafana to visualize the metrics changing over time by creating dashboards.
diff --git a/ingestion/format-and-encode-parameters.mdx b/ingestion/format-and-encode-parameters.mdx
index e83a1555..397ef0f6 100644
--- a/ingestion/format-and-encode-parameters.mdx
+++ b/ingestion/format-and-encode-parameters.mdx
@@ -1,6 +1,6 @@
---
title: "FORMAT and ENCODE parameters"
-description: "When creating a source or table using a connector, you need to specify the `FORMAT` and `ENCODE` section of the [CREATE SOURCE](/docs/current/sql-create-source/) or [CREATE TABLE](/docs/current/sql-create-source/) statement. This topic provides an overview of the formats and encoding options. For the complete list of formats we support, see [Supported sources and formats](/ingestion/supported-sources-and-formats)"
+description: "When creating a source or table using a connector, you need to specify the `FORMAT` and `ENCODE` section of the [CREATE SOURCE](/sql/commands/sql-create-source) or [CREATE TABLE](/sql/commands/sql-create-source) statement. This topic provides an overview of the formats and encoding options. For the complete list of formats we support, see [Supported sources and formats](/ingestion/supported-sources-and-formats)"
sidebarTitle: Formats and encoding
mode: wide
---
diff --git a/ingestion/overview.mdx b/ingestion/overview.mdx
index e530459b..898d0157 100644
--- a/ingestion/overview.mdx
+++ b/ingestion/overview.mdx
@@ -4,7 +4,7 @@ sidebarTitle: Overview
description: "RisingWave supports a variety of data ingestion methods."
---
-To know the difference between stream processing and ad-hoc query, please refer to [Ad-hoc (on read) vs. Streaming (on write)](/docs/current/transform-overview/#ad-hoc-on-read-vs-streaming-on-write).
+To learn the difference between stream processing and ad-hoc queries, refer to [Ad-hoc (on read) vs. Streaming (on write)](/processing/overview#ad-hoc-on-read-vs-streaming-on-write).
* **Streaming ingestion from external systems**: This is tied to a stream processing task, continuously monitoring and synchronizing changes from external systems.
* **Ad-hoc ingestion from external systems**: This is bound to an ad-hoc query, where RisingWave queries the current data from the external system for processing during the query.
@@ -48,7 +48,7 @@ SELECT * FROM source_name
WHERE _rw_kafka_timestamp > now() - interval '10 minute';
```
-For specific source types, their support for streaming ingestion and ad-hoc ingestion varies. Please refer to [our documentation](/docs/current/sources/) for the specific source.
+Support for streaming ingestion and ad-hoc ingestion varies by source type. Refer to [our documentation](/integrations/sources/overview) for the specific source.
### Table with connectors
@@ -68,17 +68,17 @@ WITH (
The statement will create a streaming job that continuously ingests data from the Kafka topic to the table and the data will be stored in RisingWave's internal storage, which brings the following benefits:
-1. **Improved ad-hoc query performance:** When users execute queries such as `SELECT * FROM table_on_kafka`, the query engine will directly access the data from RisingWave's internal storage, eliminating unnecessary network overhead and avoiding read pressure on upstream systems. Additionally, users can create [indexes](/docs/current/indexes/) on the table to accelerate queries.
+1. **Improved ad-hoc query performance:** When users execute queries such as `SELECT * FROM table_on_kafka`, the query engine will directly access the data from RisingWave's internal storage, eliminating unnecessary network overhead and avoiding read pressure on upstream systems. Additionally, users can create [indexes](/processing/indexes) on the table to accelerate queries.
2. **Allow defining primary keys:** With the help of its internal storage, RisingWave can efficiently maintain primary key constraints. Users can define a primary key on a specific column of the table and define different behaviors for primary key conflicts with [ON CONFLICT clause](/sql/commands/sql-create-table#pk-conflict-behavior).
3. **Ability to handle delete/update changes**: Based on the primary key definition, RisingWave can efficiently process delete and update operations synchronized from upstream. For systems that synchronize delete/update operations from external systems, such as a database's CDC stream or UPSERT-format messages from message queues, creating a source is **not** allowed; a table with connectors is required.
4. **Stronger consistency guarantee**: When using a table with connectors, all downstream jobs will be guaranteed to have a consistent view of the data persisted in the table; while for source, different jobs may see inconsistent results due to different ingestion speed or data retention in the external system.
-5. **Greater flexibility**: Like regular tables, you can use DML statements like [INSERT](/docs/current/sql-insert/), [UPDATE](/docs/current/sql-update/) and [DELETE](/docs/current/sql-delete/) to insert or modify data in tables with connectors, and use [CREATE SINK INTO TABLE](/docs/current/sql-create-sink-into/) to merge other data streams into the table.
+5. **Greater flexibility**: Like regular tables, you can use DML statements like [INSERT](/sql/commands/sql-insert), [UPDATE](/sql/commands/sql-update) and [DELETE](/sql/commands/sql-delete) to insert or modify data in tables with connectors, and use [CREATE SINK INTO TABLE](/sql/commands/sql-create-sink-into) to merge other data streams into the table.
## DML on tables
### Insert data into tables
-You can load data in batch to RisingWave by creating a table ([CREATE TABLE](/sql/commands/sql-create-table)) and then inserting data into it ([INSERT](/docs/current/sql-insert/)). For example, the statement below creates a table `website_visits` and inserts 5 rows of data.
+You can load data into RisingWave in batches by creating a table ([CREATE TABLE](/sql/commands/sql-create-table)) and then inserting data into it ([INSERT](/sql/commands/sql-insert)). For example, the statement below creates a table `website_visits` and inserts 5 rows of data.
```sql
CREATE TABLE website_visits (
@@ -98,7 +98,7 @@ INSERT INTO website_visits (timestamp, user_id, page_id, action) VALUES
### Use `INSERT SELECT` to do bulk ingestion
-For sources that only support ad-hoc ingestion but not streaming ingestion, such as the [Iceberg source](/docs/next/ingest-from-iceberg/), `insert ... select ...` can be used to implement bulk data import into the table, and to convert the data into a stream of changes that are synchronized downstream to the table.
+For sources that only support ad-hoc ingestion but not streaming ingestion, such as the [Iceberg source](/integrations/sources/apache-iceberg), you can use `INSERT ... SELECT ...` to bulk-import data into a table, converting it into a stream of changes that is synchronized to the table's downstream jobs.
```sql
CREATE SOURCE source_iceberg_t1
@@ -128,6 +128,6 @@ The information presented above provides a brief overview of the data ingestion
* For more detailed information about the types, formats, and encoding options of sources, see [Formats and encoding](/ingestion/format-and-encode-parameters).
* For the complete list of the sources and formats supported in RisingWave, see [Supported sources and formats](/ingestion/supported-sources-and-formats).
* To learn how to manage schemas and ingest additional fields from sources:
- * [Modify source or table schemas](/docs/current/modify-schemas/)
- * [Ingest additional fields with INCLUDE clause](/docs/current/include-clause/)
-* To learn about how to ingest data from a particular source, see specific [Data ingestion guides](/docs/current/sources/).
+ * [Modify source or table schemas](/ingestion/modify-source-or-table-schemas)
+ * [Ingest additional fields with INCLUDE clause](/ingestion/ingest-additional-fields-with-include-clause)
+* To learn about how to ingest data from a particular source, see specific [Data ingestion guides](/integrations/sources/overview).
diff --git a/ingestion/supported-sources-and-formats.mdx b/ingestion/supported-sources-and-formats.mdx
index 6cd846ae..9522962e 100644
--- a/ingestion/supported-sources-and-formats.mdx
+++ b/ingestion/supported-sources-and-formats.mdx
@@ -13,16 +13,16 @@ To ingest data in formats marked with "T", you need to create tables (with conne
| Connector | Version | Format |
| :------------ | :------------ | :------------------- |
| [Kafka](/integrations/sources/kafka) | 3.1.0 or later versions | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Debezium AVRO](#debezium-avro) (T), [DEBEZIUM\_MONGO\_JSON](#debezium-mongo-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T), [Upsert JSON](#upsert-json) (T), [Upsert AVRO](#upsert-avro) (T), [Bytes](#bytes) |
-| [Redpanda](/docs/current/ingest-from-redpanda/) | Latest | [Avro](#avro), [JSON](#json), [protobuf](#protobuf) |
+| [Redpanda](/integrations/sources/redpanda) | Latest | [Avro](#avro), [JSON](#json), [protobuf](#protobuf) |
| [Pulsar](/integrations/sources/pulsar) | 2.8.0 or later versions | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T) |
-| [Kinesis](/docs/current/ingest-from-kinesis/) | Latest | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T) |
+| [Kinesis](/integrations/sources/kinesis) | Latest | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T) |
| [PostgreSQL CDC](/integrations/sources/postgresql-cdc) | 10, 11, 12, 13, 14 | [Debezium JSON](#debezium-json) (T) |
| [MySQL CDC](/integrations/sources/mysql-cdc) | 5.7, 8.0 | [Debezium JSON](#debezium-json) (T) |
-| [CDC via Kafka](/docs/current/ingest-from-cdc/) | [Debezium JSON](#debezium-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T) | |
+| [CDC via Kafka](/ingestion/change-data-capture-with-risingwave) | | [Debezium JSON](#debezium-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T) |
| [Amazon S3](/integrations/sources/s3) | Latest | [JSON](#json), CSV |
-| [Load generator](/docs/current/ingest-from-datagen/) | Built-in | [JSON](#json) |
-| [Google Pub/Sub](/docs/current/ingest-from-google-pubsub/) | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T) | |
-| [Google Cloud Storage](/docs/current/ingest-from-gcs/) | [JSON](#json) | |
+| [Load generator](/ingestion/generate-test-data) | Built-in | [JSON](#json) |
+| [Google Pub/Sub](/integrations/sources/google-pub-sub) | | [Avro](#avro), [JSON](#json), [protobuf](#protobuf), [Debezium JSON](#debezium-json) (T), [Maxwell JSON](#maxwell-json) (T), [Canal JSON](#canal-json) (T) |
+| [Google Cloud Storage](/integrations/sources/google-cloud-storage) | | [JSON](#json) |
When a source is created, RisingWave does not ingest data immediately. RisingWave starts to process data when a materialized view is created based on the source.
@@ -207,7 +207,7 @@ ENCODE PROTOBUF (
)
```
-For more information on supported protobuf types, refer to [Supported protobuf types](/docs/current/protobuf-types/supported-protobuf-types).
+For more information on supported protobuf types, refer to [Supported protobuf types](/sql/data-types/supported-protobuf-types).
### Bytes
diff --git a/integrations/destinations/apache-doris.mdx b/integrations/destinations/apache-doris.mdx
index 60cd8bdc..b1e7f129 100644
--- a/integrations/destinations/apache-doris.mdx
+++ b/integrations/destinations/apache-doris.mdx
@@ -7,7 +7,7 @@ description: "This guide describes how to sink data from RisingWave to Apache Do
## Prerequisites
* Ensure that RisingWave can access the network where the Doris backend and frontend are located. For more details, see [Synchronize Data Through External Table](https://doris.apache.org/docs/dev/data-operate/import/import-scenes/external-table-load/).
-* Ensure you have an upstream materialized view or source that you can sink data from. For more details, see [CREATE SOURCE](/docs/current/sql-create-source/) or [CREATE MATERIALIZED VIEW](/docs/current/sql-create-mv/).
+* Ensure you have an upstream materialized view or source that you can sink data from. For more details, see [CREATE SOURCE](/sql/commands/sql-create-source) or [CREATE MATERIALIZED VIEW](/sql/commands/sql-create-mv).
* Ensure that for `struct` elements, the name and type are the same in Doris and RisingWave. If they are not the same, the values will be set to `NULL` or to default values. For more details on the `struct` data type, see [Struct](/sql/data-types/struct).
## Syntax
diff --git a/integrations/destinations/apache-iceberg.mdx b/integrations/destinations/apache-iceberg.mdx
index fd247618..a399f8d3 100644
--- a/integrations/destinations/apache-iceberg.mdx
+++ b/integrations/destinations/apache-iceberg.mdx
@@ -230,7 +230,7 @@ CREATE TABLE demo.dev.`table`
### Create an upstream materialized view or source[](#create-an-upstream-materialized-view-or-source "Direct link to Create an upstream materialized view or source")
-The following query creates an append-only source. For more details on creating a source, see [CREATE SOURCE](/docs/current/sql-create-source/) .
+The following query creates an append-only source. For more details on creating a source, see [CREATE SOURCE](/sql/commands/sql-create-source).
```sql
CREATE SOURCE s1_source (
diff --git a/integrations/destinations/apache-kafka.mdx b/integrations/destinations/apache-kafka.mdx
index b299beae..3ac85f31 100644
--- a/integrations/destinations/apache-kafka.mdx
+++ b/integrations/destinations/apache-kafka.mdx
@@ -23,7 +23,7 @@ FORMAT data_format ENCODE data_encode [ (
```
-Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/docs/current/sql-identifiers/).
+Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/sql/identifiers).
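
For instance, a sketch with a hypothetical mixed-case sink name:

```sql
-- "MySink" keeps its case because it is double-quoted;
-- an unquoted MySink would be folded to mysink.
CREATE SINK "MySink" FROM mv1
WITH (
    connector = 'kafka',
    properties.bootstrap.server = 'broker:9092',
    topic = 'demo_topic'
) FORMAT PLAIN ENCODE JSON;
```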
## Basic parameters
@@ -34,7 +34,7 @@ All `WITH` options are required unless explicitly mentioned as optional.
| :-------------------------- | :------------- |
| sink\_name | Name of the sink to be created. |
| sink\_from | A clause that specifies the direct source from which data will be output. `sink_from` can be a materialized view or a table. Either this clause or a SELECT query must be specified. |
-| AS select\_query | A SELECT query that specifies the data to be output to the sink. Either this query or a FROM clause must be specified. See [SELECT](/docs/current/sql-select/) for the syntax and examples of the SELECT command. |
+| AS select\_query | A SELECT query that specifies the data to be output to the sink. Either this query or a FROM clause must be specified. See [SELECT](/sql/commands/sql-select) for the syntax and examples of the SELECT command. |
| connector | Sink connector type must be `kafka` for Kafka sink. |
| properties.bootstrap.server | Address of the Kafka broker. Format: `ip:port`. If there are multiple brokers, separate them with commas. |
| topic | Address of the Kafka topic. One sink can only correspond to one topic. |
@@ -233,7 +233,7 @@ FORMAT PLAIN ENCODE JSON;
## Create sink with PrivateLink connection
-If your Kafka sink service is located in a different VPC from RisingWave, use AWS PrivateLink or GCP Private Service Connect to establish a secure and direct connection. For details on how to set up an AWS PrivateLink connection, see [Create an AWS PrivateLink connection](/docs/current/sql-create-connection/#create-an-aws-privatelink-connection).
+If your Kafka sink service is located in a different VPC from RisingWave, use AWS PrivateLink or GCP Private Service Connect to establish a secure and direct connection. For details on how to set up an AWS PrivateLink connection, see [Create an AWS PrivateLink connection](/sql/commands/sql-create-connection#create-an-aws-privatelink-connection).
To create a Kafka sink with a PrivateLink connection, in the WITH section of your `CREATE SINK` statement, specify the following parameters.
@@ -241,7 +241,7 @@ To create a Kafka sink with a PrivateLink connection, in the WITH section of you
| :------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| privatelink.targets | The PrivateLink targets that correspond to the Kafka brokers. The targets should be in JSON format. Note that each target listed corresponds to each broker specified in the properties.bootstrap.server field. If the order is incorrect, there will be connectivity issues. |
| privatelink.endpoint | The DNS name of the VPC endpoint. If you're using RisingWave Cloud, you can find the auto-generated endpoint after you created a connection. See details in [Create a VPC connection](/cloud/create-a-connection/#whats-next). |
-| connection.name | The name of the connection, which comes from the connection created using the [CREATE CONNECTION](/docs/current/sql-create-connection/) statement. Omit this parameter if you have provisioned a VPC endpoint using privatelink.endpoint (recommended). |
+| connection.name | The name of the connection, which comes from the connection created using the [CREATE CONNECTION](/sql/commands/sql-create-connection) statement. Omit this parameter if you have provisioned a VPC endpoint using privatelink.endpoint (recommended). |
Here is an example of creating a Kafka sink using a PrivateLink connection. Notice that `{"port": 8001}` corresponds to the broker `ip1:9092`, and `{"port": 8002}` corresponds to the broker `ip2:9092`.
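
A sketch of what that statement might look like; the sink name, upstream materialized view, topic, and endpoint below are placeholders:

```sql
CREATE SINK sink_pl FROM mv1
WITH (
    connector = 'kafka',
    properties.bootstrap.server = 'ip1:9092,ip2:9092',
    topic = 'demo_topic',
    privatelink.endpoint = '10.0.0.1',
    -- Targets are matched to brokers by position:
    -- {"port": 8001} -> ip1:9092, {"port": 8002} -> ip2:9092.
    privatelink.targets = '[{"port": 8001}, {"port": 8002}]'
) FORMAT PLAIN ENCODE JSON;
```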
diff --git a/integrations/destinations/bigquery.mdx b/integrations/destinations/bigquery.mdx
index 2e4532dd..3a960891 100644
--- a/integrations/destinations/bigquery.mdx
+++ b/integrations/destinations/bigquery.mdx
@@ -44,7 +44,7 @@ WITH (
| :---------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| _sink\_name_ | Name of the sink to be created. |
| _sink\_from_ | A clause that specifies the direct source from which data will be output. `sink_from` can be a materialized view or a table. Either this clause or `select_query` query must be specified. |
-| AS _select\_query_ | A SELECT query that specifies the data to be output to the sink. Either this query or a `sink_from` clause must be specified. See [SELECT](/docs/current/sql-select/) for the syntax and examples of the SELECT command. |
+| AS _select\_query_ | A SELECT query that specifies the data to be output to the sink. Either this query or a `sink_from` clause must be specified. See [SELECT](/sql/commands/sql-select) for the syntax and examples of the SELECT command. |
| type | Required. Data format. Allowed formats: `append-only` (output data with insert operations) and `upsert` (for this type, you need to set corresponding permissions and primary keys based on the [Document of BigQuery](https://cloud.google.com/bigquery/docs/change-data-capture)). |
| force\_append\_only | Optional. If true, forces the sink to be append-only, even if it cannot be. |
| bigquery.local.path | Optional. The file path leading to the JSON key file located in your local server. Details can be found in [Service Accounts](https://console.cloud.google.com/iam-admin/serviceaccounts) under your Google Cloud account. Either `bigquery.local.path` or `bigquery.s3.path` must be specified. |
diff --git a/integrations/destinations/cassandra-or-scylladb.mdx b/integrations/destinations/cassandra-or-scylladb.mdx
index 87d6e7bc..bf26f8c2 100644
--- a/integrations/destinations/cassandra-or-scylladb.mdx
+++ b/integrations/destinations/cassandra-or-scylladb.mdx
@@ -42,7 +42,7 @@ Once the sink is created, data changes will be streamed to the specified table.
| :----------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| _sink\_name_ | Name of the sink to be created. |
| _sink\_from_ | A clause that specifies the direct source from which data will be output. `sink_from` can be a materialized view or a table. Either this clause or `select_query` query must be specified. |
-| AS _select\_query_ | A SELECT query that specifies the data to be output to the sink. Either this query or a `sink_from` clause must be specified. See [SELECT](/docs/current/sql-select/) for the syntax and examples of the SELECT command. |
+| AS _select\_query_ | A SELECT query that specifies the data to be output to the sink. Either this query or a `sink_from` clause must be specified. See [SELECT](/sql/commands/sql-select) for the syntax and examples of the SELECT command. |
| type | Required. Specify if the sink should be upsert or append-only. If creating an upsert sink, you must specify a primary key. |
| primary\_key | Optional. A string of a list of column names, separated by commas, that specifies the primary key of the Cassandra sink. |
| force\_append\_only | If true, forces the sink to be append-only, even if it cannot be. |
diff --git a/integrations/destinations/clickhouse.mdx b/integrations/destinations/clickhouse.mdx
index 7dee26f5..a739f063 100644
--- a/integrations/destinations/clickhouse.mdx
+++ b/integrations/destinations/clickhouse.mdx
@@ -71,7 +71,7 @@ PRIMARY KEY (seq_id);
### Create an upstream materialized view or source
-The following query creates an append-only source. For more details on creating a source, see [CREATE SOURCE](/docs/current/sql-create-source/).
+The following query creates an append-only source. For more details on creating a source, see [CREATE SOURCE](/sql/commands/sql-create-source).
```sql
CREATE SOURCE s1_source (
diff --git a/integrations/destinations/delta-lake.mdx b/integrations/destinations/delta-lake.mdx
index 61539350..7f453d40 100644
--- a/integrations/destinations/delta-lake.mdx
+++ b/integrations/destinations/delta-lake.mdx
@@ -63,7 +63,7 @@ spark-sql --packages io.delta:delta-core_2.12:2.2.0,org.apache.hadoop:hadoop-aws
### Create an upstream materialized view or source
-The following query creates a source using the built-in load generator, which creates mock data. For more details, see [CREATE SOURCE](/docs/current/sql-create-source/) and [Generate test data](/docs/current/ingest-from-datagen/). You can transform the data using additional SQL queries if needed.
+The following query creates a source using the built-in load generator, which creates mock data. For more details, see [CREATE SOURCE](/sql/commands/sql-create-source) and [Generate test data](/ingestion/generate-test-data). You can transform the data using additional SQL queries if needed.
```sql
CREATE SOURCE s1_source (id int, name varchar)
diff --git a/integrations/destinations/elasticsearch.mdx b/integrations/destinations/elasticsearch.mdx
index 92b175f1..20d28e7b 100644
--- a/integrations/destinations/elasticsearch.mdx
+++ b/integrations/destinations/elasticsearch.mdx
@@ -51,7 +51,7 @@ WITH (
| :------------------- | :---------------- |
| sink\_name | Name of the sink to be created. |
| sink\_from | A clause that specifies the direct source from which data will be output. `sink_from` can be a materialized view or a table. Either this clause or a SELECT query must be specified. |
-| AS select\_query | A SELECT query that specifies the data to be output to the sink. Either this query or a FROM clause must be specified. See [SELECT](/docs/current/sql-select/) for the syntax and examples of the SELECT command. |
+| AS select\_query | A SELECT query that specifies the data to be output to the sink. Either this query or a FROM clause must be specified. See [SELECT](/sql/commands/sql-select) for the syntax and examples of the SELECT command. |
| primary\_key | Optional. The primary keys of the sink. If the primary key has multiple columns, set a delimiter in the delimiter parameter below to join them. |
| index | Required if `index_column` is not set. Name of the Elasticsearch index that you want to write data to. |
| index\_column | Allows writing to multiple indexes dynamically based on the column's value. This parameter is mutually exclusive with `index`. When `index` is set, the write index is `index`; when `index_column` is set, the target index is the value of this column, and it must be of `string` type. Avoid setting this column as the first column, because the sink defaults to the first column as the key (see the sketch below). |
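
A minimal sketch, assuming an upstream materialized view `mv1` with a key column `id` and a string column `idx` that holds the target index name:

```sql
CREATE SINK es_dynamic_sink FROM mv1
WITH (
    connector = 'elasticsearch',
    url = 'http://localhost:9200',
    primary_key = 'id',
    -- Each row is routed to the index named by the value of its idx column.
    index_column = 'idx'
);
```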
diff --git a/integrations/destinations/mysql.mdx b/integrations/destinations/mysql.mdx
index d94c049b..d8cd4938 100644
--- a/integrations/destinations/mysql.mdx
+++ b/integrations/destinations/mysql.mdx
@@ -121,7 +121,7 @@ All `WITH` options are required.
| :------------------ | :------------- |
| sink\_name | Name of the sink to be created. |
| sink\_from | A clause that specifies the direct source from which data will be output. `sink_from` can be a materialized view or a table. Either this clause or a SELECT query must be specified. |
-| AS select\_query | A SELECT query that specifies the data to be output to the sink. Either this query or a FROM clause must be specified. See [SELECT](/docs/current/sql-select/) for the syntax and examples of the SELECT command. |
+| AS select\_query | A SELECT query that specifies the data to be output to the sink. Either this query or a FROM clause must be specified. See [SELECT](/sql/commands/sql-select) for the syntax and examples of the SELECT command. |
| connector | Sink connector type must be `jdbc` for MySQL sink. |
| jdbc.url | The JDBC URL of the destination database necessary for the driver to recognize and connect to the database. |
| jdbc.query.timeout | Specifies the timeout for the operations to downstream. If not set, the default is 10 minutes. |
diff --git a/integrations/destinations/opensearch.mdx b/integrations/destinations/opensearch.mdx
index 9802eb67..58eb9c27 100644
--- a/integrations/destinations/opensearch.mdx
+++ b/integrations/destinations/opensearch.mdx
@@ -41,7 +41,7 @@ WITH (
| :--------------- | :-------------------- |
| sink\_name | Name of the sink to be created. |
| sink\_from | A clause that specifies the direct source from which data will be output. `sink_from` can be a materialized view or a table. Either this clause or a SELECT query must be specified. |
-| AS select\_query | A SELECT query that specifies the data to be output to the sink. Either this query or a FROM clause must be specified. See [SELECT](/docs/current/sql-select/) for the syntax and examples of the SELECT command. |
+| AS select\_query | A SELECT query that specifies the data to be output to the sink. Either this query or a FROM clause must be specified. See [SELECT](/sql/commands/sql-select) for the syntax and examples of the SELECT command. |
| primary\_key | Optional. The primary keys of the sink. If the primary key has multiple columns, set a delimiter in the delimiter parameter below to join them. |
| index | Required if index\_column is not set. Name of the OpenSearch index that you want to write data to. |
| index\_column | This parameter enables you to create a sink that writes to multiple indexes dynamically. The sink decides which index to write to based on a column. It is mutually exclusive with the parameter `index`; exactly one of them must be set. When `index` is set, the write index of OpenSearch is `index`. When `index_column` is set, the index of OpenSearch is the value of this column, which must be of string type. Since the OpenSearch sink defaults to the first column as the key, it is not recommended to place this column as the first column. |
diff --git a/integrations/destinations/postgresql.mdx b/integrations/destinations/postgresql.mdx
index 2ffb5404..3321b998 100644
--- a/integrations/destinations/postgresql.mdx
+++ b/integrations/destinations/postgresql.mdx
@@ -96,7 +96,7 @@ All `WITH` options are required unless noted.
| :------------------ | :-------------- |
| sink\_name | Name of the sink to be created. |
| sink\_from | A clause that specifies the direct source from which data will be output. `sink_from` can be a materialized view or a table. Either this clause or a SELECT query must be specified. |
-| AS select\_query | A SELECT query that specifies the data to be output to the sink. Either this query or a FROM clause must be specified. See [SELECT](/docs/current/sql-select/) for the syntax and examples of the SELECT command. |
+| AS select\_query | A SELECT query that specifies the data to be output to the sink. Either this query or a FROM clause must be specified. See [SELECT](/sql/commands/sql-select) for the syntax and examples of the SELECT command. |
| connector | Sink connector type must be `jdbc` for PostgreSQL sink. |
| jdbc.url | The JDBC URL of the destination database necessary for the driver to recognize and connect to the database. |
| jdbc.query.timeout | Specifies the timeout for the operations to downstream. If not set, the default is 10 minutes. |
diff --git a/integrations/destinations/snowflake.mdx b/integrations/destinations/snowflake.mdx
index f0c23376..07e5fe89 100644
--- a/integrations/destinations/snowflake.mdx
+++ b/integrations/destinations/snowflake.mdx
@@ -159,7 +159,7 @@ FROM
sub;
```
-Note that RisingWave uses `changelog` to transform streaming data into incremental logs. In the example above, `changelog_op` represents the type of modification (Insert/Update/Delete), while `_changelog_row_id` indicates the order of the modification. For more information, see [AS CHANGELOG](/docs/current/sql-as-changelog/).
+Note that RisingWave uses `changelog` to transform streaming data into incremental logs. In the example above, `changelog_op` represents the type of modification (Insert/Update/Delete), while `_changelog_row_id` indicates the order of the modification. For more information, see [AS CHANGELOG](/sql/commands/sql-as-changelog).
```sql Create sink in RisingWave
CREATE SINK snowflake_sink FROM ss_mv WITH (
diff --git a/integrations/other/dbt.mdx b/integrations/other/dbt.mdx
index 1c9bebba..68083947 100644
--- a/integrations/other/dbt.mdx
+++ b/integrations/other/dbt.mdx
@@ -61,9 +61,9 @@ RisingWave accepts these [materializations](https://docs.getdbt.com/docs/build/m
| :--------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| table | This materialization creates a table. To use this materialization, add `{{ config(materialized='table') }}` to your model SQL files. |
| view | Create a view. To use this materialization, add `{{ config(materialized='view') }}` to your model SQL files. |
-| ephemeral | This materialization uses [common table expressions](/docs/current/query-syntax-with-clause/) in RisingWave under the hood. To use this materialization, add `{{ config(materialized='ephemeral') }}` to your model SQL files. |
+| ephemeral | This materialization uses [common table expressions](/sql/query-syntax/with-clause) in RisingWave under the hood. To use this materialization, add `{{ config(materialized='ephemeral') }}` to your model SQL files. |
| materializedview | To be deprecated. It is available only for backward compatibility purposes. Use materialized\_view instead. |
-| materialized\_view | Creates a [materialized view](/docs/current/sql-create-mv/). This materialization corresponds the incremental one in dbt. To use this materialization, add `{{ config(materialized='materialized_view') }} `to your model SQL files. |
+| materialized\_view | Creates a [materialized view](/sql/commands/sql-create-mv). This materialization corresponds to the incremental one in dbt. To use this materialization, add `{{ config(materialized='materialized_view') }}` to your model SQL files (see the example model after this table). |
| incremental | Use materialized\_view instead. Since RisingWave is designed to use materialized views to manage data transformation in an incremental way, you can just use the materialized\_view materialization. |
| source | Defines a source. To use this materialization, add `{{ config(materialized='source') }}` to your model SQL files. You need to provide your create source statement as a whole in this model. See [Example model files](#example-model-files) for details. |
| table\_with\_connector | Defines a table with connector settings. A table with connector settings is similar to a source. The difference is that a table object with connector settings persists raw streaming data in the source, while a source object does not. To use this materialization, add `{{ config(materialized='table_with_connector') }}` to your model SQL files. You need to provide your create table with connector statement as a whole in this model (see [Example model files](#example-model-files) for details). Because a dbt table has its own semantics, RisingWave uses table\_with\_connector to distinguish it from a dbt table. |
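
As an example of the `materialized_view` materialization, here is a sketch of a model file (say `user_event_counts.sql`), assuming an upstream model named `user_events`:

```sql
{{ config(materialized='materialized_view') }}

SELECT
    user_id,
    COUNT(*) AS event_count
FROM {{ ref('user_events') }}
GROUP BY user_id
```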
diff --git a/integrations/sources/apache-iceberg.mdx b/integrations/sources/apache-iceberg.mdx
index 5b007873..3208387b 100644
--- a/integrations/sources/apache-iceberg.mdx
+++ b/integrations/sources/apache-iceberg.mdx
@@ -21,7 +21,7 @@ WITH (
```
-You don’t need to specify the column name for the Iceberg source, as RisingWave can derive it from the Iceberg table metadata directly. Use [DESCRIBE](/docs/current/sql-describe/) statement to view the column names and data types.
+You don’t need to specify column names for the Iceberg source, as RisingWave can derive them from the Iceberg table metadata directly. Use the [DESCRIBE](/sql/commands/sql-describe) statement to view the column names and data types.
## Parameters
@@ -177,7 +177,7 @@ SELECT * FROM s FOR SYSTEM_VERSION AS OF 3023402865675048688;
## System tables
-We currently support system tables [rw\_iceberg\_files and rw\_iceberg\_snapshots](/docs/current/rw_catalog/introduction#available-risingwave-catalogs). `rw_iceberg_files` contains the current files of the Iceberg source or table. Here is a simple example:
+We currently support system tables [rw\_iceberg\_files and rw\_iceberg\_snapshots](/sql/system-catalogs/rw-catalog#available-risingwave-catalogs). `rw_iceberg_files` contains the current files of the Iceberg source or table. Here is a simple example:
Read Iceberg files
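
A minimal query, assuming the Iceberg source `s` created above:

```sql
SELECT * FROM rw_iceberg_files;
```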
diff --git a/integrations/sources/kafka.mdx b/integrations/sources/kafka.mdx
index b23983d1..8ff26140 100644
--- a/integrations/sources/kafka.mdx
+++ b/integrations/sources/kafka.mdx
@@ -99,7 +99,7 @@ Set `properties.ssl.endpoint.identification.algorithm` to `none` to bypass the v
### Specific parameters for Amazon MSK
-There are some specific parameters for Amazon Managed Streaming for Apache Kafka (MSK), please see [Access MSK in RisingWave](/docs/current/connector-amazon-msk/#access-msk-in-risingwave) for more details.
+There are some specific parameters for Amazon Managed Streaming for Apache Kafka (MSK), please see [Access MSK in RisingWave](/integrations/sources/amazon-msk#access-msk-in-risingwave) for more details.
## Examples[](#examples "Direct link to Examples")
@@ -321,7 +321,7 @@ To learn about compatibility types for Schema Registry and the changes allowed,
**PREMIUM EDITION FEATURE**
-This feature is exclusive to RisingWave Premium Edition that offers advanced capabilities beyond the free versions. For a full list of premium features, see [RisingWave Premium Edition](/docs/current/rw-premium-edition-intro/). If you encounter any questions, please contact sales team at [sales@risingwave-labs.com](mailto:sales@risingwave-labs.com).
+This feature is exclusive to the RisingWave Premium Edition, which offers advanced capabilities beyond the free versions. For a full list of premium features, see [RisingWave Premium Edition](/get-started/rw-premium-edition-intro). If you have any questions, please contact our sales team at [sales@risingwave-labs.com](mailto:sales@risingwave-labs.com).
AWS Glue Schema Registry is a serverless feature of AWS Glue that allows you to centrally manage and enforce schemas for data streams, enabling data validation and compatibility checks. It helps in improving the quality of data streams by providing a central repository for managing and enforcing schemas across various AWS services and custom applications.
@@ -354,7 +354,7 @@ ENCODE AVRO (
## Create source with PrivateLink connection
-If your Kafka source service is located in a different VPC from RisingWave, use AWS PrivateLink to establish a secure and direct connection. For details on how to set up an AWS PrivateLink connection, see [Create an AWS PrivateLink connection](/docs/current/sql-create-connection/#create-an-aws-privatelink-connection).
+If your Kafka source service is located in a different VPC from RisingWave, use AWS PrivateLink to establish a secure and direct connection. For details on how to set up an AWS PrivateLink connection, see [Create an AWS PrivateLink connection](/sql/commands/sql-create-connection#create-an-aws-privatelink-connection).
To create a Kafka source with a PrivateLink connection, in the WITH section of your `CREATE SOURCE` or `CREATE TABLE` statement, specify the following parameters.
@@ -362,7 +362,7 @@ To create a Kafka source with a PrivateLink connection, in the WITH section of y
| :------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| privatelink.targets | The PrivateLink targets that correspond to the Kafka brokers. The targets should be in JSON format. Note that each target listed corresponds to each broker specified in the properties.bootstrap.server field. If the order is incorrect, there will be connectivity issues. |
| privatelink.endpoint | The DNS name of the VPC endpoint. If you're using RisingWave Cloud, you can find the auto-generated endpoint after you created a connection. See details in [Create a PrivateLink connection](/cloud/create-a-connection/#whats-next). |
-| connection.name | The name of the connection. This parameter should only be included if you are using a connection created with the [CREATE CONNECTION](/docs/current/sql-create-connection/) statement. Omit this parameter if you have provisioned a VPC endpoint using privatelink.endpoint (recommended). |
+| connection.name | The name of the connection. This parameter should only be included if you are using a connection created with the [CREATE CONNECTION](/sql/commands/sql-create-connection) statement. Omit this parameter if you have provisioned a VPC endpoint using privatelink.endpoint (recommended). |
Here is an example of creating a Kafka source using a PrivateLink connection. Notice that `{"port": 9094}` corresponds to the broker `broker1-endpoint`, `{"port": 9095}` corresponds to the broker `broker2-endpoint`, and `{"port": 9096}` corresponds to the broker `broker3-endpoint`.
diff --git a/integrations/sources/mongodb-cdc.mdx b/integrations/sources/mongodb-cdc.mdx
index 746277d6..ae6a18b5 100644
--- a/integrations/sources/mongodb-cdc.mdx
+++ b/integrations/sources/mongodb-cdc.mdx
@@ -58,7 +58,7 @@ SELECT * FROM test;
```
-You can see the [INCLUDE clause](/docs/current/include-clause/) for more details.
+See the [INCLUDE clause](/ingestion/ingest-additional-fields-with-include-clause) for more details.
### Metadata options
diff --git a/integrations/sources/mysql-cdc.mdx b/integrations/sources/mysql-cdc.mdx
index 7ba04220..430704e0 100644
--- a/integrations/sources/mysql-cdc.mdx
+++ b/integrations/sources/mysql-cdc.mdx
@@ -13,7 +13,7 @@ With this connector, RisingWave can connect to MySQL databases directly to obtai
* Using a CDC tool and a message broker
You can use a CDC tool and then use the Kafka, Pulsar, or Kinesis connector to send the CDC data to RisingWave.
-This topic describes how to ingest MySQL CDC data into RisingWave using the native MySQL CDC connector. Using an external CDC tool and a message broker is introduced in [Create source via event streaming systems](/docs/current/ingest-from-cdc/).
+This topic describes how to ingest MySQL CDC data into RisingWave using the native MySQL CDC connector. Using an external CDC tool and a message broker is introduced in [Create source via event streaming systems](/ingestion/change-data-capture-with-risingwave).
## Set up MySQL
@@ -170,7 +170,7 @@ SELECT * FROM t2 ORDER BY v1;
4 | dd | 2024-05-20 09:01:08+00:00
```
-You can see the [INCLUDE clause](/docs/current/include-clause/) for more details.
+See the [INCLUDE clause](/ingestion/ingest-additional-fields-with-include-clause) for more details.
### Debezium parameters
@@ -224,7 +224,7 @@ FROM mysql_source TABLE 'public.person';
## Examples
-Connect to the upstream database by creating a CDC source using the [CREATE SOURCE](/docs/current/sql-create-source/) command and MySQL CDC parameters. The data format is fixed as `FORMAT PLAIN ENCODE JSON` so it does not need to be specified.
+Connect to the upstream database by creating a CDC source using the [CREATE SOURCE](/sql/commands/sql-create-source) command and MySQL CDC parameters. The data format is fixed as `FORMAT PLAIN ENCODE JSON` so it does not need to be specified.
```sql
CREATE SOURCE mysql_mydb WITH (
@@ -260,7 +260,7 @@ CREATE TABLE t3_rw (
) FROM mysql_mydb TABLE 'mydb.t3';
```
-To check the progress of backfilling historical data, find the corresponding internal table using the [SHOW INTERNAL TABLES](/docs/current/sql-show-internal-tables/) command and query from it. For instance, the following SQL query shows the progress of a CDC table named `orders_rw`.
+To check the progress of backfilling historical data, find the corresponding internal table using the [SHOW INTERNAL TABLES](/sql/commands/sql-show-internal-tables) command and query from it. For instance, the following SQL query shows the progress of a CDC table named `orders_rw`.
```sql
SELECT * FROM __internal_orders_rw_4002_streamcdcscan_5002;
@@ -328,7 +328,7 @@ Please be aware that the range of specific values varies among MySQL types and R
## Use dbt to ingest data from MySQL CDC
-Here is an example of how to use dbt to ingest data from MySQL CDC. In this dbt example, `source` and `table_with_connector` models will be used. For more details about these two models, please refer to [Use dbt for data transformations](/docs/current/use-dbt/#define-dbt-models).
+Here is an example of how to use dbt to ingest data from MySQL CDC. In this dbt example, `source` and `table_with_connector` models will be used. For more details about these two models, please refer to [Use dbt for data transformations](/integrations/other/dbt#define-dbt-models).
First, we create a `source` model `mysql_mydb.sql`.
@@ -367,7 +367,7 @@ This feature is only available in the premium edition of RisingWave. The premium
PUBLIC PREVIEW
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](../../changelog/product-lifecycle#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
RisingWave supports automatically mapping the upstream table schema when creating a CDC table from a MySQL CDC source. Instead of defining columns individually, you can use `*` when creating a table to ingest all columns from the source table. Note that `*` cannot be used if other columns are specified in the table creation process.
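
A minimal sketch, assuming the CDC source `mysql_mydb` created above and an upstream table `mydb.t1`:

```sql
-- Ingest every column of the upstream table without listing them.
CREATE TABLE t1_rw (*) FROM mysql_mydb TABLE 'mydb.t1';
```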
@@ -407,7 +407,7 @@ This feature is only available in the premium edition of RisingWave. The premium
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](../../changelog/product-lifecycle#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
RisingWave supports auto schema changes in MySQL CDC. It ensures that your RisingWave pipeline stays synchronized with any schema changes in the source database, reducing the need for manual updates and preventing inconsistencies.
diff --git a/integrations/sources/nats-jetstream.mdx b/integrations/sources/nats-jetstream.mdx
index 36edb509..3330dae3 100644
--- a/integrations/sources/nats-jetstream.mdx
+++ b/integrations/sources/nats-jetstream.mdx
@@ -11,7 +11,7 @@ sidebarTitle: NATS JetStream
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](../../changelog/product-lifecycle#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Prerequisites
diff --git a/integrations/sources/overview.mdx b/integrations/sources/overview.mdx
index a471e311..c979a84f 100644
--- a/integrations/sources/overview.mdx
+++ b/integrations/sources/overview.mdx
@@ -5,4 +5,4 @@ mode: wide
sidebarTitle: Overview
---
- 6 items 5 items 1 item 3 items 3 item
+ 6 items 5 items 1 item 3 items 3 items
diff --git a/integrations/sources/postgresql-cdc.mdx b/integrations/sources/postgresql-cdc.mdx
index 035c5d20..f0f7d8c1 100644
--- a/integrations/sources/postgresql-cdc.mdx
+++ b/integrations/sources/postgresql-cdc.mdx
@@ -11,7 +11,7 @@ You can ingest CDC data from PostgreSQL into RisingWave in two ways:
* Using the built-in PostgreSQL CDC connector
With this connector, RisingWave can connect to PostgreSQL databases directly to obtain data from the WAL without starting additional services.
* Using a CDC tool and a message broker
-You can use a CDC tool and then use the Kafka, Pulsar, or Kinesis connector to send the CDC data to RisingWave. For more details, see the [Create source via event streaming systems](/docs/current/ingest-from-cdc/) topic.
+You can use a CDC tool and then use the Kafka, Pulsar, or Kinesis connector to send the CDC data to RisingWave. For more details, see the [Create source via event streaming systems](/ingestion/change-data-capture-with-risingwave) topic.
## Set up PostgreSQL
@@ -143,7 +143,7 @@ WITH (
FROM source TABLE pg_table_name;
```
-To check the progress of backfilling historical data, find the corresponding internal table using the [SHOW INTERNAL TABLES](/docs/current/sql-show-internal-tables/) command and query from it.
+To check the progress of backfilling historical data, find the corresponding internal table using the [SHOW INTERNAL TABLES](/sql/commands/sql-show-internal-tables) command and query from it.
### Connector parameters
@@ -197,7 +197,7 @@ SELECT * FROM t2 ORDER BY v1;
4 | dd | 2024-05-20 09:01:08+00:00
```
-You can see the [INCLUDE clause](/docs/current/include-clause/) for more details.
+See the [INCLUDE clause](/ingestion/ingest-additional-fields-with-include-clause) for more details.
### Debezium parameters
@@ -251,7 +251,7 @@ FROM pg_source TABLE 'public.person';
## Examples
-Connect to the upstream database by creating a CDC source using the [CREATE SOURCE](/docs/current/sql-create-source/) command and PostgreSQL CDC parameters. The data format is fixed as `FORMAT PLAIN ENCODE JSON` so it does not need to be specified.
+Connect to the upstream database by creating a CDC source using the [CREATE SOURCE](/sql/commands/sql-create-source) command and PostgreSQL CDC parameters. The data format is fixed as `FORMAT PLAIN ENCODE JSON` so it does not need to be specified.
```sql
CREATE SOURCE pg_mydb WITH (
@@ -286,7 +286,7 @@ CREATE TABLE tt4 (
) FROM pg_mydb TABLE 'ods.tt4';
```
-To check the progress of backfilling historical data, find the corresponding internal table using the [SHOW INTERNAL TABLES](/docs/current/sql-show-internal-tables/) command and query from it.
+To check the progress of backfilling historical data, find the corresponding internal table using the [SHOW INTERNAL TABLES](/sql/commands/sql-show-internal-tables) command and query from it.
## Data type mapping
@@ -335,7 +335,7 @@ RisingWave cannot correctly parse composite types from PostgreSQL as Debezium do
| TIME(1), TIME(2), TIME(3), TIME(4), TIME(5), TIME(6) | TIME WITHOUT TIME ZONE (limited to \[1973-03-03 09:46:40, 5138-11-16 09:46:40)) |
| TIMESTAMP(1), TIMESTAMP(2), TIMESTAMP(3) | TIMESTAMP WITHOUT TIME ZONE (limited to \[1973-03-03 09:46:40, 5138-11-16 09:46:40)) |
| TIMESTAMP(4), TIMESTAMP(5), TIMESTAMP(6), TIMESTAMP | TIMESTAMP WITHOUT TIME ZONE |
-| NUMERIC\[(M\[,D\])\], DECIMAL\[(M\[,D\])\] | numeric, [rw\_int256](/docs/current/data-type-rw%5Fint256/), or varchar. numeric supports values with a precision of up to 28 digits, and any values beyond this precision will be treated as NULL. To process values exceeding 28 digits, use rw\_int256 or varchar instead. When creating a table, make sure to specify the data type of the column corresponding to numeric as rw\_int256 or varchar. Note that rw\_int256 treats inf, \-inf, nan, or numeric with decimal parts as NULL. |
+| NUMERIC\[(M\[,D\])\], DECIMAL\[(M\[,D\])\] | numeric, [rw\_int256](/sql/data-types/rw-int256), or varchar. numeric supports values with a precision of up to 28 digits, and any values beyond this precision will be treated as NULL. To process values exceeding 28 digits, use rw\_int256 or varchar instead. When creating a table, make sure to specify the data type of the column corresponding to numeric as rw\_int256 or varchar. Note that rw\_int256 treats inf, \-inf, nan, or numeric with decimal parts as NULL. |
| MONEY\[(M\[,D\])\] | NUMERIC |
| HSTORE | No support |
@@ -346,7 +346,7 @@ RisingWave cannot correctly parse composite types from PostgreSQL as Debezium do
## Use dbt to ingest data from PostgreSQL CDC
-Here is an example of how to use dbt to ingest data from PostgreSQL CDC. In this dbt example, `source` and `table_with_connector` models will be used. For more details about these two models, please refer to [Use dbt for data transformations](/docs/current/use-dbt/#define-dbt-models).
+Here is an example of how to use dbt to ingest data from PostgreSQL CDC. In this dbt example, `source` and `table_with_connector` models will be used. For more details about these two models, please refer to [Use dbt for data transformations](/integrations/other/dbt#define-dbt-models).
First, we create a `source` model `pg_mydb.sql`.
@@ -382,7 +382,7 @@ This feature is only available in the premium edition of RisingWave. The premium
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](../../changelog/product-lifecycle#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
RisingWave supports automatically mapping the upstream table schema when creating a CDC table from a PostgreSQL CDC source. Instead of defining columns individually, you can use `*` when creating a table to ingest all columns from the source table. Note that `*` cannot be used if other columns are specified in the table creation process.
diff --git a/integrations/sources/pulsar.mdx b/integrations/sources/pulsar.mdx
index 0559ab16..bf1552ed 100644
--- a/integrations/sources/pulsar.mdx
+++ b/integrations/sources/pulsar.mdx
@@ -7,7 +7,7 @@ sidebarTitle: Apache Pulsar
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](../../changelog/product-lifecycle#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
When creating a source, you can choose to persist the data from the source in RisingWave by using `CREATE TABLE` instead of `CREATE SOURCE` and specifying the connection settings and data format.
diff --git a/integrations/sources/sql-server-cdc.mdx b/integrations/sources/sql-server-cdc.mdx
index 93333c9d..4be76a9b 100644
--- a/integrations/sources/sql-server-cdc.mdx
+++ b/integrations/sources/sql-server-cdc.mdx
@@ -6,13 +6,13 @@ sidebarTitle: SQL Server CDC
**PREMIUM EDITION FEATURE**
-This feature is exclusive to RisingWave Premium Edition that offers advanced capabilities beyond the free versions. For a full list of premium features, see [RisingWave Premium Edition](/docs/current/rw-premium-edition-intro/). If you encounter any questions, please contact sales team at [sales@risingwave-labs.com](mailto:sales@risingwave-labs.com).
+This feature is exclusive to the RisingWave Premium Edition, which offers advanced capabilities beyond the free versions. For a full list of premium features, see [RisingWave Premium Edition](/get-started/rw-premium-edition-intro). If you have any questions, please contact our sales team at [sales@risingwave-labs.com](mailto:sales@risingwave-labs.com).
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](../../changelog/product-lifecycle#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
Change Data Capture (CDC) refers to the process of identifying and capturing data changes in a database, and then delivering the changes to a downstream service in real time.
@@ -24,7 +24,7 @@ You can ingest CDC data from SQL Server into RisingWave in two ways:
* Using the built-in SQL Server CDC connector
With this connector, RisingWave can connect to SQL Server databases directly to obtain data from the transaction log without starting additional services.
* Using a CDC tool and a message broker
-You can use a CDC tool and then use the Kafka, Pulsar, or Kinesis connector to send the CDC data to RisingWave. For more details, see the [Create source via event streaming systems](/docs/current/ingest-from-cdc/) topic.
+You can use a CDC tool and then use the Kafka, Pulsar, or Kinesis connector to send the CDC data to RisingWave. For more details, see the [Create source via event streaming systems](/ingestion/change-data-capture-with-risingwave) topic.
## Set up SQL Server
@@ -185,7 +185,7 @@ FROM mssql_source TABLE 'mydb.dbo.person';
## Examples
-Connect to the upstream database by creating a CDC source using the [CREATE SOURCE](/docs/current/sql-create-source/) command and SQL Server CDC parameters. The data format is fixed as `FORMAT PLAIN ENCODE JSON` so it does not need to be specified.
+Connect to the upstream database by creating a CDC source using the [CREATE SOURCE](/sql/commands/sql-create-source) command and SQL Server CDC parameters. The data format is fixed as `FORMAT PLAIN ENCODE JSON` so it does not need to be specified.
```sql
CREATE SOURCE mssql_mydb WITH (
@@ -219,7 +219,7 @@ CREATE TABLE tt4 (
) FROM mssql_mydb TABLE 'ods.tt4';
```
-To check the progress of backfilling historical data, find the corresponding internal table using the [SHOW INTERNAL TABLES](/docs/current/sql-show-internal-tables/) command and query from it.
+To check the progress of backfilling historical data, find the corresponding internal table using the [SHOW INTERNAL TABLES](/sql/commands/sql-show-internal-tables) command and query from it.
## Data type mapping
diff --git a/integrations/visualization/grafana.mdx b/integrations/visualization/grafana.mdx
index 57ba9323..df8b3864 100644
--- a/integrations/visualization/grafana.mdx
+++ b/integrations/visualization/grafana.mdx
@@ -13,7 +13,7 @@ This guide will go over how to add RisingWave as a data source in Grafana.
To install and start RisingWave locally, see the [Get started](/get-started/quickstart) guide. We recommend running RisingWave locally for testing purposes.
-Connect to streaming sources. For details on connecting to a streaming source and what connectors are supported with RisingWave, see [CREATE SOURCE](/docs/current/sql-create-source/).
+Connect to streaming sources. For details on connecting to a streaming source and what connectors are supported with RisingWave, see [CREATE SOURCE](/sql/commands/sql-create-source).
### Install and launch Grafana
@@ -48,7 +48,7 @@ To add a new read-only user, use the following SQL query:
CREATE USER grafanareader WITH PASSWORD 'password';
```
-Then, when adding RisingWave as a database, fill in the **User** and **Password** fields with the name and password of the new user created. For more details on creating a user, see the [CREATE USER](/docs/current/sql-create-user/) command.
+Then, when adding RisingWave as a database, fill in the **User** and **Password** fields with the name and password of the newly created user. For more details on creating a user, see the [CREATE USER](/sql/commands/sql-create-user) command.
To allow the read-only user to query from a materialized view, use the following SQL query:
@@ -56,6 +56,6 @@ To allow the read-only user to query from a materialized view, use the following
GRANT SELECT ON MATERIALIZED VIEW mv_name TO grafanareader;
```
-See the [GRANT](/docs/current/sql-grant/) command for more details.
+See the [GRANT](/sql/commands/sql-grant) command for more details.
-Now that RisingWave is added as a database, you can start creating dashboards within Grafana using the data in RisingWave. For an extensive tutorial that covers how to create dashboards in Grafana with data queried from RisingWave, check out the [Use RisingWave to monitor RisingWave metrics](/docs/current/use-risingwave-to-monitor-risingwave-metrics/) tutorial, which uses a demo cluster so you can easily try it out on your device.
+Now that RisingWave is added as a database, you can start creating dashboards within Grafana using the data in RisingWave.
diff --git a/integrations/visualization/superset.mdx b/integrations/visualization/superset.mdx
index 4fffb3e6..70c61f15 100644
--- a/integrations/visualization/superset.mdx
+++ b/integrations/visualization/superset.mdx
@@ -15,7 +15,7 @@ This guide will go over how to:
To install and start RisingWave locally, see the [Get started](/get-started/quickstart) guide. We recommend running RisingWave locally for demo purposes.
-Connect to a streaming source. For details on connecting to streaming sources and what sources are supported with RisingWave, see [CREATE SOURCE](/docs/current/sql-create-source/).
+Connect to a streaming source. For details on connecting to streaming sources and what sources are supported with RisingWave, see [CREATE SOURCE](/sql/commands/sql-create-source).
### Install Apache Superset
diff --git a/operate/access-control.mdx b/operate/access-control.mdx
index 450010c5..3d9ad32b 100644
--- a/operate/access-control.mdx
+++ b/operate/access-control.mdx
@@ -3,7 +3,7 @@ title: "Access control"
description: "RisingWave uses a user-based access control to handle authentication and authorization. Privileges can be granted to or revoked by users to control what actions can be performed on different object levels."
---
-When creating a user, the administrator of an organization can determine the system-level permissions and set a password. The system permissions and the user names can be revised with the `ALTER USER` command. For details about the system permissions, see [System permissions](/docs/current/sql-create-user/#system-permissions).
+When creating a user, the administrator of an organization can determine the system-level permissions and set a password. The system permissions and the user names can be revised with the `ALTER USER` command. For details about the system permissions, see [System permissions](/sql/commands/sql-create-user#syntax-for-creating-a-new-user).
Database privileges can be configured later by using `GRANT` and `REVOKE` commands. The privileges are managed at these object levels:
@@ -25,7 +25,7 @@ Syntax:
CREATE USER user_name [ [ WITH ] system_permission [ ... ]['PASSWORD' { password | NULL }] ];
```
-For details about system permissions, see [System permissions](/docs/current/sql-create-user/#system-permissions).
+For details about system permissions, see [System permissions](/sql/commands/sql-create-user#syntax-for-creating-a-new-user).
Create a user with default permissions:
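
A minimal example; the user name and password are placeholders:

```sql
CREATE USER user1 WITH PASSWORD 'pAssword12345';
```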
@@ -68,7 +68,7 @@ See the table below for the privileges available in RisingWave and the correspon
| CREATE | Permission to create new objects within the database. | Schema, Database, Table |
| CONNECT | Permission to connect to a database. | Database |
-You use the `GRANT` command to grant privileges to a user, and the `REVOKE` command to revoke privileges from a user. For the syntaxes of these two commands, see [GRANT](/docs/current/sql-grant/) and [REVOKE](/docs/current/sql-revoke/).
+You use the `GRANT` command to grant privileges to a user, and the `REVOKE` command to revoke privileges from a user. For the syntaxes of these two commands, see [GRANT](/sql/commands/sql-grant) and [REVOKE](/sql/commands/sql-revoke).
This statement grants the `SELECT` privilege for materialized view `mv1`, which is in schema `schema1` of database `db1`, to user `user1`. `user1` is able to grant the `SELECT` privilege to other users.
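
A sketch of that statement, assuming the objects already exist:

```sql
GRANT SELECT
ON MATERIALIZED VIEW mv1 IN SCHEMA db1.schema1
TO user1 WITH GRANT OPTION;
```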
diff --git a/operate/dedicated-compute-node.mdx b/operate/dedicated-compute-node.mdx
index a837ded7..fcd78788 100644
--- a/operate/dedicated-compute-node.mdx
+++ b/operate/dedicated-compute-node.mdx
@@ -22,7 +22,7 @@ For specific changes required in the YAML file, see [Separate batch streaming mo
## Configure a `serving` compute node for batch queries
-You can use a TOML configuration file to configure a `serving` compute node. For detailed instructions, see [Node-specific configurations](/docs/current/node-specific-configurations/).
+You can use a TOML configuration file to configure a `serving` compute node. For detailed instructions, see [Node-specific configurations](/deploy/node-specific-configurations).
Unlike a general-purpose `both` compute node, a `serving` compute node doesn't require memory allocation or reservation for shared buffer and operator caches. Instead, it's more efficient to increase the sizes of the block and meta caches. However, making these caches too large can limit the scope of data that batch queries can execute.
diff --git a/operate/manage-a-large-number-of-streaming-jobs.mdx b/operate/manage-a-large-number-of-streaming-jobs.mdx
index 9ce034e8..2a377265 100644
--- a/operate/manage-a-large-number-of-streaming-jobs.mdx
+++ b/operate/manage-a-large-number-of-streaming-jobs.mdx
@@ -21,7 +21,7 @@ disable_automatic_parallelism_control=true
default_parallelism=8
```
-The adaptive parallelism feature in version 1.7.0 ensures that every streaming job can fully utilize all available CPUs. Therefore, we need to disable it. The `default_parallelism` setting determines the parallelism for newly created streaming jobs. Change the `streaming_parallelism` before creating streaming jobs can achieve the same effect. For guidance on how to set this value, refer to [How to adjust the resources allocated to each streaming query?](/docs/current/performance-faq/#how-to-adjust-the-resources-allocated-to-each-streaming-query).
+The adaptive parallelism feature in version 1.7.0 ensures that every streaming job can fully utilize all available CPUs. Therefore, we need to disable it. The `default_parallelism` setting determines the parallelism for newly created streaming jobs. Changing `streaming_parallelism` before creating streaming jobs achieves the same effect (see the example below). For guidance on how to set this value, refer to [How to adjust the resources allocated to each streaming query?](/performance/performance-faq#how-to-adjust-the-resources-allocated-to-each-streaming-query).
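
For instance, a hypothetical session that assumes a table `t` already exists:

```sql
-- Applies to streaming jobs created later in this session.
SET streaming_parallelism = 8;
CREATE MATERIALIZED VIEW mv1 AS
SELECT v1, COUNT(*) AS cnt FROM t GROUP BY v1;
```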
### Limit the concurrency of creating stream jobs
diff --git a/operate/manage-secrets.mdx b/operate/manage-secrets.mdx
index f2312589..5a7df70d 100644
--- a/operate/manage-secrets.mdx
+++ b/operate/manage-secrets.mdx
@@ -12,18 +12,18 @@ RisingWave provides four key secret management operations:
* Using secrets as a file.
* Dropping secrets.
-In addition, you can use the [rw\_secrets](/docs/current/rw%5Fcatalog/) catalog to view the ID, name, owner, and access control of secret objects.
+In addition, you can use the [rw\_secrets](/sql/system-catalogs/rw-catalog) catalog to view the ID, name, owner, and access control of secret objects.
**PREMIUM EDITION FEATURE**
-This feature is exclusive to RisingWave Premium Edition that offers advanced capabilities beyond the free versions. For a full list of premium features, see [RisingWave Premium Edition](/docs/current/rw-premium-edition-intro/). If you encounter any questions, please contact sales team at [sales@risingwave-labs.com](mailto:sales@risingwave-labs.com).
+This feature is exclusive to the RisingWave Premium Edition, which offers advanced capabilities beyond the free versions. For a full list of premium features, see [RisingWave Premium Edition](/get-started/rw-premium-edition-intro). If you have any questions, please contact our sales team at [sales@risingwave-labs.com](mailto:sales@risingwave-labs.com).
**PUBLIC PREVIEW**
-This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](../../changelog/product-lifecycle#features-in-the-public-preview-stage).
+This feature is in the public preview stage, meaning it's nearing the final product but is not yet fully stable. If you encounter any issues or have feedback, please contact us through our [Slack channel](https://www.risingwave.com/slack). Your input is valuable in helping us improve the feature. For more information, see our [Public preview feature list](/changelog/product-lifecycle#features-in-the-public-preview-stage).
## Create secrets
@@ -139,5 +139,5 @@ To specify the temporary secret file directory, set `RW_TEMP_SECRET_FILE_DIR`. T
## See also
-* [CREATE SECRET](/docs/current/sql-create-secret/): Creating a secret.
-* [DROP SECRET](/docs/current/sql-drop-secret/): Dropping a secret.
+* [CREATE SECRET](/sql/commands/sql-create-secret): Creating a secret.
+* [DROP SECRET](/sql/commands/sql-drop-secret): Dropping a secret.
diff --git a/operate/monitor-statement-progress.mdx b/operate/monitor-statement-progress.mdx
index 2cede070..48965da5 100644
--- a/operate/monitor-statement-progress.mdx
+++ b/operate/monitor-statement-progress.mdx
@@ -28,7 +28,7 @@ CREATE MATERIALIZED VIEW mv2 AS SELECT * FROM mv1;
ERROR: QueryError: Scheduler error: Cancelled: create
```
-Alternatively, you can use the [SHOW JOBS](/docs/current/sql-show-jobs/) command to get all streaming jobs (that is, the creation of a materialized view, an index, a table, a source, or a sink) that are in progress. The IDs, specific statements, and their progresses will be returned in the result. You can then cancel specific jobs by their IDs using the [CANCEL JOBS](/docs/current/sql-cancel-jobs/) command. The `CANCEL JOBS` command will return IDs of the jobs that are canceled successfully.
+Alternatively, you can use the [SHOW JOBS](/sql/commands/sql-show-jobs) command to get all streaming jobs (that is, the creation of a materialized view, an index, a table, a source, or a sink) that are in progress. The IDs, specific statements, and their progress will be returned in the result. You can then cancel specific jobs by their IDs using the [CANCEL JOBS](/sql/commands/sql-cancel-jobs) command. The `CANCEL JOBS` command will return the IDs of the jobs that are canceled successfully.
```sql Show all jobs
@@ -54,6 +54,6 @@ Id
## Related topics
-
-
+
+
diff --git a/operate/tune-reserved-memory.mdx b/operate/tune-reserved-memory.mdx
index ad1d314f..2a479602 100644
--- a/operate/tune-reserved-memory.mdx
+++ b/operate/tune-reserved-memory.mdx
@@ -102,5 +102,5 @@ The best configuration depends on your specific workload and data pattern. We re
## See also
-* [Memory usage](/docs/current/performance-metrics/#memory-usage): The memory control mechanism of RisingWave.
-* [What consists of the memory usage and disk usage?](/docs/current/faq-using-risingwave/#what-consists-of-the-memory-usage-and-disk-usage)
+* [Memory usage](/performance/performance-metrics#memory-usage): The memory control mechanism of RisingWave.
+* [What consists of the memory usage and disk usage?](/faq/faq-using-risingwave#what-consists-of-the-memory-usage-and-disk-usage)
diff --git a/operate/view-configure-runtime-parameters.mdx b/operate/view-configure-runtime-parameters.mdx
index e7411165..25fdc325 100644
--- a/operate/view-configure-runtime-parameters.mdx
+++ b/operate/view-configure-runtime-parameters.mdx
@@ -68,7 +68,7 @@ Below is the detailed information about the parameters you may see after using t
| row\_security | true/false | See [here](https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-ROW-SECURITY) for details. Unused in RisingWave, support for compatibility. |
| standard\_conforming\_strings | on | See [here](https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-STANDARD-CONFORMING-STRINGS) for details. |
| source\_rate\_limit | default/positive integer/0 | Set the maximum number of records per second per source, for each parallelism. This parameter is applied when creating new sources and tables with sources.
The value can be default, 0, or a positive integer.
SET `SOURCE_RATE_LIMIT` TO 0 will pause the source read for sources.
SET `SOURCE_RATE_LIMIT` TO DEFAULT will remove the rate limit.
Setting this variable will only affect new DDLs within the session, but not change the rate limits of existing jobs. Use `ALTER` to change the rate limits in existing [sources](/sql/commands/sql-alter-source/#set-source-rate-limit) and [tables that have source](/sql/commands/sql-alter-table/#set-source-rate-limit).
Note that the total throughput of a streaming job is determined by multiplying the parallelism with the throttle rate. To obtain the parallelism value for a streaming job, you can refer to the `streaming_parallelism` runtime parameter in this table. |
-| backfill\_rate\_limit | default/positive integer/0 | Set the maximum number of records per second per parallelism for the backfill process of materialized views, sinks, and indexes. This parameter is applied when creating new jobs, and throttles the backfill from upstream materialized views and sources.
The value can be default, 0, or a positive integer. SET `BACKFILL_RATE_LIMIT` TO 0 will pause the backfill. SET `BACKFILL_RATE_LIMIT` TO default will remove the backfill rate limit.
Setting this variable will only affect new DDLs within the session, but not change the rate limits of existing jobs. Use `ALTER` to change the backfill rate limits in existing [materialized views](/sql-alter-materialized-view/#set-backfill%5Frate%5Flimit) and [CDC tables](/sql/commands/sql-alter-table/#set-backfill%5Frate%5Flimit).
Note that the total throughput of a streaming job is determined by multiplying the parallelism with the throttle rate. To obtain the parallelism value for a streaming job, you can refer to the `streaming_parallelism` runtime parameter in this table. |
+| backfill\_rate\_limit | default/positive integer/0 | Set the maximum number of records per second per parallelism for the backfill process of materialized views, sinks, and indexes. This parameter is applied when creating new jobs, and throttles the backfill from upstream materialized views and sources.
The value can be default, 0, or a positive integer. SET `BACKFILL_RATE_LIMIT` TO 0 will pause the backfill. SET `BACKFILL_RATE_LIMIT` TO default will remove the backfill rate limit.
Setting this variable will only affect new DDLs within the session, but not change the rate limits of existing jobs. Use `ALTER` to change the backfill rate limits in existing [materialized views](/sql/commands/sql-alter-materialized-view#set-backfill-rate-limit) and [CDC tables](/sql/commands/sql-alter-table#set-backfill-rate-limit) (see the usage sketch after this table).
Note that the total throughput of a streaming job is determined by multiplying the parallelism with the throttle rate. To obtain the parallelism value for a streaming job, you can refer to the `streaming_parallelism` runtime parameter in this table. |
| rw\_streaming\_over\_window\_cache\_policy | full | Cache policy for partition cache in streaming over window. Can be `full`, `recent`, `recent_first_n` or `recent_last_n`. |
| background\_ddl | true/false | Run DDL statements in background. |
| server\_encoding | UTF8 | Show the server-side character set encoding. At present, this parameter can be shown but not set, because the encoding is determined at database creation time. |
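As referenced in the rate-limit rows above, a hedged usage sketch (the source and materialized view names are hypothetical):

```sql
-- Throttle jobs created later in this session.
SET source_rate_limit TO 1000;
SET backfill_rate_limit TO 500;

-- Existing jobs keep their limits until altered explicitly.
ALTER SOURCE my_source SET source_rate_limit TO DEFAULT;
ALTER MATERIALIZED VIEW my_mv SET backfill_rate_limit TO 200;
```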
@@ -92,9 +92,9 @@ SET parameter_name { TO | = } { value | 'value' | DEFAULT};
Where `parameter_name` is the name of the parameter, and `value` or `'value'` is the new value of the parameter. `DEFAULT` can be written to specify resetting the parameter to its default value.
-For details about the `set_config()` function, see [System administration functions](/docs/current/sql-function-sys-admin/#set%5Fconfig), and for details about the `SET` command, see [SET](/docs/current/sql-set/).
+For details about the `set_config()` function, see [System administration functions](/sql/functions/sys-admin#set-config), and for details about the `SET` command, see [SET](/sql/commands/sql-set).
-You can also use the [ALTER SYSTEM SET](/docs/current/sql-alter-system/) command to set a system-wide default value for a runtime parameter. This configuration will then be applied to every new session.
+You can also use the [ALTER SYSTEM SET](/sql/commands/sql-alter-system) command to set a system-wide default value for a runtime parameter. This configuration will then be applied to every new session.
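As a quick illustration of the session-level options above (the parameter chosen is arbitrary), both statements below should have the same effect:

```sql
-- Change a runtime parameter for the current session.
SET timezone = 'UTC';

-- Equivalent via the system administration function; the third argument
-- controls whether the change is local to the current transaction.
SELECT set_config('timezone', 'UTC', false);
```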
```sql Syntax
ALTER SYSTEM SET session_param_name TO session_param_value;
diff --git a/operate/view-configure-system-parameters.mdx b/operate/view-configure-system-parameters.mdx
index 9ca1cef4..96fb425b 100644
--- a/operate/view-configure-system-parameters.mdx
+++ b/operate/view-configure-system-parameters.mdx
@@ -20,7 +20,7 @@ Currently, these system parameters are available in RisingWave.
| data\_directory | The remote directory for storing data and metadata objects. |
| backup\_storage\_url | The URL of the remote storage for backups. |
| backup\_storage\_directory | The directory of the remote storage for backups. |
-| telemetry\_enabled | Whether to enable telemetry or not. For more information, see [Telemetry](/docs/current/telemetry/). |
+| telemetry\_enabled | Whether to enable telemetry or not. For more information, see [Telemetry](/operate/telemetry). |
| max\_concurrent\_creating\_streaming\_jobs | The maximum number of streaming jobs that can be created concurrently. That is, the maximum of materialized views, indexes, tables, sinks, or sources that can be created concurrently. |
| pause\_on\_next\_bootstrap | This parameter is used for debugging and maintenance purposes. Setting it to true will pause all data sources, such as connectors and DMLs, when the cluster restarts. This parameter will then be reset to its default value (false). To resume data ingestion, simply run `risectl meta resume` or restart the cluster again. |
| enable\_tracing | Whether to enable distributed tracing. This parameter is used to toggle the opentelemetry tracing during runtime. Its default value is false. |
diff --git a/performance/performance-best-practices.mdx b/performance/performance-best-practices.mdx
index a583ac49..c72c8198 100644
--- a/performance/performance-best-practices.mdx
+++ b/performance/performance-best-practices.mdx
@@ -6,7 +6,7 @@ sidebarTitle: Best practices
## When to create indexes
-Indexes in RisingWave are used to accelerate batch queries. See the basics of indexes in [CREATE INDEX](/docs/current/sql-create-index/).
+Indexes in RisingWave are used to accelerate batch queries. See the basics of indexes in [CREATE INDEX](/sql/commands/sql-create-index).
We determine how the index should be created by checking:
@@ -64,7 +64,7 @@ In terms of compactor nodes, we favor scaling up when the resources of the compa
In RisingWave, we can declare a source, a table, and an append-only table when connecting to an external upstream system.
-The difference between the source and the two types of tables is that a source does not persist the ingested data within RisingWave while the tables do. When the data is stored within RisingWave, it gives users the ability to insert, delete, and update data to a table and only insert data to an append-only table. Therefore, users make a tradeoff between taking up more storage space and giving up the ability to modify source data within RisingWave. See details in [CREATE SOURCE](/docs/current/sql-create-source/).
+The difference between the source and the two types of tables is that a source does not persist the ingested data within RisingWave, while the tables do. When the data is stored within RisingWave, users can insert, update, and delete data in a table, but can only insert data into an append-only table. Therefore, users make a tradeoff between taking up more storage space and giving up the ability to modify source data within RisingWave. See details in [CREATE SOURCE](/sql/commands/sql-create-source).
Another difference is the performance implication. Unlike the table, the source and the append-only table will never process any updates or deletes. This gives us opportunities for optimization.
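A sketch of the three declaration styles, using the built-in `datagen` connector as a stand-in (any real connector and schema would follow the same shape):

```sql
-- Source: data is not persisted in RisingWave.
CREATE SOURCE s_events (v INT)
WITH (connector = 'datagen') FORMAT PLAIN ENCODE JSON;

-- Table: data is persisted and can be inserted, updated, and deleted.
CREATE TABLE t_events (v INT)
WITH (connector = 'datagen') FORMAT PLAIN ENCODE JSON;

-- Append-only table: data is persisted but can only be inserted.
CREATE TABLE t_events_ao (v INT) APPEND ONLY
WITH (connector = 'datagen') FORMAT PLAIN ENCODE JSON;
```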
diff --git a/processing/emit-on-window-close.mdx b/processing/emit-on-window-close.mdx
index 07c56877..bbc780df 100644
--- a/processing/emit-on-window-close.mdx
+++ b/processing/emit-on-window-close.mdx
@@ -31,7 +31,7 @@ However, in certain scenarios, selecting the emit-on-window-close triggering pol
* When the downstream system of the sink is append-only, such as Kafka or S3, and we prefer to write the result only once it is finalized, rather than performing multiple writes and updates.
* When certain calculations in the query cannot efficiently handle incremental updates, such as percentile calculations, and we want to trigger the calculation only when the window closes for better performance.
-To fulfill these requirements, RisingWave offers support for transforming queries into emit-on-window-close semantics using the `EMIT ON WINDOW CLOSE` clause. Additionally, a watermark must be defined on the data source, as it determines when the window can be closed. For a more detailed explanation of watermarks, please refer to [Watermarks](/docs/current/watermarks/).
+To fulfill these requirements, RisingWave offers support for transforming queries into emit-on-window-close semantics using the `EMIT ON WINDOW CLOSE` clause. Additionally, a watermark must be defined on the data source, as it determines when the window can be closed. For a more detailed explanation of watermarks, please refer to [Watermarks](/processing/watermarks).
We can modify the query above to use emit-on-window-close semantics:
diff --git a/processing/indexes.mdx b/processing/indexes.mdx
index 681a0e60..efcfe6c8 100644
--- a/processing/indexes.mdx
+++ b/processing/indexes.mdx
@@ -15,7 +15,7 @@ Indexes can be particularly useful for optimizing the performance of queries tha
## How to use indexes
-You can use the [CREATE INDEX](/docs/current/sql-create-index/) command to construct an index on a table or a materialized view. The syntax is as follows:
+You can use the [CREATE INDEX](/sql/commands/sql-create-index) command to construct an index on a table or a materialized view. The syntax is as follows:
```sql
CREATE INDEX [IF NOT EXISTS] index_name ON object_name ( index_column [ ASC | DESC ], [, ...] )
@@ -90,7 +90,7 @@ SELECT c_name, c_address FROM customers WHERE c_phone = '123456789';
```
-You can use the [EXPLAIN](/docs/current/sql-explain/) command to view the execution plan.
+You can use the [EXPLAIN](/sql/commands/sql-explain) command to view the execution plan.
## How to decide the index distribution key?
@@ -126,7 +126,7 @@ Then you might want to create an index like the following to improve the perform
CREATE INDEX people_names ON people ((first_name || ' ' || last_name));
```
-This syntax is quite useful when working with a semi-structured table that utilizes the [JSONB](/docs/current/data-type-jsonb/) datatype. Here is an example of creating an index on a specific field within a JSONB column.
+This syntax is quite useful when working with a semi-structured table that utilizes the [JSONB](/sql/data-types/jsonb) datatype. Here is an example of creating an index on a specific field within a JSONB column.
```sql
dev=> create table t(v jsonb);
@@ -144,16 +144,16 @@ dev=> explain select * from t where (v->'field')::int = 123;
## See also
-
+
Create an index constructed on a table or a materialized view.
-
+
Remove an index constructed on a table or a materialized view.
-
+
See what query was used to create the specified index.
-
+
Modify an index.
diff --git a/processing/maintain-wide-table-with-table-sinks.mdx b/processing/maintain-wide-table-with-table-sinks.mdx
index 7909afc5..353d32bb 100644
--- a/processing/maintain-wide-table-with-table-sinks.mdx
+++ b/processing/maintain-wide-table-with-table-sinks.mdx
@@ -3,7 +3,7 @@ title: "Maintain wide table with table sinks"
description: "This guide introduces how to maintain a wide table whose columns come from different sources. Traditional data warehouses or ETL use a join query for this purpose. However, streaming join brings issues such as low efficiency and high memory consumption."
---
-In some cases with limitation, use the [CREATE SINK INTO TABLE](/docs/current/sql-create-sink-into/) and [ON CONFLICT clause](/sql/commands/sql-create-table#pk-conflict-behavior) can save the resources and achieve high efficiency.
+In some cases (with limitations), using [CREATE SINK INTO TABLE](/sql/commands/sql-create-sink-into) together with the [ON CONFLICT clause](/sql/commands/sql-create-table#pk-conflict-behavior) can save resources and achieve high efficiency.
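A minimal sketch of the idea, assuming two upstream relations `src_a` and `src_b` that share the key `id` (all names here are hypothetical; the section below covers the pattern in detail):

```sql
-- Wide table that merges partial updates by primary key.
CREATE TABLE wide_t (id INT PRIMARY KEY, a INT, b INT)
ON CONFLICT DO UPDATE IF NOT NULL;

-- Each sink maintains its own slice of the wide table's columns.
CREATE SINK sink_a INTO wide_t (id, a) AS SELECT id, a FROM src_a;
CREATE SINK sink_b INTO wide_t (id, b) AS SELECT id, b FROM src_b;
```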
## Merge multiple sinks with the same primary key
diff --git a/processing/overview.mdx b/processing/overview.mdx
index ad797f81..d1eec14b 100644
--- a/processing/overview.mdx
+++ b/processing/overview.mdx
@@ -18,9 +18,9 @@ RisingWave uses Postgres-compatible SQL as the interface for declaring data proc
There are 2 execution modes in our system serving different analytics purposes. The results of these two modes are the same, and the difference lies in the timing of data processing: whether it occurs at the time of data ingestion (on write) or when the query is executed (on read).
-**Streaming**: RisingWave allows users to predefine SQL queries with [CREATE MATERIALIZED VIEW](/docs/current/sql-create-mv/) statement. RisingWave continuously listens changes in upstream tables (in the `FROM` clause) and incrementally update the results automatically.
+**Streaming**: RisingWave allows users to predefine SQL queries with the [CREATE MATERIALIZED VIEW](/sql/commands/sql-create-mv) statement. RisingWave continuously listens for changes in upstream tables (in the `FROM` clause) and incrementally updates the results automatically.
-**Ad-hoc**: Also like traditional databases, RisingWave allows users to send [SELECT](/docs/current/sql-select/) statement to query the result. At this point, RisingWave reads the data from the current snapshot, processes it, and returns the results.
+**Ad-hoc**: Also like traditional databases, RisingWave allows users to send a [SELECT](/sql/commands/sql-select) statement to query the result. At this point, RisingWave reads the data from the current snapshot, processes it, and returns the results.
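For example, the same aggregation can run in either mode (the table `t` is hypothetical):

```sql
-- On write: results are maintained incrementally as data arrives.
CREATE MATERIALIZED VIEW mv_cnt AS SELECT COUNT(*) AS cnt FROM t;

-- On read: the computation happens when the query is executed.
SELECT COUNT(*) AS cnt FROM t;
```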
diff --git a/processing/sql/time-windows.mdx b/processing/sql/time-windows.mdx
index 9652aebb..0bfd5649 100644
--- a/processing/sql/time-windows.mdx
+++ b/processing/sql/time-windows.mdx
@@ -114,7 +114,7 @@ The result looks like the table below. Note that the number of rows in the resul
## Session windows
-In RisingWave, session windows are supported by a special type of window function frame: `SESSION` frame. You can refer to [Window function calls](/docs/current/query-syntax-value-exp/#window-function-calls) for detailed syntax.
+In RisingWave, session windows are supported by a special type of window function frame: `SESSION` frame. You can refer to [Window function calls](/sql/query-syntax/value-exp#window-function-calls) for detailed syntax.
Currently, `SESSION` frame is only supported in batch mode and emit-on-window-close streaming mode.
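A hedged sketch of a `SESSION` frame in batch mode, assuming a table `events(user_id, event_ts)` and a 10-minute session gap:

```sql
-- Tag each event with the start of its session, where a session ends
-- after 10 minutes of inactivity per user.
SELECT
  user_id,
  event_ts,
  first_value(event_ts) OVER (
    PARTITION BY user_id ORDER BY event_ts
    SESSION WITH GAP INTERVAL '10 minutes'
  ) AS session_start
FROM events;
```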
diff --git a/processing/time-travel-queries.mdx b/processing/time-travel-queries.mdx
index 87afaf18..ec4009c7 100644
--- a/processing/time-travel-queries.mdx
+++ b/processing/time-travel-queries.mdx
@@ -6,7 +6,7 @@ description: This guide describes how to leverage the time travel feature. This
**PREMIUM EDITION FEATURE**
-This feature is exclusive to RisingWave Premium Edition that offers advanced capabilities beyond the free versions. For a full list of premium features, see [RisingWave Premium Edition](/docs/current/rw-premium-edition-intro/). If you encounter any questions, please contact sales team at [sales@risingwave-labs.com](mailto:sales@risingwave-labs.com).
+This feature is exclusive to the RisingWave Premium Edition, which offers advanced capabilities beyond the free versions. For a full list of premium features, see [RisingWave Premium Edition](/get-started/rw-premium-edition-intro). If you have any questions, please contact our sales team at [sales@risingwave-labs.com](mailto:sales@risingwave-labs.com).
diff --git a/processing/watermarks.mdx b/processing/watermarks.mdx
index d57cc80d..9abe1e7c 100644
--- a/processing/watermarks.mdx
+++ b/processing/watermarks.mdx
@@ -50,7 +50,7 @@ WATERMARK FOR time_col as time_col - INTERVAL 'string' time_unit
Supported `time_unit` values include: second, minute, hour, day, month, and year. For more details, see the `interval` data type under [Overview of data types](/sql/data-types/overview).
-Currently, RisingWave only supports using one of the columns from the table as the watermark column. To use nested fields (e.g., fields in `STRUCT`), or perform expression evaluation on the input rows (e.g., casting data types), please refer to [generated columns](/docs/current/query-syntax-generated-columns/).
+Currently, RisingWave only supports using one of the columns from the table as the watermark column. To use nested fields (e.g., fields in `STRUCT`), or perform expression evaluation on the input rows (e.g., casting data types), please refer to [generated columns](/sql/query-syntax/generated-columns).
### Example
@@ -72,7 +72,7 @@ CREATE SOURCE s1 (
) FORMAT PLAIN ENCODE JSON;
```
-The following query uses a [generated column](/docs/current/query-syntax-generated-columns/) to extract the timestamp column first, and then generates the watermark using it.
+The following query uses a [generated column](/sql/query-syntax/generated-columns) to extract the timestamp column first, and then generates the watermark using it.
```sql
CREATE SOURCE s2 (
diff --git a/reference/key-concepts.mdx b/reference/key-concepts.mdx
index 08292f80..efc56965 100644
--- a/reference/key-concepts.mdx
+++ b/reference/key-concepts.mdx
@@ -27,7 +27,7 @@ Indexes in a database are typically created on one or more columns of a table, a
### Materialized views[](#materialized-views "Direct link to Materialized views")
-When the results of a view expression are stored in a database system, they are called materialized views. In RisingWave, the result of a materialized view is updated when a relevant event arrives in the system. When you query the result, it is returned instantly as the computation has already been completed when the data comes in. You need to use the [CREATE MATERIALIZED VIEW](/docs/current/sql-create-mv/) statement to create a materialized view.
+When the results of a view expression are stored in a database system, they are called materialized views. In RisingWave, the result of a materialized view is updated when a relevant event arrives in the system. When you query the result, it is returned instantly as the computation has already been completed when the data comes in. You need to use the [CREATE MATERIALIZED VIEW](/sql/commands/sql-create-mv) statement to create a materialized view.
### Meta node[](#meta-node "Direct link to Meta node")
@@ -43,7 +43,7 @@ A node is a logical collection of IT resources that handles specific workloads b
### Parallelism[](#parallelism "Direct link to Parallelism")
-Parallelism refers to the technique of simultaneously executing multiple database operations or queries to improve performance and increase efficiency. It involves dividing a database workload into smaller tasks and executing them concurrently on multiple processors or machines. In RisingWave, you can set the parallelism of streaming jobs, like [tables](/sql/commands/sql-alter-table#set-parallelism), [materialized views](/docs/current/sql-alter-materialized-view/#set-parallelism), and [sinks](/docs/current/sql-alter-sink/#set-parallelism).
+Parallelism refers to the technique of simultaneously executing multiple database operations or queries to improve performance and increase efficiency. It involves dividing a database workload into smaller tasks and executing them concurrently on multiple processors or machines. In RisingWave, you can set the parallelism of streaming jobs, like [tables](/sql/commands/sql-alter-table#set-parallelism), [materialized views](/sql/commands/sql-alter-materialized-view#set-parallelism), and [sinks](/sql/commands/sql-alter-sink#set-parallelism).
### Sinks[](#sinks "Direct link to Sinks")
@@ -51,7 +51,7 @@ A sink is an external target to which you can send data. RisingWave now supports
### Sources[](#sources "Direct link to Sources")
-A source is a resource that RisingWave can read data from. Common sources include message brokers such as Apache Kafka and Apache Pulsar and databases such as MySQL and PostgreSQL. You can create a source in RisingWave using the [CREATE SOURCE](/docs/current/sql-create-source/) command.
+A source is a resource that RisingWave can read data from. Common sources include message brokers such as Apache Kafka and Apache Pulsar and databases such as MySQL and PostgreSQL. You can create a source in RisingWave using the [CREATE SOURCE](/sql/commands/sql-create-source) command.
If you want to persist the data from the source, you should use the [CREATE TABLE](/sql/commands/sql-create-table) command with connector settings.
@@ -87,7 +87,7 @@ Avro is an open-source data serialization system that facilitates data exchange
### Connection[](#connection "Direct link to Connection")
-A connection allows access to services located outside of your VPC. AWS PrivateLink provides a network connection used to create a private connection between VPCs, private networks, and other services. In RisingWave, the [CREATE CONNECTION](/docs/current/sql-create-connection/) command establishes a connection between RisingWave and an external service. Then, a source or sink can be created to receive or send messages.
+A connection allows access to services located outside of your VPC. AWS PrivateLink provides a network connection used to create a private connection between VPCs, private networks, and other services. In RisingWave, the [CREATE CONNECTION](/sql/commands/sql-create-connection) command establishes a connection between RisingWave and an external service. Then, a source or sink can be created to receive or send messages.
### Change data capture (CDC)[](#change-data-capture-cdc "Direct link to Change data capture (CDC)")
@@ -95,7 +95,7 @@ Change data capture refers to the process of identifying and capturing changes a
### Data persistence[](#data-persistence "Direct link to Data persistence")
-Data persistence means that data survives after the process that generated the data has ended. For a database to be considered persistent, it must write to non-volatile storage. This type of storage is able to retain data in the absence of a power supply. To learn about how data is persisted in RisingWave, see [Data persistence](/docs/current/data-persistence/).
+Data persistence means that data survives after the process that generated the data has ended. For a database to be considered persistent, it must write to non-volatile storage. This type of storage is able to retain data in the absence of a power supply. To learn about how data is persisted in RisingWave, see [Data persistence](/reference/data-persistence).
### Debezium[](#debezium "Direct link to Debezium")
@@ -107,7 +107,7 @@ Object storage, or object-based storage, is a technology that stores data in a h
### Protobuf[](#protobuf "Direct link to Protobuf")
-Protocol buffers (commonly known as Protobuf) are Google's language-neutral, platform-neutral, extensible mechanism for serializing structured data. It is similar to XML, but smaller, faster, and simpler. RisingWave supports decoding Protobuf data. When creating a source that uses the Protobuf format, you need to specify the schema. For details about the requirements, see [Protobuf requirements](/docs/current/sql-create-source/#protobuf).
+Protocol buffers (commonly known as Protobuf) are Google's language-neutral, platform-neutral, extensible mechanism for serializing structured data. It is similar to XML, but smaller, faster, and simpler. RisingWave supports decoding Protobuf data. When creating a source that uses the Protobuf format, you need to specify the schema. For details about the requirements, see [Protobuf requirements](/ingestion/supported-sources-and-formats#protobuf).
### `psql`[](#psql "Direct link to psql")
diff --git a/sql/commands/overview.mdx b/sql/commands/overview.mdx
index 1db8b32b..aa176925 100644
--- a/sql/commands/overview.mdx
+++ b/sql/commands/overview.mdx
@@ -11,7 +11,7 @@ sidebarTitle: Overview
title="ALTER CONNECTION"
icon="link"
iconType="solid"
- href="/docs/current/sql-alter-connection/"
+ href="/sql/commands/sql-alter-connection"
>
Modify the properties of a connection.
@@ -19,7 +19,7 @@ sidebarTitle: Overview
title="ALTER DATABASE"
icon="database"
iconType="solid"
- href="/docs/current/sql-alter-database/"
+ href="/sql/commands/sql-alter-database"
>
Modify the properties of a database.
@@ -27,7 +27,7 @@ sidebarTitle: Overview
title="ALTER FUNCTION"
icon="function"
iconType="solid"
- href="/docs/current/sql-alter-function/"
+ href="/sql/commands/sql-alter-function"
>
Modify the properties of a function.
@@ -35,7 +35,7 @@ sidebarTitle: Overview
title="ALTER INDEX"
icon="table"
iconType="solid"
- href="/docs/current/sql-alter-index/"
+ href="/sql/commands/sql-alter-index"
>
Modify the properties of an index.
@@ -43,7 +43,7 @@ sidebarTitle: Overview
title="ALTER MATERIALIZED VIEW"
icon="table"
iconType="solid"
- href="/docs/current/sql-alter-materialized-view/"
+ href="/sql/commands/sql-alter-materialized-view"
>
Modify the properties of a materialized view.
@@ -55,10 +55,10 @@ sidebarTitle: Overview
>
Modify the properties of a schema.
- Modify the properties of a sink. Modify the properties of a source. Modify a server configuration parameter. Modify the properties of a table. Modify the properties of a user. Modify the properties of a view. Convert stream into an append-only changelog. Start a transaction. Cancel specific streaming jobs. Add comments on tables or columns. Commit the current transaction. Create a user-defined aggregate function. Create a connection between VPCs. Create a new database. Create a user-defined function. Create an index on a column of a table or a materialized view to speed up data retrieval. Create a materialized view. Create a new schema. Create a secret to store credentials. Create a sink into RisingWave's table. Create a sink. Supported data sources and how to connect RisingWave to the sources. Create a table. Create a new user account. Create a non-materialized view.
- Remove rows from a table. Get information about the columns in a table, source, sink, view, or materialized view. Discard session state. Drop a user-defined aggregate function. Remove a connection. Remove a database. Drop a user-defined function. Remove an index. Remove a materialized view. Remove a schema. Drop a secret. Remove a sink. Remove a source. Remove a table. Remove a user. Drop a view. Show the execution plan of a statement. Commit pending data changes and persists updated data to storage. Grant a user privileges. Insert new rows of data into a table. Trigger recovery manually. Revoke privileges from a user. Retrieve data from a table or a materialized view. Run Data Definition Language (DDL) operations in the background. Enable or disable implicit flushes after batch operations. Set time zone. Change a run-time parameter.
- Show the details of your RisingWave cluster. Show columns in a table, source, sink, view or materialized view. Show existing connections. Show the query used to create the specified index. Show the query used to create the specified materialized view. Show the query used to create the specified sink. Show the query used to create the specified source. Show the query used to create the specified table. Show the query used to create the specified view. Show all cursors in the current session. Show existing databases. Show all user-defined functions. Show existing indexes from a particular table. Show internal tables to learn about the existing internal states. Show all streaming jobs. Show existing materialized views. Show the details of the system parameters.
- Display system current workload. Show existing schemas. Shows all sinks. Show existing sources. Show all subscription cursors in the current session. Show existing tables. Show existing views. Start a transaction. Modify existing rows in a table.
+ Modify the properties of a sink. Modify the properties of a source. Modify a server configuration parameter. Modify the properties of a table. Modify the properties of a user. Modify the properties of a view. Convert a stream into an append-only changelog. Start a transaction. Cancel specific streaming jobs. Add comments on tables or columns. Commit the current transaction. Create a user-defined aggregate function. Create a connection between VPCs. Create a new database. Create a user-defined function. Create an index on a column of a table or a materialized view to speed up data retrieval. Create a materialized view. Create a new schema. Create a secret to store credentials. Create a sink into RisingWave's table. Create a sink. Supported data sources and how to connect RisingWave to the sources. Create a table. Create a new user account. Create a non-materialized view.
+ Remove rows from a table. Get information about the columns in a table, source, sink, view, or materialized view. Discard session state. Drop a user-defined aggregate function. Remove a connection. Remove a database. Drop a user-defined function. Remove an index. Remove a materialized view. Remove a schema. Drop a secret. Remove a sink. Remove a source. Remove a table. Remove a user. Drop a view. Show the execution plan of a statement. Commit pending data changes and persist updated data to storage. Grant a user privileges. Insert new rows of data into a table. Trigger recovery manually. Revoke privileges from a user. Retrieve data from a table or a materialized view. Run Data Definition Language (DDL) operations in the background. Enable or disable implicit flushes after batch operations. Set time zone. Change a run-time parameter.
+ Show the details of your RisingWave cluster. Show columns in a table, source, sink, view or materialized view. Show existing connections. Show the query used to create the specified index. Show the query used to create the specified materialized view. Show the query used to create the specified sink. Show the query used to create the specified source. Show the query used to create the specified table. Show the query used to create the specified view. Show all cursors in the current session. Show existing databases. Show all user-defined functions. Show existing indexes from a particular table. Show internal tables to learn about the existing internal states. Show all streaming jobs. Show existing materialized views. Show the details of the system parameters.
+ Display the current system workload. Show existing schemas. Show all sinks. Show existing sources. Show all subscription cursors in the current session. Show existing tables. Show existing views. Start a transaction. Modify existing rows in a table.
diff --git a/sql/commands/sql-alter-materialized-view.mdx b/sql/commands/sql-alter-materialized-view.mdx
index 5426af31..70a297c6 100644
--- a/sql/commands/sql-alter-materialized-view.mdx
+++ b/sql/commands/sql-alter-materialized-view.mdx
@@ -63,7 +63,7 @@ SET PARALLELISM = parallelism_number;
| Parameter or clause | Description |
| :-------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| **SET PARALLELISM** | This clause controls the degree of [parallelism](/docs/current/key-concepts/#parallelism) for the targeted [streaming job](/docs/current/key-concepts/#streaming-jobs). |
+| **SET PARALLELISM** | This clause controls the degree of [parallelism](/reference/key-concepts#parallelism) for the targeted [streaming job](/reference/key-concepts#streaming-jobs). |
| _parallelism\_number_ | This parameter can be ADAPTIVE or a fixed number, like 1, 2, 3, etc. Altering the parameter to ADAPTIVE will expand the streaming job's degree of parallelism to encompass all available units, whereas setting it to a fixed number will lock the job's parallelism at that specific figure. Setting it to 0 is equivalent to ADAPTIVE. |
```sql
diff --git a/sql/commands/sql-alter-sink.mdx b/sql/commands/sql-alter-sink.mdx
index ba829f01..f128eb82 100644
--- a/sql/commands/sql-alter-sink.mdx
+++ b/sql/commands/sql-alter-sink.mdx
@@ -63,7 +63,7 @@ SET PARALLELISM = parallelism_number;
| Parameter or clause | Description |
| :-------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| **SET PARALLELISM** | This clause controls the degree of [parallelism](/docs/current/key-concepts/#parallelism) for the targeted [streaming job](/docs/current/key-concepts/#streaming-jobs). |
+| **SET PARALLELISM** | This clause controls the degree of [parallelism](/reference/key-concepts#parallelism) for the targeted [streaming job](/reference/key-concepts#streaming-jobs). |
| _parallelism\_number_ | This parameter can be ADAPTIVE or a fixed number, like 1, 2, 3, etc. Altering the parameter to ADAPTIVE will expand the streaming job's degree of parallelism to encompass all available units, whereas setting it to a fixed number will lock the job's parallelism at that specific figure. Setting it to 0 is equivalent to ADAPTIVE. |
```sql
diff --git a/sql/commands/sql-alter-system.mdx b/sql/commands/sql-alter-system.mdx
index 057336d0..cb25d1e9 100644
--- a/sql/commands/sql-alter-system.mdx
+++ b/sql/commands/sql-alter-system.mdx
@@ -3,7 +3,7 @@ title: "ALTER SYSTEM"
description: "The `ALTER SYSTEM` command modifies the value of a server configuration parameter."
---
-You can use this command to configure some parameters, like the [system parameters](/operate/view-configure-system-parameters#how-to-configure-system-parameters) and [runtime parameters](/docs/current/view-configure-runtime-parameters/#how-to-configure-runtime-parameters).
+You can use this command to configure some parameters, like the [system parameters](/operate/view-configure-system-parameters#how-to-configure-system-parameters) and [runtime parameters](/operate/view-configure-runtime-parameters#how-to-configure-runtime-parameters).
```sql Syntax
ALTER SYSTEM SET configuration_parameter { TO | = } { value [, ...] | DEFAULT }
diff --git a/sql/commands/sql-alter-table.mdx b/sql/commands/sql-alter-table.mdx
index 9a9aed29..d4281352 100644
--- a/sql/commands/sql-alter-table.mdx
+++ b/sql/commands/sql-alter-table.mdx
@@ -108,8 +108,8 @@ SET PARALLELISM = parallelism_number;
| Parameter or clause | Description |
| :-------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| **SET PARALLELISM** | This clause controls the degree of [parallelism](/docs/current/key-concepts/#parallelism) for the targeted [streaming job](/docs/current/key-concepts/#streaming-jobs). |
-| _parallelism\_number_ | This parameter can be ADAPTIVE or a fixed number, like 1, 2, 3, etc. Altering the parameter to ADAPTIVE will expand the streaming job's degree of parallelism to encompass all available units, whereas setting it to a fixed number will lock the job's parallelism at that specific figure. Setting it to 0 is equivalent to ADAPTIVE. After setting the parallelism, the parallelism status of a table can be observed within the internal [rw\_table\_fragments](/docs/current/view-configure-runtime-parameters/) table or the [rw\_fragments](/docs/current/view-configure-runtime-parameters/)table. |
+| **SET PARALLELISM** | This clause controls the degree of [parallelism](/reference/key-concepts#parallelism) for the targeted [streaming job](/reference/key-concepts#streaming-jobs). |
+| _parallelism\_number_ | This parameter can be ADAPTIVE or a fixed number, like 1, 2, 3, etc. Altering the parameter to ADAPTIVE will expand the streaming job's degree of parallelism to encompass all available units, whereas setting it to a fixed number will lock the job's parallelism at that specific figure. Setting it to 0 is equivalent to ADAPTIVE. After setting the parallelism, the parallelism status of a table can be observed within the internal [rw\_table\_fragments](/operate/view-configure-runtime-parameters) table or the [rw\_fragments](/operate/view-configure-runtime-parameters) table. |
```sql
ALTER TABLE test_table SET PARALLELISM = 8;
@@ -117,13 +117,13 @@ ALTER TABLE test_table SET PARALLELISM = 8;
Here is a more detailed example for you to practise this clause:
-First, let's set the parallelism to `3` by the [SET command](/docs/current/view-configure-runtime-parameters/#how-to-configure-runtime-parameters).
+First, let's set the parallelism to `3` by the [SET command](/operate/view-configure-runtime-parameters#how-to-configure-runtime-parameters).
```sql
SET streaming_parallelism = 3;
```
-Then let's create a table to view the parallelism we set. As mentioned, the parallelism status of a table can be observed within the [rw\_fragments](/docs/current/view-configure-runtime-parameters/) table.
+Then let's create a table to view the parallelism we set. As mentioned, the parallelism status of a table can be observed within the [rw\_fragments](/operate/view-configure-runtime-parameters) table.
```sql
-- Create a table.
@@ -210,7 +210,7 @@ ALTER TABLE table_name
```
For tables with connector, this statement controls the rate limit of the associated source.
-For the specific value of `SOURCE_RATE_LIMIT`, refer to [How to view runtime parameters](/docs/current/view-configure-runtime-parameters/#how-to-view-runtime-parameters).
+For the specific value of `SOURCE_RATE_LIMIT`, refer to [How to view runtime parameters](/operate/view-configure-runtime-parameters#how-to-view-runtime-parameters).
```sql Example
-- Create a table with source
diff --git a/sql/commands/sql-alter-user.mdx b/sql/commands/sql-alter-user.mdx
index ace8cdf5..0ddb3394 100644
--- a/sql/commands/sql-alter-user.mdx
+++ b/sql/commands/sql-alter-user.mdx
@@ -30,7 +30,7 @@ ALTER USER user_name WITH oauth (
| :------------------- | :---------------------------------------------------------------------------------------------------------------------- |
| _user\_name_ | The name of the user to be modified. |
| _new\_user\_name_ | The new name of the user. |
-| _system\_permission_ | See [the options for system permissions of the CREATE USER command](/docs/current/sql-create-user/#system-permissions). |
+| _system\_permission_ | See [the options for system permissions of the CREATE USER command](/sql/commands/sql-create-user#syntax-for-creating-a-new-user). |
For the alter user authentication method, the `jwks_url` and `issuer` parameters are mandatory. On the other hand, `other_params_should_match` is an optional parameter that will be validated against `jwt.claims`. Ensure that all keys in the options are in **lowercase**.
diff --git a/sql/commands/sql-begin.mdx b/sql/commands/sql-begin.mdx
index 43e71e70..0839115b 100644
--- a/sql/commands/sql-begin.mdx
+++ b/sql/commands/sql-begin.mdx
@@ -26,5 +26,5 @@ BEGIN
-
+
diff --git a/sql/commands/sql-cancel-jobs.mdx b/sql/commands/sql-cancel-jobs.mdx
index aa96bce2..0b0bb5cd 100644
--- a/sql/commands/sql-cancel-jobs.mdx
+++ b/sql/commands/sql-cancel-jobs.mdx
@@ -2,7 +2,7 @@
title: "CANCEL JOBS"
description: "Use `CANCEL JOBS/JOB` to cancel specific streaming jobs that are in progress."
---
-A streaming job is a job that creates an index, a materialized view, a table, a sink, or a source with connectors. You need to specify the IDs of the jobs that you want to cancel. You can use [SHOW JOBS](/docs/current/sql-show-jobs/) to get the IDs of the jobs that are in progress.
+A streaming job is a job that creates an index, a materialized view, a table, a sink, or a source with connectors. You need to specify the IDs of the jobs that you want to cancel. You can use [SHOW JOBS](/sql/commands/sql-show-jobs) to get the IDs of the jobs that are in progress.
## Syntax
@@ -37,7 +37,7 @@ Id
diff --git a/sql/commands/sql-comment-on.mdx b/sql/commands/sql-comment-on.mdx
index b21b16b2..1d065c38 100644
--- a/sql/commands/sql-comment-on.mdx
+++ b/sql/commands/sql-comment-on.mdx
@@ -67,6 +67,6 @@ DESCRIBE t1;
## Related topics
+ iconType="solid" href="/sql/commands/sql-describe" horizontal/>
diff --git a/sql/commands/sql-commit.mdx b/sql/commands/sql-commit.mdx
index 83fc1067..831417b5 100644
--- a/sql/commands/sql-commit.mdx
+++ b/sql/commands/sql-commit.mdx
@@ -35,7 +35,7 @@ COMMIT
title="BEGIN"
icon="play"
iconType="solid"
- href="/docs/current/sql-begin/"
+ href="/sql/commands/sql-begin"
horizontal
/>
Drop a user-defined aggregate function
@@ -146,7 +146,7 @@ select * from mv;
title="CREATE FUNCTION"
icon="plus"
iconType="solid"
- href="/docs/current/sql-create-function/"
+ href="/sql/commands/sql-create-function"
>
Create a user-defined scalar or table function
diff --git a/sql/commands/sql-create-database.mdx b/sql/commands/sql-create-database.mdx
index dc569f6d..95781b53 100644
--- a/sql/commands/sql-create-database.mdx
+++ b/sql/commands/sql-create-database.mdx
@@ -26,6 +26,6 @@ CREATE DATABASE IF NOT EXISTS travel
```
-Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/docs/current/sql-identifiers/).
+Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/sql/identifiers).
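For instance (sketch):

```sql
-- Unquoted identifiers fold to lower case; quoting preserves case.
CREATE DATABASE Travel;      -- created as travel
CREATE DATABASE "Travel";    -- created as Travel, a distinct database
```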
diff --git a/sql/commands/sql-create-function.mdx b/sql/commands/sql-create-function.mdx
index 6ea8f64f..b3b670ab 100644
--- a/sql/commands/sql-create-function.mdx
+++ b/sql/commands/sql-create-function.mdx
@@ -118,7 +118,7 @@ CREATE FUNCTION function_name ( argument_type [, ...] )
title="SHOW FUNCTIONS"
icon="list"
iconType="solid"
- href="/docs/current/sql-show-functions/"
+ href="/sql/commands/sql-show-functions"
>
Show all user-defined functions
@@ -126,7 +126,7 @@ CREATE FUNCTION function_name ( argument_type [, ...] )
title="DROP FUNCTION"
icon="trash"
iconType="solid"
- href="/docs/current/sql-drop-function/"
+ href="/sql/commands/sql-drop-function"
>
Drop a user-defined function
diff --git a/sql/commands/sql-create-index.mdx b/sql/commands/sql-create-index.mdx
index c45d0a15..9b3c2c27 100644
--- a/sql/commands/sql-create-index.mdx
+++ b/sql/commands/sql-create-index.mdx
@@ -1,6 +1,6 @@
---
title: "CREATE INDEX"
-description: "Use the `CREATE INDEX` command to construct an [index](/docs/current/indexes/) on a table or a materialized view."
+description: "Use the `CREATE INDEX` command to construct an [index](/processing/indexes) on a table or a materialized view."
---
## Syntax
@@ -20,8 +20,8 @@ CREATE INDEX [ IF NOT EXISTS ] index_name ON object_name ( index_column [ ASC |
| _object\_name_ | The name of the table or materialized view where the index is created. |
| _index\_column_ | The name of the column on which the index is created. |
| **DESC** | Sort the data returned in descending order. |
-| **INCLUDE** clause | Specify the columns to include in the index as non-key columns.An index-only query can return the values of non-key columns without having to visit the indexed table thus improving the performance.If you omit the INCLUDE clause, all columns of the table or materialized view will be indexed. This is recommended in RisingWave.If you only want to include the index\_column, use CREATE INDEX ON object\_name(index\_column) INCLUDE(index\_column);.See [How to decide which columns to include](/docs/current/indexes/#how-to-decide-which-columns-to-include) for more information. |
-| **DISTRIBUTED BY** clause | Specify the index distribution key.As a distributed database, RisingWave distributes the data across multiple nodes. When an index is created, the distribution key is used to determine how the data should be distributed across these nodes.If you omit the DISTRIBUTED BY clause, the first index column will be be used as the default distribution key.distributed\_column has to be the prefix of index\_column.See [How to decide the index distribution key](/docs/current/indexes/#how-to-decide-the-index-distribution-key) for more information. |
+| **INCLUDE** clause | Specify the columns to include in the index as non-key columns. An index-only query can return the values of non-key columns without having to visit the indexed table, thus improving performance. If you omit the INCLUDE clause, all columns of the table or materialized view will be indexed. This is recommended in RisingWave. If you only want to include the index\_column, use CREATE INDEX ON object\_name(index\_column) INCLUDE(index\_column);. See [How to decide which columns to include](/processing/indexes#how-to-decide-which-columns-to-include) for more information. |
+| **DISTRIBUTED BY** clause | Specify the index distribution key. As a distributed database, RisingWave distributes the data across multiple nodes. When an index is created, the distribution key is used to determine how the data should be distributed across these nodes. If you omit the DISTRIBUTED BY clause, the first index column will be used as the default distribution key. distributed\_column has to be a prefix of index\_column. See [How to decide the index distribution key](/processing/indexes#how-to-decide-the-index-distribution-key) for more information. A combined sketch follows this table. |
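As promised above, a quick sketch combining both clauses ahead of the fuller examples below (the table and columns are hypothetical):

```sql
-- Covering index: the query can be answered from the index alone,
-- distributed across nodes by c_phone.
CREATE INDEX idx_customers_phone ON customers (c_phone)
INCLUDE (c_name, c_address)
DISTRIBUTED BY (c_phone);
```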
## Examples
@@ -73,4 +73,4 @@ SELECT * FROM customers JOIN orders ON c_custkey = o_custkey
WHERE c_phone = '123456789';
```
-RisingWave supports creating indexes on expressions. For more details, see [Indexes on expressions](/docs/current/indexes/#Indexes-on-expressions).
+RisingWave supports creating indexes on expressions. For more details, see [Indexes on expressions](/processing/indexes#indexes-on-expressions).
diff --git a/sql/commands/sql-create-mv.mdx b/sql/commands/sql-create-mv.mdx
index 4c392590..a2b4b0d4 100644
--- a/sql/commands/sql-create-mv.mdx
+++ b/sql/commands/sql-create-mv.mdx
@@ -19,10 +19,10 @@ To perform the operations in the background, you can execute `SET BACKGROUND_DDL
| Parameter or clause | Description |
| :------------------ | :------------------------------------------------------------------------------------------------------------------------------------------------------- |
| _mv\_name_ | The name of the materialized view to be created. |
-| _select\_query_ | A SELECT query that retrieves data for the materialized view. See [SELECT](/docs/current/sql-select/) for the syntax and examples of the SELECT command. |
+| _select\_query_ | A SELECT query that retrieves data for the materialized view. See [SELECT](/sql/commands/sql-select) for the syntax and examples of the SELECT command. |
-Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/docs/current/sql-identifiers/).
+Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/sql/identifiers).
@@ -39,7 +39,7 @@ Refer to this [tutorial](/docs/current/server-performance-anomaly-detection/) fo
diff --git a/sql/commands/sql-create-schema.mdx b/sql/commands/sql-create-schema.mdx
index 74872311..f9a015a1 100644
--- a/sql/commands/sql-create-schema.mdx
+++ b/sql/commands/sql-create-schema.mdx
@@ -28,7 +28,7 @@ CREATE SCHEMA IF NOT EXISTS schema_1;
```
-Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/docs/current/sql-identifiers/).
+Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/sql/identifiers).
diff --git a/sql/commands/sql-create-secret.mdx b/sql/commands/sql-create-secret.mdx
index 69504a94..a8f0dfc6 100644
--- a/sql/commands/sql-create-secret.mdx
+++ b/sql/commands/sql-create-secret.mdx
@@ -60,7 +60,7 @@ SHOW CREATE SOURCE mysql_source;
title="DROP SECRET"
icon="trash"
iconType="solid"
- href="/docs/current/sql-drop-secret/"
+ href="/sql/commands/sql-drop-secret"
>
Dropping a secret
diff --git a/sql/commands/sql-create-sink-into.mdx b/sql/commands/sql-create-sink-into.mdx
index 170a7929..d757d514 100644
--- a/sql/commands/sql-create-sink-into.mdx
+++ b/sql/commands/sql-create-sink-into.mdx
@@ -84,7 +84,7 @@ DROP SINK orders_sink0;
title="DROP SINK"
icon="trash"
icontype="solid"
- href="/docs/current/sql-drop-sink/"
+ href="/sql/commands/sql-drop-sink"
>
Remove a sink
@@ -92,7 +92,7 @@ DROP SINK orders_sink0;
title="SHOW CREATE SINK"
icon="eye"
icontype="solid"
- href="/docs/current/sql-show-create-sink/"
+ href="/sql/commands/sql-show-create-sink"
>
Show the SQL statement used to create a sink
diff --git a/sql/commands/sql-create-sink.mdx b/sql/commands/sql-create-sink.mdx
index 4b788380..0f952c95 100644
--- a/sql/commands/sql-create-sink.mdx
+++ b/sql/commands/sql-create-sink.mdx
@@ -70,7 +70,7 @@ Click a sink name to see the SQL syntax, options, and sample statement of sinkin
Remove a sink
@@ -78,7 +78,7 @@ Click a sink name to see the SQL syntax, options, and sample statement of sinkin
Show the SQL statement used to create a sink
@@ -86,7 +86,7 @@ Click a sink name to see the SQL syntax, options, and sample statement of sinkin
Create a sink into RisingWave's table
diff --git a/sql/commands/sql-create-source.mdx b/sql/commands/sql-create-source.mdx
index 58afc3d8..ec1ff3ef 100644
--- a/sql/commands/sql-create-source.mdx
+++ b/sql/commands/sql-create-source.mdx
@@ -29,9 +29,9 @@ CREATE SOURCE [ IF NOT EXISTS ] source_name (
## Notes
-A [generated column](/docs/current/query-syntax-generated-columns/) is defined with non-deterministic functions. When the data is ingested, the function will be evaluated to generate the value of this field.
+When a [generated column](/sql/query-syntax/generated-columns) is defined with non-deterministic functions, the function is evaluated at ingestion time to generate the value of this field.
-Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/docs/current/sql-identifiers/).
+Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/sql/identifiers).
To know when a data record is loaded to RisingWave, you can define a column that is generated based on the processing time (` timestamptz AS proctime()`) when creating the table or source. See also [proctime()](/sql/functions/datetime#proctime).
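A brief sketch combining both notes, with `datagen` standing in for a real connector:

```sql
-- load_time is generated in RisingWave at ingestion time via proctime().
CREATE SOURCE s_with_proctime (
  v INT,
  load_time TIMESTAMPTZ AS proctime()
) WITH (connector = 'datagen') FORMAT PLAIN ENCODE JSON;
```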
@@ -64,9 +64,9 @@ The generated column is created in RisingWave and will not be accessed through t
| _source\_name_ | The name of the source. If a schema name is given (for example, CREATE SOURCE \.\
## Watermarks
-RisingWave supports generating watermarks when creating a source. Watermarks are like markers or signals that track the progress of event time, allowing you to process events within their corresponding time windows. The [WATERMARK](/docs/current/watermarks/) clause should be used within the `schema_definition`. For more information on how to create a watermark, see [Watermarks](/docs/current/watermarks/).
+RisingWave supports generating watermarks when creating a source. Watermarks are like markers or signals that track the progress of event time, allowing you to process events within their corresponding time windows. The [WATERMARK](/processing/watermarks) clause should be used within the `schema_definition`. For more information on how to create a watermark, see [Watermarks](/processing/watermarks).
## Change Data Capture (CDC)
@@ -181,7 +181,7 @@ Shared sources do not support `ALTER SOURCE`. Use non-shared sources if you requ
title="DROP SOURCE"
icon="trash"
iconType="solid"
- href="/docs/current/sql-drop-source/"
+ href="/sql/commands/sql-drop-source"
>
Remove a source
@@ -189,7 +189,7 @@ Shared sources do not support `ALTER SOURCE`. Use non-shared sources if you requ
title="SHOW CREATE SOURCE"
icon="eye"
iconType="solid"
- href="/docs/current/sql-show-create-source/"
+ href="/sql/commands/sql-show-create-source"
>
Show the SQL statement used to create a source
diff --git a/sql/commands/sql-create-table.mdx b/sql/commands/sql-create-table.mdx
index 9844a045..0f5e0158 100644
--- a/sql/commands/sql-create-table.mdx
+++ b/sql/commands/sql-create-table.mdx
@@ -3,12 +3,12 @@ title: "CREATE TABLE"
description: "Use the `CREATE TABLE` command to create a new table. Tables consist of fixed columns and insertable rows. "
---
-Rows can be added using the [INSERT](/docs/current/sql-insert/) command. When creating a table, you can specify connector settings and data format.
+Rows can be added using the [INSERT](/sql/commands/sql-insert) command. When creating a table, you can specify connector settings and data format.
-If you choose not to persist the data from the source in RisingWave, use [CREATE SOURCE](/docs/current/sql-create-source/) instead. For more details about the differences between sources and tables, see [here](/docs/current/data-ingestion/#table-with-connectors).
+If you choose not to persist the data from the source in RisingWave, use [CREATE SOURCE](/sql/commands/sql-create-source) instead. For more details about the differences between sources and tables, see [here](/docs/current/data-ingestion/#table-with-connectors).
## Syntax
@@ -36,11 +36,11 @@ CREATE TABLE [ IF NOT EXISTS ] table_name (
- For tables with primary key constraints, if you insert a new data record with an existing key, the new record will overwrite the existing record.
-- A [generated column](/docs/current/query-syntax-generated-columns/) that is defined with non-deterministic functions cannot be specified as part of the primary key. For example, if `A1` is defined as `current_timestamp()`, then it cannot be part of the primary key.
+- A [generated column](/sql/query-syntax/generated-columns) that is defined with non-deterministic functions cannot be specified as part of the primary key. For example, if `A1` is defined as `current_timestamp()`, then it cannot be part of the primary key.
-- Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/docs/current/sql-identifiers/).
+- Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/sql/identifiers).
-- The syntax for creating a table with connector settings and the supported connectors are the same as for creating a source. See [CREATE SOURCE](/docs/current/sql-create-source/) for a full list of supported connectors and data formats.
+- The syntax for creating a table with connector settings and the supported connectors are the same as for creating a source. See [CREATE SOURCE](/sql/commands/sql-create-source) for a full list of supported connectors and data formats.
- To know when a data record is loaded to RisingWave, you can define a column that is generated based on the processing time (` timestamptz AS proctime()`) when creating the table or source. See also [proctime()](/sql/functions/datetime#proctime).
@@ -72,13 +72,13 @@ CREATE TABLE [ IF NOT EXISTS ] table_name (
| col\_name | The name of a column. |
| data\_type | The data type of a column. With the struct data type, you can create a nested table. Elements in a nested table need to be enclosed with angle brackets (\<>). |
| DEFAULT | The DEFAULT clause allows you to assign a default value to a column. This default value is used when a new row is inserted, and no explicit value is provided for that column. default\_expr is any constant value or variable-free expression that does not reference other columns in the current table or involve subqueries. The data type of default\_expr must match the data type of the column. |
-| generation\_expression | The expression for the generated column. For details about generated columns, see [Generated columns](/docs/current/query-syntax-generated-columns/). |
-| watermark\_clause | A clause that defines the watermark for a timestamp column. The syntax is WATERMARK FOR column\_name as expr. For the watermark clause to be valid, the table must be an append-only table. That is, the APPEND ONLY option must be specified. This restriction only applies to a table. For details about watermarks, refer to [Watermarks](/docs/current/watermarks/). |
+| generation\_expression | The expression for the generated column. For details about generated columns, see [Generated columns](/sql/query-syntax/generated-columns). |
+| watermark\_clause | A clause that defines the watermark for a timestamp column. The syntax is WATERMARK FOR column\_name as expr. For the watermark clause to be valid, the table must be an append-only table. That is, the APPEND ONLY option must be specified. This restriction only applies to a table. For details about watermarks, refer to [Watermarks](/processing/watermarks). |
| APPEND ONLY | When this option is specified, the table will be created as an append-only table. An append-only table cannot have primary keys. UPDATE and DELETE statements are not valid for append-only tables. Note that append-only tables are in the [public preview stage](/changelog/product-lifecycle#features-in-the-public-preview-stage). |
| ON CONFLICT | Specify the alternative action when the newly inserted record brings a violation of PRIMARY KEY constraint on the table. See [PK conflict behavior](#pk-conflict-behavior) below for more information. |
-| **INCLUDE** clause | Extract fields not included in the payload as separate columns. For more details on its usage, see [INCLUDE clause](/docs/current/include-clause/). |
+| **INCLUDE** clause | Extract fields not included in the payload as separate columns. For more details on its usage, see [INCLUDE clause](/ingestion/ingest-additional-fields-with-include-clause). |
| **WITH** clause | Specify the connector settings here if trying to store all the source data. See the [Data ingestion](/docs/current/data-ingestion/) page for the full list of supported sources as well as links to specific connector pages detailing the syntax for each source. |
-| **FORMAT** and **ENCODE** options | Specify the data format and the encoding format of the source data. To learn about the supported data formats, see [Data formats](/docs/current/sql-create-source/#supported-formats). |
+| **FORMAT** and **ENCODE** options | Specify the data format and the encoding format of the source data. To learn about the supported data formats, see [Data formats](/ingestion/supported-sources-and-formats#supported-formats). |
Please distinguish between the parameters set in the FORMAT and ENCODE options and those set in the WITH clause. Ensure that you place them correctly and avoid any misuse.
@@ -86,11 +86,11 @@ Please distinguish between the parameters set in the FORMAT and ENCODE options a
## Watermarks
-RisingWave supports generating watermarks when creating an append-only streaming table. Watermarks are like markers or signals that track the progress of event time, allowing you to process events within their corresponding time windows. For more information on the syntax on how to create a watermark, see [Watermarks](/docs/current/watermarks/).
+RisingWave supports generating watermarks when creating an append-only streaming table. Watermarks are like markers or signals that track the progress of event time, allowing you to process events within their corresponding time windows. For more information on the syntax for creating a watermark, see [Watermarks](/processing/watermarks).
## PK conflict behavior
-The record with insert operation could introduce duplicate records with the same primary key in the table. In that case, an alternative action specified by the `ON CONFLICT` clause will be adopted. The record can come from Insert DML statement, external connectors of the table, or sinks into the table [CREATE SINK INTO](/docs/current/sql-create-sink-into/).
+An insert operation may introduce records that duplicate the primary key of existing rows in the table. In that case, the alternative action specified by the `ON CONFLICT` clause is adopted. Such records can come from an `INSERT` DML statement, the table's external connectors, or sinks into the table ([CREATE SINK INTO](/sql/commands/sql-create-sink-into)).
The action can be one of the following. A column that is not part of the primary key can be specified as the version column for `DO UPDATE FULL` and `DO UPDATE IF NOT NULL`. When a version column is specified, the insert operation takes effect only when the newly inserted value in that column is greater than or equal to the value in the existing data record.
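+
+As a hedged illustration of the version-column behavior described above (the table and column names are assumptions for the example):
+
+```sql
+-- With a version column, an incoming row takes effect only when its
+-- "version" value is greater than or equal to that of the stored row.
+CREATE TABLE inventory (
+    item_id INT PRIMARY KEY,
+    quantity INT,
+    version INT
+) ON CONFLICT DO UPDATE IF NOT NULL WITH VERSION COLUMN(version);
+```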
diff --git a/sql/commands/sql-create-user.mdx b/sql/commands/sql-create-user.mdx
index d4767418..bdea0fd7 100644
--- a/sql/commands/sql-create-user.mdx
+++ b/sql/commands/sql-create-user.mdx
@@ -74,7 +74,7 @@ psql -h localhost -p 4566 -d dev -U user1
Enter the password to log in.
-Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/docs/current/sql-identifiers/).
+Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/sql/identifiers).
### Create a user with OAuth authentication
diff --git a/sql/commands/sql-create-view.mdx b/sql/commands/sql-create-view.mdx
index c798bbed..0297d08f 100644
--- a/sql/commands/sql-create-view.mdx
+++ b/sql/commands/sql-create-view.mdx
@@ -14,11 +14,11 @@ CREATE VIEW [IF NOT EXISTS] view_name [ ( column_name [, ...] ) ] AS select_quer
| :-------------- | :------------------------------------------------------------------------------------------------------------------------------------------ |
| _mv\_name_ | The name of the view to be created. |
| _column\_name_ | Specify the columns of the view. |
-| _select\_query_ | A SELECT query that retrieves data for the view. See [SELECT](/docs/current/sql-select/) for the syntax and examples of the SELECT command. |
+| _select\_query_ | A SELECT query that retrieves data for the view. See [SELECT](/sql/commands/sql-select) for the syntax and examples of the SELECT command. |
## Examples
-The following statements create views based a plain table and a table with connector settings, and then create a new view based on the existing views. The data for the table is generated by the built-in [load generator](/docs/current/ingest-from-datagen/).
+The following statements create views based on a plain table and a table with connector settings, and then create a new view based on the existing views. The data for the table is generated by the built-in [load generator](/ingestion/generate-test-data).
```sql
-- Create a table and add some records.
@@ -64,5 +64,5 @@ SELECT * FROM v3;
```
-Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/docs/current/sql-identifiers/).
+Names and unquoted identifiers are case-insensitive. Therefore, you must double-quote any of these fields for them to be case-sensitive. See also [Identifiers](/sql/identifiers).
diff --git a/sql/commands/sql-delete.mdx b/sql/commands/sql-delete.mdx
index 8f355b3b..93b61914 100644
--- a/sql/commands/sql-delete.mdx
+++ b/sql/commands/sql-delete.mdx
@@ -6,7 +6,7 @@ description: "Use the `DELETE` command to permanently remove rows from a table."
-Call [FLUSH](/docs/current/sql-flush/) after `DELETE` to persist the changes to storage. This ensures that the changes are committed and visible for subsequent reads.
+Call [FLUSH](/sql/commands/sql-flush) after `DELETE` to persist the changes to storage. This ensures that the changes are committed and visible for subsequent reads.
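+
+A minimal sketch of this pattern (the table and predicate are illustrative):
+
+```sql
+-- Remove the rows, then persist the change so subsequent reads see it.
+DELETE FROM orders WHERE order_id = 1001;
+FLUSH;
+```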
## Syntax
diff --git a/sql/commands/sql-describe.mdx b/sql/commands/sql-describe.mdx
index b5155b49..1a9dfcc8 100644
--- a/sql/commands/sql-describe.mdx
+++ b/sql/commands/sql-describe.mdx
@@ -3,7 +3,7 @@ title: "DESCRIBE"
description: "Use the `DESCRIBE` command to view columns in the specified table, source, sink, view, or materialized view."
---
-`DESCRIBE` is a shortcut for [SHOW COLUMNS](/docs/current/sql-show-columns/).
+`DESCRIBE` is a shortcut for [SHOW COLUMNS](/sql/commands/sql-show-columns).
`DESCRIBE` also lists the indexes on a table or materialized view, whereas `SHOW COLUMNS` doesn't.
diff --git a/sql/commands/sql-drop-aggregate.mdx b/sql/commands/sql-drop-aggregate.mdx
index f025743a..3ae2d3c8 100644
--- a/sql/commands/sql-drop-aggregate.mdx
+++ b/sql/commands/sql-drop-aggregate.mdx
@@ -27,7 +27,7 @@ DROP AGGREGATE function_name ( argument_type [, ...] );
```sql
DROP AGGREGATE function_name;
```
-You can run [SHOW FUNCTIONS;](/docs/current/sql-show-functions/) to list all existing UDFs to see if a function name is unique.
+You can run [SHOW FUNCTIONS;](/sql/commands/sql-show-functions) to list all existing UDFs to see if a function name is unique.
## See also
@@ -36,7 +36,7 @@ You can run [SHOW FUNCTIONS;](/docs/current/sql-show-functions/) to list all exi
title="CREATE AGGREGATE"
icon="plus"
iconType="solid"
- href="/docs/current/sql-create-aggregate/"
+ href="/sql/commands/sql-create-aggregate"
>
Create a user-defined aggregate function
@@ -44,7 +44,7 @@ You can run [SHOW FUNCTIONS;](/docs/current/sql-show-functions/) to list all exi
title="DROP FUNCTIONS"
icon="trash"
iconType="solid"
- href="/docs/current/sql-drop-function/"
+ href="/sql/commands/sql-drop-function"
>
Drop a user-defined function
diff --git a/sql/commands/sql-drop-connection.mdx b/sql/commands/sql-drop-connection.mdx
index df9f6ab1..b07515f7 100644
--- a/sql/commands/sql-drop-connection.mdx
+++ b/sql/commands/sql-drop-connection.mdx
@@ -30,7 +30,7 @@ DROP CONNECTION c1;
title="CREATE CONNECTION"
icon="plug"
iconType="solid"
- href="/docs/current/sql-create-connection/"
+ href="/sql/commands/sql-create-connection"
>
Create a connection
diff --git a/sql/commands/sql-drop-database.mdx b/sql/commands/sql-drop-database.mdx
index eb75c529..c8643297 100644
--- a/sql/commands/sql-drop-database.mdx
+++ b/sql/commands/sql-drop-database.mdx
@@ -3,7 +3,7 @@ title: "DROP DATABASE"
description: "Use the `DROP DATABASE` command to remove a database from your RisingWave instance."
---
-Before you can remove a database, you must use [DROP SCHEMA](/docs/current/sql-drop-schema/) to remove all its dependent schemas.
+Before you can remove a database, you must use [DROP SCHEMA](/sql/commands/sql-drop-schema) to remove all its dependent schemas.
@@ -22,7 +22,7 @@ DROP DATABASE [ IF EXISTS ] database_name;
| Parameter or clause | Description |
| :------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------- |
| **IF EXISTS** clause | Do not return an error if the specified database does not exist. |
-| _database\_name_ | The name of the database you want to remove. You can use [SHOW DATABASES](/docs/current/sql-show-databases/) to get a list of all available databases. |
+| _database\_name_ | The name of the database you want to remove. You can use [SHOW DATABASES](/sql/commands/sql-show-databases) to get a list of all available databases. |
## Examples
diff --git a/sql/commands/sql-drop-function.mdx b/sql/commands/sql-drop-function.mdx
index ae388af9..d9e665af 100644
--- a/sql/commands/sql-drop-function.mdx
+++ b/sql/commands/sql-drop-function.mdx
@@ -28,7 +28,7 @@ DROP FUNCTION function_name ( argument_type [, ...] );
```sql
DROP FUNCTION function_name;
```
-You can run [SHOW FUNCTIONS;](/docs/current/sql-show-functions/) to list all existing UDFs to see if a function name is unique.
+You can run [SHOW FUNCTIONS;](/sql/commands/sql-show-functions) to list all existing UDFs to see if a function name is unique.
`DROP FUNCTION function_name();` drops a function with zero arguments.
@@ -38,7 +38,7 @@ You can run [SHOW FUNCTIONS;](/docs/current/sql-show-functions/) to list all exi
## Examples
-First, let's [create some functions](/docs/current/sql-create-function/).
+First, let's [create some functions](/sql/commands/sql-create-function).
```sql
CREATE FUNCTION f1() RETURNS real LANGUAGE python AS func1 USING LINK 'http://localhost:8815';
@@ -93,7 +93,7 @@ DROP FUNCTION f1;
title="SHOW FUNCTIONS"
icon="list"
iconType="solid"
- href="/docs/current/sql-show-functions/"
+ href="/sql/commands/sql-show-functions"
>
Show all existing UDFs
diff --git a/sql/commands/sql-drop-index.mdx b/sql/commands/sql-drop-index.mdx
index e23c54c9..d39f28cd 100644
--- a/sql/commands/sql-drop-index.mdx
+++ b/sql/commands/sql-drop-index.mdx
@@ -14,8 +14,8 @@ DROP INDEX [ IF EXISTS ] [ schema_name.]index_name [ CASCADE ];
| Parameter | Description |
| :------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **IF EXISTS** clause | Do not return an error if the specified index does not exist. |
-| _schema\_name_ | The schema of the index that you want to remove. You can use [SHOW SCHEMAS](/docs/current/sql-show-schemas/) to get a list of all available schemas. If you don't specify a schema, the specified index in the default schema public will be removed. |
-| _index\_name_ | The name of the index to remove. You can use [DESCRIBE](/docs/current/sql-describe/) to show the indexes of a table. |
+| _schema\_name_ | The schema of the index that you want to remove. You can use [SHOW SCHEMAS](/sql/commands/sql-show-schemas) to get a list of all available schemas. If you don't specify a schema, the specified index in the default schema public will be removed. |
+| _index\_name_ | The name of the index to remove. You can use [DESCRIBE](/sql/commands/sql-describe) to show the indexes of a table. |
| **CASCADE** option | If this option is specified, all objects that depend on the index, and in turn all objects that depend on those objects will be dropped. |
## Examples
@@ -39,7 +39,7 @@ DROP INDEX rw_schema.id_index;
title="CREATE INDEX"
icon="database"
iconType="solid"
- href="/docs/current/sql-create-index/"
+ href="/sql/commands/sql-create-index"
>
Construct an index on a table or a materialized view to speed up queries
diff --git a/sql/commands/sql-drop-mv.mdx b/sql/commands/sql-drop-mv.mdx
index 4030de15..27646474 100644
--- a/sql/commands/sql-drop-mv.mdx
+++ b/sql/commands/sql-drop-mv.mdx
@@ -15,8 +15,8 @@ DROP MATERIALIZED VIEW [ IF EXISTS ] [schema_name.]mv_name [ CASCADE ];
| Parameter | Description |
| :----------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| _schema\_name_ | Specify the name of a schema to remove the materialized view in that schema. You can use [SHOW SCHEMAS](/docs/current/sql-show-schemas/) to get a list of all available schemas. If you don't specify a schema, the specified materialized view in the default schema public will be removed. |
-| _mv\_name_ | The name of the materialized view to remove. You can use [SHOW MATERIALIZED VIEWS](/docs/current/sql-show-mv/) to get a list of all available materialized views. |
+| _schema\_name_ | Specify the name of a schema to remove the materialized view in that schema. You can use [SHOW SCHEMAS](/sql/commands/sql-show-schemas) to get a list of all available schemas. If you don't specify a schema, the specified materialized view in the default schema public will be removed. |
+| _mv\_name_ | The name of the materialized view to remove. You can use [SHOW MATERIALIZED VIEWS](/sql/commands/sql-show-mv) to get a list of all available materialized views. |
| **CASCADE** option | If this option is specified, all objects (such as other materialized views or regular views) that depend on the materialized view, and in turn all objects that depend on those objects will be dropped. |
## Examples
diff --git a/sql/commands/sql-drop-schema.mdx b/sql/commands/sql-drop-schema.mdx
index 95d8a729..a2c8a6cf 100644
--- a/sql/commands/sql-drop-schema.mdx
+++ b/sql/commands/sql-drop-schema.mdx
@@ -16,8 +16,8 @@ DROP SCHEMA [ IF EXISTS ] [database_name.]schema_name;
| Parameter or clause | Description |
| :------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **IF EXISTS** clause | Do not return an error if the specified schema does not exist. |
-| _database_ | Specify the name of a database to remove the schema in that database. You can use [SHOW DATABASES](/docs/current/sql-show-databases/) to get a list of all available databases. If you don't specify a database, the specified schema in the default database will be removed. |
-| _schema_ | The name of the schema you want to remove. The default schema is public. You can use [SHOW SCHEMAS](/docs/current/sql-show-schemas/) to get a list of all available schemas. |
+| _database_ | Specify the name of a database to remove the schema in that database. You can use [SHOW DATABASES](/sql/commands/sql-show-databases) to get a list of all available databases. If you don't specify a database, the specified schema in the default database will be removed. |
+| _schema_ | The name of the schema you want to remove. The default schema is public. You can use [SHOW SCHEMAS](/sql/commands/sql-show-schemas) to get a list of all available schemas. |
## Examples
diff --git a/sql/commands/sql-drop-secret.mdx b/sql/commands/sql-drop-secret.mdx
index 7713c74a..bb647e0d 100644
--- a/sql/commands/sql-drop-secret.mdx
+++ b/sql/commands/sql-drop-secret.mdx
@@ -42,7 +42,7 @@ DROP_SECRET
title="CREATE SECRET"
icon="plus"
iconType="solid"
- href="/docs/current/sql-create-secret/"
+ href="/sql/commands/sql-create-secret"
>
Create a secret
diff --git a/sql/commands/sql-drop-sink.mdx b/sql/commands/sql-drop-sink.mdx
index abdcfe35..3effbfbd 100644
--- a/sql/commands/sql-drop-sink.mdx
+++ b/sql/commands/sql-drop-sink.mdx
@@ -13,7 +13,7 @@ DROP SINK [ IF EXISTS ] [schema_name.]sink_name [ CASCADE ];
| Parameter | Description |
| :----------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| _schema\_name_ | The schema of the sink that you want to remove. You can use [SHOW SCHEMAS](/docs/current/sql-show-schemas/) to get a list of all available schemas. If you don't specify a schema, the specified sink in the default schema public will be removed. |
+| _schema\_name_ | The schema of the sink that you want to remove. You can use [SHOW SCHEMAS](/sql/commands/sql-show-schemas) to get a list of all available schemas. If you don't specify a schema, the specified sink in the default schema public will be removed. |
| _sink\_name_ | The name of the sink to remove. |
| **CASCADE** option | If this option is specified, all objects (such as materialized views) that depend on the sink, and in turn all objects that depend on those objects will be dropped. |
diff --git a/sql/commands/sql-drop-source.mdx b/sql/commands/sql-drop-source.mdx
index d3b0a47a..4e4b26be 100644
--- a/sql/commands/sql-drop-source.mdx
+++ b/sql/commands/sql-drop-source.mdx
@@ -1,9 +1,9 @@
---
title: "DROP SOURCE"
-description: "Use the `DROP SOURCE` command to remove a [source](/docs/current/sql-create-source/) if you no longer need the data inflow from the source."
+description: "Use the `DROP SOURCE` command to remove a [source](/sql/commands/sql-create-source) if you no longer need the data inflow from the source."
---
-Before you can remove a source, you must use [DROP MATERIALIZED VIEW](/docs/current/sql-drop-mv/) to remove all its dependent materialized views.
+Before you can remove a source, you must use [DROP MATERIALIZED VIEW](/sql/commands/sql-drop-mv) to remove all its dependent materialized views.
## Syntax
@@ -15,7 +15,7 @@ DROP SOURCE [ IF EXISTS ] [schema_name.]source_name [ CASCADE ];
| Parameter | Description |
| :----------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| _schema\_name_ | The schema of the source that you want to remove. You can use [SHOW SCHEMAS](/docs/current/sql-show-schemas/) to get a list of all available schemas. If you don't specify a schema, the specified source in the default schema public will be removed. |
+| _schema\_name_ | The schema of the source that you want to remove. You can use [SHOW SCHEMAS](/sql/commands/sql-show-schemas) to get a list of all available schemas. If you don't specify a schema, the specified source in the default schema public will be removed. |
| _source\_name_ | The name of the source to remove. |
| **CASCADE** option | If this option is specified, all objects (such as materialized views) that depend on the source, and in turn all objects that depend on those objects will be dropped. |
@@ -40,7 +40,7 @@ DROP SOURCE IF EXISTS rw_schema.rw_source;
title="CREATE SOURCE"
icon="circle-plus"
icontype="solid"
- href="/docs/current/sql-create-source/"
+ href="/sql/commands/sql-create-source"
>
Create a source
diff --git a/sql/commands/sql-drop-table.mdx b/sql/commands/sql-drop-table.mdx
index a723480e..4dab5da0 100644
--- a/sql/commands/sql-drop-table.mdx
+++ b/sql/commands/sql-drop-table.mdx
@@ -14,8 +14,8 @@ DROP TABLE [ IF EXISTS ] [schema_name.]table_name [ CASCADE ];
| Parameter | Description |
| :----------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| _schema_ | Specify the name of a schema to remove the table in that schema. You can use [SHOW SCHEMAS](/docs/current/sql-show-schemas/) to get a list of all available schemas. If you don't specify a schema, the specified source in the default schema public will be removed. |
-| _table_ | The name of the table to remove. You can use [SHOW TABLES](/docs/current/sql-show-tables/) to get a list of all available tables. |
+| _schema_ | Specify the name of a schema to remove the table in that schema. You can use [SHOW SCHEMAS](/sql/commands/sql-show-schemas) to get a list of all available schemas. If you don't specify a schema, the specified source in the default schema public will be removed. |
+| _table_ | The name of the table to remove. You can use [SHOW TABLES](/sql/commands/sql-show-tables) to get a list of all available tables. |
| **CASCADE** option | If this option is specified, all objects (such as materialized views) that depend on the table, and in turn all objects that depend on those objects will be dropped. |
## Examples
diff --git a/sql/commands/sql-drop-view.mdx b/sql/commands/sql-drop-view.mdx
index ceb08e28..46081dc4 100644
--- a/sql/commands/sql-drop-view.mdx
+++ b/sql/commands/sql-drop-view.mdx
@@ -31,7 +31,7 @@ DROP VIEW IF EXISTS sales_report;
title="CREATE VIEW"
icon="plus"
iconType="solid"
- href="/docs/current/sql-create-view/"
+ href="/sql/commands/sql-create-view"
>
Create a non-materialized view
@@ -39,7 +39,7 @@ DROP VIEW IF EXISTS sales_report;
title="SHOW CREATE VIEW"
icon="eye"
iconType="solid"
- href="/docs/current/sql-show-create-view/"
+ href="/sql/commands/sql-show-create-view"
>
Show query used to create specified view
@@ -47,7 +47,7 @@ DROP VIEW IF EXISTS sales_report;
title="SHOW VIEWS"
icon="list"
iconType="solid"
- href="/docs/current/sql-show-views/"
+ href="/sql/commands/sql-show-views"
>
List existing views in a particular schema
diff --git a/sql/commands/sql-insert.mdx b/sql/commands/sql-insert.mdx
index df132c46..a7354d07 100644
--- a/sql/commands/sql-insert.mdx
+++ b/sql/commands/sql-insert.mdx
@@ -7,7 +7,7 @@ description: "Use the `INSERT` command to insert new rows into an existing table
* For tables with primary keys, if you insert a row with an existing key, the new row will overwrite the existing row.
-* Call [FLUSH](/docs/current/sql-flush/) after `INSERT` to persist the changes to storage. This ensures that the changes are committed and visible for subsequent reads.
+* Call [FLUSH](/sql/commands/sql-flush) after `INSERT` to persist the changes to storage. This ensures that the changes are committed and visible for subsequent reads.
## Syntax
@@ -24,8 +24,8 @@ INSERT INTO table_name [ ( col_name [ , ... ] ) ]
| :------------------ | :----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| _table\_name_ | The table where you want to insert rows. |
| _col\_name_ | The column where you want to insert corresponding values. Currently, you must provide all columns in the table in order or leave this field empty. |
-| _value_ | An expression or value to assign to the corresponding column. You can use [DESCRIBE](/docs/current/sql-describe/) to check the order of the columns in the table. |
-| _select\_query_ | A [SELECT](/docs/current/sql-select/) statement that returns the rows you want to insert to the table. |
+| _value_ | An expression or value to assign to the corresponding column. You can use [DESCRIBE](/sql/commands/sql-describe) to check the order of the columns in the table. |
+| _select\_query_ | A [SELECT](/sql/commands/sql-select) statement that returns the rows you want to insert to the table. |
| **RETURNING** | Returns the values of any column based on each inserted row. |
## Example
diff --git a/sql/commands/sql-select.mdx b/sql/commands/sql-select.mdx
index 5ae2ba06..fc84423c 100644
--- a/sql/commands/sql-select.mdx
+++ b/sql/commands/sql-select.mdx
@@ -31,7 +31,7 @@ Where `from_item` can be:
| Parameter or clause | Description |
| :--------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| **WITH** clause | Provides a way to write supplemental statements for a larger query. For more information, see [WITH clause](/docs/current/query-syntax-with-clause/). |
+| **WITH** clause | Provides a way to write supplemental statements for a larger query. For more information, see [WITH clause](/sql/query-syntax/with-clause). |
| **DISTINCT** clause | This clause eliminates duplicate rows from the result. SELECT DISTINCT eliminates duplicate rows based on **all selected columns**. SELECT DISTINCT ON allows you to specify expressions or columns and returns only the first row for each unique combination. It requires the use of the ORDER BY clause to determine the first row, and the DISTINCT ON expression must match the leftmost ORDER BY expression. The ORDER BY clause will normally contain additional expressions that determine the desired precedence of rows within each DISTINCT ON group. In this case, this expression can be an alternative with group [topN](/docs/current/sql-pattern-topn/) when "N=1". See [examples of this clause](#distinct-clause) below to know more about it. |
| **EXCEPT** clause | Exclude one or more columns from the result set. By specifying _except\_column_, the query will return all columns in the result set except those specified. |
| _expression_ | A column or an expression. |
diff --git a/sql/commands/sql-set-background-ddl.mdx b/sql/commands/sql-set-background-ddl.mdx
index 08f3c2c9..90c952b5 100644
--- a/sql/commands/sql-set-background-ddl.mdx
+++ b/sql/commands/sql-set-background-ddl.mdx
@@ -27,7 +27,7 @@ SET BACKGROUND_DDL = { true | false };
title="CREATE MATERIALIZED VIEW"
icon="table"
iconType="solid"
- href="/docs/current/sql-create-mv/"
+ href="/sql/commands/sql-create-mv"
horizontal
/>
Create a connection
diff --git a/sql/commands/sql-show-create-index.mdx b/sql/commands/sql-show-create-index.mdx
index 2d644f10..fbd80fc8 100644
--- a/sql/commands/sql-show-create-index.mdx
+++ b/sql/commands/sql-show-create-index.mdx
@@ -48,7 +48,7 @@ SHOW CREATE INDEX idx1;
title="CREATE INDEX"
icon="database"
iconType="solid"
- href="/docs/current/sql-create-index/"
+ href="/sql/commands/sql-create-index"
horizontal
/>
diff --git a/sql/commands/sql-show-create-mv.mdx b/sql/commands/sql-show-create-mv.mdx
index 83117e27..05de0908 100644
--- a/sql/commands/sql-show-create-mv.mdx
+++ b/sql/commands/sql-show-create-mv.mdx
@@ -33,21 +33,21 @@ Here is the result.
## Related topics
-[SHOW CREATE VIEW](/docs/current/sql-show-create-view/)
+[SHOW CREATE VIEW](/sql/commands/sql-show-create-view)
-[SHOW CREATE TABLE](/docs/current/sql-show-create-table/)
+[SHOW CREATE TABLE](/sql/commands/sql-show-create-table)
diff --git a/sql/commands/sql-show-create-source.mdx b/sql/commands/sql-show-create-source.mdx
index 129451b2..6689574f 100644
--- a/sql/commands/sql-show-create-source.mdx
+++ b/sql/commands/sql-show-create-source.mdx
@@ -22,7 +22,7 @@ SHOW CREATE SOURCE source_name;
title="CREATE SOURCE"
icon="database"
iconType="solid"
- href="/docs/current/sql-create-source/"
+ href="/sql/commands/sql-create-source"
>
Create a source
diff --git a/sql/commands/sql-show-create-table.mdx b/sql/commands/sql-show-create-table.mdx
index 82ca8090..cf481fa1 100644
--- a/sql/commands/sql-show-create-table.mdx
+++ b/sql/commands/sql-show-create-table.mdx
@@ -42,14 +42,14 @@ Here is the result. Note that the `IF NOT EXISTS` clause is omitted while the `W
title="SHOW CREATE MATERIALIZED VIEW"
icon="eye"
iconType="solid"
- href="/docs/current/sql-show-create-mv/"
+ href="/sql/commands/sql-show-create-mv"
/>
diff --git a/sql/commands/sql-show-create-view.mdx b/sql/commands/sql-show-create-view.mdx
index b440d367..b494b59a 100644
--- a/sql/commands/sql-show-create-view.mdx
+++ b/sql/commands/sql-show-create-view.mdx
@@ -38,14 +38,14 @@ Here is the result.
title="SHOW CREATE MATERIALIZED VIEW"
icon="eye"
iconType="solid"
- href="/docs/current/sql-show-create-mv/"
+ href="/sql/commands/sql-show-create-mv"
/>
diff --git a/sql/commands/sql-show-indexes.mdx b/sql/commands/sql-show-indexes.mdx
index a819fdef..b66ae5ce 100644
--- a/sql/commands/sql-show-indexes.mdx
+++ b/sql/commands/sql-show-indexes.mdx
@@ -49,6 +49,6 @@ SHOW INDEXES FROM t3;
title="CREATE INDEX"
icon="database"
iconType="solid"
- href="/docs/current/sql-create-index/"
+ href="/sql/commands/sql-create-index"
/>
diff --git a/sql/commands/sql-show-internal-tables.mdx b/sql/commands/sql-show-internal-tables.mdx
index 6b9fb58b..b3df10d8 100644
--- a/sql/commands/sql-show-internal-tables.mdx
+++ b/sql/commands/sql-show-internal-tables.mdx
@@ -3,7 +3,7 @@ title: "SHOW INTERNAL TABLES"
description: "Use the `SHOW INTERNAL TABLES` command to view the existing internal tables in RisingWave. Internal tables are tables that store intermediate results (also known as internal states) of queries."
---
-In addition to `SHOW INTERNAL TABLES`, you can also use the [rw\_internal\_tables](/docs/current/rw_catalog/introduction#available-risingwave-catalogs) table to display internal table information. This is useful when you need to join internal table information with other data.
+In addition to `SHOW INTERNAL TABLES`, you can also use the [rw\_internal\_tables](/sql/system-catalogs/rw-catalog#available-risingwave-catalogs) table to display internal table information. This is useful when you need to join internal table information with other data.
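+
+For instance, a hedged sketch of querying the catalog directly (the selected columns are assumptions about the catalog's schema):
+
+```sql
+-- List internal tables by name, similar in spirit to SHOW INTERNAL TABLES.
+SELECT id, name FROM rw_internal_tables ORDER BY name;
+```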
## Syntax
diff --git a/sql/commands/sql-show-jobs.mdx b/sql/commands/sql-show-jobs.mdx
index fac02295..6780f96c 100644
--- a/sql/commands/sql-show-jobs.mdx
+++ b/sql/commands/sql-show-jobs.mdx
@@ -3,7 +3,7 @@ title: "SHOW JOBS"
description: "Use `SHOW JOBS` to get all streaming jobs that are in progress, including their IDs, the specific statements, and their progress."
---
-In RisingWave, a streaming job refers to the creation of a materialized view, an index, a table, a sink or a source with connectors. If a streaming job takes too long, you can use the [CANCEL JOBS](/docs/current/sql-cancel-jobs/) to cancel it.
+In RisingWave, a streaming job refers to the creation of a materialized view, an index, a table, a sink, or a source with connectors. If a streaming job takes too long, you can use the [CANCEL JOBS](/sql/commands/sql-cancel-jobs) command to cancel it.
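+
+A short sketch of this workflow (the job ID is hypothetical):
+
+```sql
+-- Inspect in-progress streaming jobs, then cancel one by its ID.
+SHOW JOBS;
+CANCEL JOBS 1022;
+```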
## Syntax
@@ -41,7 +41,7 @@ SHOW JOBS;
title="CANCEL JOBS"
icon="circle-stop"
iconType="solid"
- href="/docs/current/sql-cancel-jobs/"
+ href="/sql/commands/sql-cancel-jobs"
/>
diff --git a/sql/commands/sql-show-views.mdx b/sql/commands/sql-show-views.mdx
index 0565354f..77a7035b 100644
--- a/sql/commands/sql-show-views.mdx
+++ b/sql/commands/sql-show-views.mdx
@@ -46,13 +46,13 @@ v3
title="CREATE VIEW"
icon="circle-plus"
iconType="solid"
- href="/docs/current/sql-create-view/"
+ href="/sql/commands/sql-create-view"
/>
diff --git a/sql/commands/sql-start-transaction.mdx b/sql/commands/sql-start-transaction.mdx
index 6b330ce3..1e0cba54 100644
--- a/sql/commands/sql-start-transaction.mdx
+++ b/sql/commands/sql-start-transaction.mdx
@@ -44,7 +44,7 @@ START_TRANSACTION
title="BEGIN"
icon="play"
iconType="solid"
- href="/docs/current/sql-begin/"
+ href="/sql/commands/sql-begin"
horizontal
/>
@@ -52,7 +52,7 @@ START_TRANSACTION
title="COMMIT"
icon="check"
iconType="solid"
- href="/docs/current/sql-commit/"
+ href="/sql/commands/sql-commit"
horizontal
/>
diff --git a/sql/commands/sql-update.mdx b/sql/commands/sql-update.mdx
index e85640b6..5db5abfa 100644
--- a/sql/commands/sql-update.mdx
+++ b/sql/commands/sql-update.mdx
@@ -7,7 +7,7 @@ description: "Use the `UPDATE` command to modify values of existing rows in a ta
* `UPDATE` cannot modify data in the primary key column of a table.
-* Call [FLUSH](/docs/current/sql-flush/) after `UPDATE` to persist the changes to storage. This ensures that the changes are committed and visible for subsequent reads.
+* Call [FLUSH](/sql/commands/sql-flush) after `UPDATE` to persist the changes to storage. This ensures that the changes are committed and visible for subsequent reads.
## Syntax
diff --git a/sql/data-types/casting.mdx b/sql/data-types/casting.mdx
index 1c7877e8..940604d2 100644
--- a/sql/data-types/casting.mdx
+++ b/sql/data-types/casting.mdx
@@ -6,7 +6,7 @@ mode: wide
* **Implicitly cast to**: Values can be automatically converted to the target type.
* **Assigned to**: Values can be automatically converted when inserted to a column of the target type.
-* **Explicitly cast to**: Values can be converted to the target type using explicit [Type casts](/docs/current/query-syntax-value-exp/#type-casts).
+* **Explicitly cast to**: Values can be converted to the target type using explicit [Type casts](/sql/query-syntax/value-exp#type-casts).
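+
+For example, an explicit cast can be written with the standard `CAST` syntax or the PostgreSQL-style `::` shorthand:
+
+```sql
+SELECT CAST('42' AS INT), '2024-01-01'::DATE;
+```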
| From type | Implicitly cast to | Assigned to | Explicitly cast to |
| :-- | :-- | :-- | :-- |
diff --git a/sql/functions/aggregate.mdx b/sql/functions/aggregate.mdx
index 4ebae69f..b18feeb3 100644
--- a/sql/functions/aggregate.mdx
+++ b/sql/functions/aggregate.mdx
@@ -3,7 +3,7 @@ title: "Aggregate functions"
description: "Aggregate functions compute a single result from a set of input values."
---
-For details about the supported syntaxes of aggregate expressions, see [Aggregate function calls](/docs/current/query-syntax-value-exp/#aggregate-function-calls).
+For details about the supported syntaxes of aggregate expressions, see [Aggregate function calls](/sql/query-syntax/value-exp#aggregate-function-calls).
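+
+A minimal example of a plain aggregate call (the table and column names are illustrative):
+
+```sql
+SELECT count(*), avg(v1), count(DISTINCT v1) FROM t1;
+```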
## General-purpose aggregate functions
@@ -210,7 +210,7 @@ SELECT mode() WITHIN GROUP (ORDER BY column1) FROM table1;
### `percentile_cont`
-At present, `percentile_cont` is not supported for [streaming queries](/docs/current/key-concepts/#streaming-queries) yet.
+At present, `percentile_cont` is not supported for [streaming queries](/reference/key-concepts#streaming-queries).
Computes the continuous percentile, which is a value corresponding to the specified fraction within the ordered set of aggregated argument values. It can interpolate between adjacent input items if needed.
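+
+A sketch in the same style as the `mode()` example above; `0.5` requests the median:
+
+```sql
+SELECT percentile_cont(0.5) WITHIN GROUP (ORDER BY column1) FROM table1;
+```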
diff --git a/sql/functions/sys-admin.mdx b/sql/functions/sys-admin.mdx
index e81afa37..a356b789 100644
--- a/sql/functions/sys-admin.mdx
+++ b/sql/functions/sys-admin.mdx
@@ -102,7 +102,7 @@ t
set_config ( setting_name text, new_value text, is_local boolean ) → text
```
-Sets the parameter `setting_name` to `new_value`, and returns that value. If `is_local` is `true`, the new value will only apply during the current transaction. If you want the new value to apply for the rest of the current session, use `false` instead. This function corresponds to the SQL command [SET](/docs/current/sql-set/).
+Sets the parameter `setting_name` to `new_value`, and returns that value. If `is_local` is `true`, the new value will only apply during the current transaction. If you want the new value to apply for the rest of the current session, use `false` instead. This function corresponds to the SQL command [SET](/sql/commands/sql-set).
```sql Example
diff --git a/sql/functions/window-functions.mdx b/sql/functions/window-functions.mdx
index 2a4392b6..81df1ad2 100644
--- a/sql/functions/window-functions.mdx
+++ b/sql/functions/window-functions.mdx
@@ -3,7 +3,7 @@ title: "Window functions"
description: 'Window functions compute a single result for each row over a set of rows that are related to the current row (the "window").'
---
-For details about the syntax of window function calls, see [Window function calls](/docs/current/query-syntax-value-exp/#window-function-calls).
+For details about the syntax of window function calls, see [Window function calls](/sql/query-syntax/value-exp#window-function-calls).
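+
+A minimal sketch of a window function call (the table and column names are illustrative):
+
+```sql
+-- Number the rows within each partition, ordered by event time.
+SELECT k, ts, row_number() OVER (PARTITION BY k ORDER BY ts) FROM t;
+```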
## General-purpose window functions
diff --git a/sql/query-syntax/generated-columns.mdx b/sql/query-syntax/generated-columns.mdx
index c5f2f383..ceecf3ae 100644
--- a/sql/query-syntax/generated-columns.mdx
+++ b/sql/query-syntax/generated-columns.mdx
@@ -4,7 +4,7 @@ description: "A generated column is a special column that is always computed fro
---
-To create a generated column, use the `AS ` clause in [CREATE TABLE](/sql/commands/sql-create-table) or [CREATE SOURCE](/docs/current/sql-create-source/) statements, for example:
+To create a generated column, use the `AS ` clause in [CREATE TABLE](/sql/commands/sql-create-table) or [CREATE SOURCE](/sql/commands/sql-create-source) statements, for example:
```sql
CREATE TABLE t1 (v1 int AS v2-1, v2 int, v3 int AS v2+1);
diff --git a/sql/system-catalogs/pg-catalog.mdx b/sql/system-catalogs/pg-catalog.mdx
index 806c6abb..61b03b55 100644
--- a/sql/system-catalogs/pg-catalog.mdx
+++ b/sql/system-catalogs/pg-catalog.mdx
@@ -3,7 +3,7 @@ title: "PostgreSQL catalogs"
description: "RisingWave supports these system catalogs and views of PostgreSQL."
---
-For information about RisingWave and PostgreSQL system functions, see [System administration functions](/docs/current/sql-function-sys-admin/) and [System information functions](/docs/current/sql-function-sys-info/).
+For information about RisingWave and PostgreSQL system functions, see [System administration functions](/sql/functions/sys-admin) and [System information functions](/docs/current/sql-function-sys-info/).
RisingWave does not fully support all PostgreSQL system catalog columns.
diff --git a/sql/system-catalogs/rw-catalog.mdx b/sql/system-catalogs/rw-catalog.mdx
index c8367745..1420f4d9 100644
--- a/sql/system-catalogs/rw-catalog.mdx
+++ b/sql/system-catalogs/rw-catalog.mdx
@@ -83,7 +83,7 @@ SELECT name, initialized_at, created_at FROM rw_sources;
| rw\_databases | Contains information about the databases available in the database, such as the IDs, names, and owners. |
| rw\_depend | Contains the dependency relationships between tables, indexes, views, materialized views, sources, and sinks. |
| rw\_ddl\_progress | Contains the progress of running DDL statements. You can use this relation to view the progress of running DDL statements. For details, see [Monitor statement progress](/operate/monitor-statement-progress). |
-| rw\_description | Contains optional descriptions (comments) for each database object. Descriptions can be added with the [COMMENT ON](/docs/current/sql-comment-on/) command and viewed with DESCRIBE or SHOW COLUMNS FROM command. |
+| rw\_description | Contains optional descriptions (comments) for each database object. Descriptions can be added with the [COMMENT ON](/sql/commands/sql-comment-on) command and viewed with the DESCRIBE or SHOW COLUMNS FROM commands. |
| rw\_event\_logs | Contains information about events, including event IDs, timestamps, event types, and additional information if available. |
| rw\_fragment\_id\_to\_ddl | Contains information about the database schema change operations (DDL) and their corresponding fragment\_id identifiers. The outputs include fragment IDs, job IDs, schema IDs, DDL types, and names of the affected object. |
| rw\_fragment\_parallelism | Contains information about the parallelism configuration at the fragment level, including fragment IDs, parallelism, and more. |
@@ -102,7 +102,7 @@ SELECT name, initialized_at, created_at FROM rw_sources;
| rw\_iceberg\_files | Contains the current files of the Iceberg source or table. |
| rw\_iceberg\_snapshots | Contains all Iceberg snapshots in RisingWave. Based on it, you can read a specific snapshot by a time travel query. |
| rw\_indexes | Contains information about indexes in the database, including their IDs, names, schema identifiers, definitions, and more. |
-| rw\_internal\_tables | Contains information about internal tables in the database. Internal tables are tables that store intermediate results (also known as internal states) of queries. Equivalent to the [SHOW INTERNAL TABLES](/docs/current/sql-show-internal-tables/) command. |
+| rw\_internal\_tables | Contains information about internal tables in the database. Internal tables are tables that store intermediate results (also known as internal states) of queries. Equivalent to the [SHOW INTERNAL TABLES](/sql/commands/sql-show-internal-tables) command. |
| rw\_materialized\_views | Contains information about materialized views in the database, including their unique IDs, names, schema IDs, owner IDs, definitions, append-only information, access control lists, initialization and creation timestamps, and the cluster version when the materialized view was initialized and created. |
| rw\_meta\_snapshot | Contains information about existing snapshots of the RisingWave meta service. You can use this relation to get IDs of meta snapshots and then restore the meta service from a snapshot. For details, see [Back up and restore meta service](/operate/meta-backup). |
| rw\_parallel\_units | Contains information about parallel worker units used for executing database operations, including their unique IDs, worker IDs, and primary keys. |
diff --git a/sql/udfs/embedded-python-udfs.mdx b/sql/udfs/embedded-python-udfs.mdx
index a6c264f1..f9718f36 100644
--- a/sql/udfs/embedded-python-udfs.mdx
+++ b/sql/udfs/embedded-python-udfs.mdx
@@ -1,6 +1,6 @@
---
title: "Embedded Python UDFs"
-description: "You can define embedded Python UDFs, which will be executed in an embedded Python interpreter within RisingWave. The Python code is directly included within the [CREATE FUNCTION](/docs/current/sql-create-function/) statement."
+description: "You can define embedded Python UDFs, which will be executed in an embedded Python interpreter within RisingWave. The Python code is directly included within the [CREATE FUNCTION](/sql/commands/sql-create-function) statement."
sidebarTitle: Python
---
@@ -92,7 +92,7 @@ $$;
## Define your aggregate functions
-You can create aggregate functions using the [CREATE AGGREGATE](/docs/current/sql-create-aggregate/) command. Refer to the syntax below:
+You can create aggregate functions using the [CREATE AGGREGATE](/sql/commands/sql-create-aggregate) command. Refer to the syntax below:
```sql
CREATE AGGREGATE function_name ( argument_type [, ...] )
diff --git a/sql/udfs/sql-udfs.mdx b/sql/udfs/sql-udfs.mdx
index e86d5cef..a4d65a7a 100644
--- a/sql/udfs/sql-udfs.mdx
+++ b/sql/udfs/sql-udfs.mdx
@@ -1,6 +1,6 @@
---
title: "SQL UDFs"
-description: "You can define SQL UDFs in RisingWave by using the [CREATE FUNCTION](/docs/current/sql-create-function/) command."
+description: "You can define SQL UDFs in RisingWave by using the [CREATE FUNCTION](/sql/commands/sql-create-function) command."
---
## Syntax
diff --git a/sql/udfs/use-udfs-in-java.mdx b/sql/udfs/use-udfs-in-java.mdx
index 320ae1ac..30dbcdd5 100644
--- a/sql/udfs/use-udfs-in-java.mdx
+++ b/sql/udfs/use-udfs-in-java.mdx
@@ -169,7 +169,7 @@ The UDF server will start running, allowing you to call the defined UDFs from Ri
## 4\. Declare your functions in RisingWave
-In RisingWave, use the [CREATE FUNCTION](/docs/current/sql-create-function/) command to declare the functions you defined.
+In RisingWave, use the [CREATE FUNCTION](/sql/commands/sql-create-function) command to declare the functions you defined.
Here are the SQL statements for declaring the two UDFs defined in [step 3](#3-define-your-functions-in-java).
diff --git a/sql/udfs/use-udfs-in-javascript.mdx b/sql/udfs/use-udfs-in-javascript.mdx
index 5ce82e54..8ab0f6d2 100644
--- a/sql/udfs/use-udfs-in-javascript.mdx
+++ b/sql/udfs/use-udfs-in-javascript.mdx
@@ -8,7 +8,7 @@ JavaScript code is inlined in `CREATE FUNCTION` statement and then run on the em
## Define your functions
-You can use the [CREATE FUNCTION](/docs/current/sql-create-function/) command to create JavaScript UDFs. See the syntax as follows:
+You can use the [CREATE FUNCTION](/sql/commands/sql-create-function) command to create JavaScript UDFs. The syntax is as follows:
```sql
CREATE FUNCTION function_name ( arg_name arg_type [, ...] )
@@ -56,7 +56,7 @@ SELECT * from series(5);
## Define your aggregate functions
-You can create aggregate functions using the [CREATE AGGREGATE](/docs/current/sql-create-aggregate/) command. Refer to the syntax below:
+You can create aggregate functions using the [CREATE AGGREGATE](/sql/commands/sql-create-aggregate) command. Refer to the syntax below:
```sql
CREATE AGGREGATE function_name ( argument_type [, ...] )
diff --git a/sql/udfs/use-udfs-in-python.mdx b/sql/udfs/use-udfs-in-python.mdx
index 0af44948..070e3bdf 100644
--- a/sql/udfs/use-udfs-in-python.mdx
+++ b/sql/udfs/use-udfs-in-python.mdx
@@ -129,7 +129,7 @@ The UDF server will start running, allowing you to call the defined UDFs from Ri
## 4\. Declare your functions in RisingWave
-In RisingWave, use the [CREATE FUNCTION](/docs/current/sql-create-function/) command to declare the functions you defined.
+In RisingWave, use the [CREATE FUNCTION](/sql/commands/sql-create-function) command to declare the functions you defined.
Here are the SQL statements for declaring the four UDFs defined in [step 2](#2-define-your-functions-in-a-python-file).
diff --git a/sql/udfs/use-udfs-in-rust.mdx b/sql/udfs/use-udfs-in-rust.mdx
index 8249dcf7..e3e6b461 100644
--- a/sql/udfs/use-udfs-in-rust.mdx
+++ b/sql/udfs/use-udfs-in-rust.mdx
@@ -8,7 +8,7 @@ Rust functions are compiled into WebAssembly modules and then run on the embedde
## Declare your functions in RisingWave
-You can utilize the [CREATE FUNCTION](/docs/current/sql-create-function/) command to develop UDFs in Rust. The syntax is outlined below:
+You can utilize the [CREATE FUNCTION](/sql/commands/sql-create-function) command to develop UDFs in Rust. The syntax is outlined below:
```sql
CREATE FUNCTION function_name ( arg_name arg_type [, ...] )
@@ -151,7 +151,7 @@ wasm-tools strip ./target/wasm32-wasip1/release/udf.wasm > udf.wasm
## 4\. Declare your functions in RisingWave
-In RisingWave, use the [CREATE FUNCTION](/docs/current/sql-create-function/) command to declare the functions you defined.
+In RisingWave, use the [CREATE FUNCTION](/sql/commands/sql-create-function) command to declare the functions you defined.
There are two ways to load the WASM module:
diff --git a/sql/udfs/user-defined-functions.mdx b/sql/udfs/user-defined-functions.mdx
index 832f5008..1af3f52d 100644
--- a/sql/udfs/user-defined-functions.mdx
+++ b/sql/udfs/user-defined-functions.mdx
@@ -19,7 +19,7 @@ It is important to note that UDFs have lower execution efficiency compared to bu
At present, there are three ways to define your UDF. The first option is to use it as an external function, which runs as a standalone service and provides maximum flexibility. The second option is to use an embedded UDF, which runs internally in RisingWave. The third is to use a SQL UDF, which allows for the same capabilities as regular SQL while offering a more concise way to express complex queries.
-You can create all types of UDFs mentioned above using the [CREATE FUNCTION](/docs/current/sql-create-function/) command. However, the syntax may vary slightly depending on the type of UDF you want to create. We have provided specific guides for each type of UDF below, so you can choose the one that best meets your needs.
+You can create all types of UDFs mentioned above using the [CREATE FUNCTION](/sql/commands/sql-create-function) command. However, the syntax may vary slightly depending on the type of UDF you want to create. We have provided specific guides for each type of UDF below, so you can choose the one that best meets your needs.
### UDFs as external functions
@@ -57,7 +57,7 @@ The other two dimensions are:
* The input and output of functions.
For this dimension, since common SQL functions include scalar functions, table functions, aggregate functions, and window functions, UDFs can be classified as user-defined scalar functions (abbreviated as UDFs), user-defined table functions (UDTFs), user-defined aggregate functions (UDAFs), and user-defined window functions (UDWFs).
RisingWave supports UDFs, UDTFs and UDAFs, covering most practical needs. You can find their guides in our documentation too.
-For example, for UDAFs, you can use the [CREATE AGGREGATE](/docs/current/sql-create-aggregate/) command to create functions. Meanwhile, we also offer dedicated sections for creating UDAFs in [Embedded Python UDFs](/docs/current/udf-python-embedded/#define-your-aggregate-functions) and [Embedded JavaScript UDFs](/docs/current/udf-javascript/#define-your-aggregate-functions).
+For example, for UDAFs, you can use the [CREATE AGGREGATE](/sql/commands/sql-create-aggregate) command to create functions. Meanwhile, we also offer dedicated sections for creating UDAFs in [Embedded Python UDFs](/docs/current/udf-python-embedded/#define-your-aggregate-functions) and [Embedded JavaScript UDFs](/docs/current/udf-javascript/#define-your-aggregate-functions).
* The language used to write functions.
RisingWave currently supports using SQL, Python, Java, JavaScript, and Rust to write UDFs.
diff --git a/troubleshoot/troubleshoot-oom.mdx b/troubleshoot/troubleshoot-oom.mdx
index 630ba2e2..4aee4edd 100644
--- a/troubleshoot/troubleshoot-oom.mdx
+++ b/troubleshoot/troubleshoot-oom.mdx
@@ -25,7 +25,7 @@ If OOM happens during the creation of a new materialized view, it might be cause
CREATE MATERIALIZED VIEW mv WITH ( source_rate_limit = 200 ) AS ...
```
-The parameter [source\_rate\_limit](/docs/current/view-configure-runtime-parameters/#how-to-view-runtime-parameters) refers to the maximum number of records per second for each parallelism on each source, where the default parallelism for streaming jobs is the total number of CPU cores across the cluster. For example, assuming a materialized view has four parallelisms and two sources joining together, each source's throughput will be throttled to `4 * source_rate_limit` records/s.
+The parameter [source\_rate\_limit](/operate/view-configure-runtime-parameters#how-to-view-runtime-parameters) refers to the maximum number of records per second for each parallelism on each source, where the default parallelism for streaming jobs is the total number of CPU cores across the cluster. For example, assuming a materialized view has four parallelisms and two sources joining together, each source's throughput will be throttled to `4 * source_rate_limit` records/s.
Alternatively, you may use `risectl` to alter the streaming rate limit of an existing materialized view, where the `` can be found either from the RisingWave Dashboard or the `rw_catalog` schema.
From 21fb5e9747263779fde4233f9dbd848460e6704e Mon Sep 17 00:00:00 2001
From: WanYixian
Date: Tue, 26 Nov 2024 15:53:19 +0800
Subject: [PATCH 09/10] transaction related issue
---
mint.json | 3 ++-
processing/sql/transactions.mdx | 26 ++++++++++++++++++++++++++
sql/commands/sql-begin.mdx | 4 ++--
sql/commands/sql-commit.mdx | 4 ++--
sql/commands/sql-start-transaction.mdx | 4 ++--
5 files changed, 34 insertions(+), 7 deletions(-)
create mode 100644 processing/sql/transactions.mdx
diff --git a/mint.json b/mint.json
index f9e8ffc1..bf9781b8 100644
--- a/mint.json
+++ b/mint.json
@@ -429,7 +429,8 @@
"processing/sql/temporal-filters",
"processing/sql/joins",
"processing/sql/time-windows",
- "processing/sql/top-n-by-group"
+ "processing/sql/top-n-by-group",
+ "processing/sql/transactions"
]
},
"processing/deletes-and-updates",
diff --git a/processing/sql/transactions.mdx b/processing/sql/transactions.mdx
new file mode 100644
index 00000000..36df6a63
--- /dev/null
+++ b/processing/sql/transactions.mdx
@@ -0,0 +1,26 @@
+---
+title: "Transactions"
+---
+
+Transactions in databases are logical units of work that consist of one or more operations, such as reads (queries) and writes (inserts or updates), treated as a single, indivisible, and consistent unit. The main purpose of transactions is to ensure data integrity and maintain the ACID (Atomicity, Consistency, Isolation, Durability) properties of the database.
+
+## Read-only transactions
+
+RisingWave supports read-only transactions, where all reads within a transaction are executed against the consistent Hummock snapshot. Hummock is the LSM-Tree-based storage engine in RisingWave that is specifically optimized for streaming workloads.
+
+To initiate a transaction, use either the `START TRANSACTION READ ONLY` or `BEGIN READ ONLY` command. Subsequently, you can execute queries to read data from the consistent snapshot. To finalize the transaction and submit the queries as a single unit, use the `COMMIT` command.
+
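+A minimal example session (the table name `orders` is illustrative):
+
+```sql
+BEGIN READ ONLY;
+SELECT count(*) FROM orders;      -- both queries read the same consistent snapshot
+SELECT max(amount) FROM orders;
+COMMIT;
+```
+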
+Note that data modifications are not allowed after a transaction has been initiated and before it is committed. The following statements are not allowed within a transaction:
+
+- All DDL statements (`CREATE`, `ALTER`, and `DROP`)
+- Most DML statements (`INSERT`, `UPDATE`, and `DELETE`)
+- Statements related to `USER`. This category may overlap with DDL statements.
+- All privilege-related statements, including `GRANT` and `REVOKE`.
+
+## Transactions within a CDC table
+
+When you create a table to ingest CDC streams, you can enable transactions by setting `transactional` to `true` in the `WITH` clause of the `CREATE TABLE` statement. Note that this feature is only available if you are using the native [MySQL CDC](/integrations/sources/mysql-cdc) or [PostgreSQL CDC](/integrations/sources/postgresql-cdc) connectors.
+
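+A hedged sketch for the MySQL CDC connector; the connection parameters shown are illustrative:
+
+```sql
+CREATE TABLE orders (
+  order_id INT PRIMARY KEY,
+  amount DOUBLE PRECISION
+)
+WITH (
+  connector = 'mysql-cdc',
+  hostname = '127.0.0.1',
+  port = '3306',
+  username = 'root',
+  password = '123456',
+  database.name = 'mydb',
+  table.name = 'orders',
+  transactional = 'true'   -- enable transactions for this CDC table
+);
+```
+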
+For performance reasons, transactional guarantees are not provided for transactions that involve changes to more than 4096 rows.
+
diff --git a/sql/commands/sql-begin.mdx b/sql/commands/sql-begin.mdx
index 0839115b..bdbd2137 100644
--- a/sql/commands/sql-begin.mdx
+++ b/sql/commands/sql-begin.mdx
@@ -3,7 +3,7 @@ title: "BEGIN"
description: "RisingWave supports read-only transactions. You can use the `BEGIN READ ONLY` command to start a read-only transaction."
---
-For more information about transactions in RisingWave, see [Transactions](/docs/current/transactions/).
+For more information about transactions in RisingWave, see [Transactions](/processing/sql/transactions).
The `BEGIN` command starts the read-write transaction mode, which is not supported yet in RisingWave. For compatibility reasons, this command will still succeed but no transaction is actually started. That is why you need to specify the `READ ONLY` option to start a transaction in read-only mode.
@@ -23,7 +23,7 @@ BEGIN
## Related topics
-  href="/docs/current/transactions/"
+  href="/processing/sql/transactions"
diff --git a/sql/commands/sql-commit.mdx b/sql/commands/sql-commit.mdx
index 831417b5..d17cd774 100644
--- a/sql/commands/sql-commit.mdx
+++ b/sql/commands/sql-commit.mdx
@@ -3,7 +3,7 @@ title: "COMMIT"
description: "RisingWave supports read-only transactions. You can use the `COMMIT` command to commit the current transaction."
---
-For more information about transactions in RisingWave, see [Transactions](/docs/current/transactions/).
+For more information about transactions in RisingWave, see [Transactions](/processing/sql/transactions).
You can start a read-only transaction by using the `BEGIN READ ONLY` or `START TRANSACTION READ ONLY` command.
@@ -28,7 +28,7 @@ COMMIT
title="Transactions"
icon="rotate"
iconType="solid"
- href="/docs/current/transactions/"
+ href="/processing/sql/transactions"
horizontal
/>
From b5024948e8ef8cee2bce0dd5b3ba3589555be2de Mon Sep 17 00:00:00 2001
From: WanYixian
Date: Tue, 26 Nov 2024 16:55:23 +0800
Subject: [PATCH 10/10] save work
---
cloud/develop-overview.mdx | 20 ++++++------
cloud/monitor-materialized-views.mdx | 11 ++-----
cloud/update-database-version.mdx | 2 +-
integrations/sources/hivemq.mdx | 2 +-
python-sdk/intro.mdx | 4 +--
sql/commands/sql-alter-materialized-view.mdx | 2 +-
sql/commands/sql-alter-sink.mdx | 2 +-
sql/commands/sql-create-aggregate.mdx | 4 +--
sql/commands/sql-create-function.mdx | 12 ++++----
sql/commands/sql-create-mv.mdx | 4 ---
sql/commands/sql-create-source.mdx | 4 +--
sql/commands/sql-create-table.mdx | 4 +--
sql/commands/sql-select.mdx | 16 +++++-----
sql/commands/sql-show-columns.mdx | 2 +-
sql/commands/sql-show-connections.mdx | 2 +-
sql/commands/sql-show-databases.mdx | 2 +-
sql/commands/sql-show-functions.mdx | 2 +-
sql/commands/sql-show-indexes.mdx | 2 +-
sql/commands/sql-show-internal-tables.mdx | 2 +-
sql/commands/sql-show-jobs.mdx | 2 +-
sql/commands/sql-show-mv.mdx | 2 +-
sql/commands/sql-show-schemas.mdx | 2 +-
sql/commands/sql-show-sinks.mdx | 2 +-
sql/commands/sql-show-sources.mdx | 2 +-
sql/commands/sql-show-tables.mdx | 2 +-
sql/commands/sql-show-views.mdx | 2 +-
sql/data-types/array-type.mdx | 2 +-
sql/data-types/jsonb.mdx | 4 +--
sql/data-types/map-type.mdx | 2 +-
sql/data-types/overview.mdx | 2 +-
sql/functions/datetime.mdx | 2 +-
sql/functions/overview.mdx | 32 ++++++++++----------
sql/functions/set-returning.mdx | 2 +-
sql/functions/window-functions.mdx | 2 +-
sql/overview.mdx | 14 ++++-----
sql/query-syntax/overview.mdx | 26 ++++++++--------
sql/query-syntax/value-exp.mdx | 4 +--
sql/query-syntax/with-ordinality-clause.mdx | 2 +-
sql/system-catalogs/overview.mdx | 6 ++--
sql/system-catalogs/pg-catalog.mdx | 2 +-
sql/udfs/embedded-python-udfs.mdx | 4 +--
sql/udfs/use-udfs-in-java.mdx | 4 +--
sql/udfs/use-udfs-in-javascript.mdx | 2 +-
sql/udfs/use-udfs-in-rust.mdx | 6 ++--
sql/udfs/user-defined-functions.mdx | 14 ++++-----
troubleshoot/node-failure.mdx | 2 +-
troubleshoot/overview.mdx | 2 +-
troubleshoot/troubleshoot-high-latency.mdx | 2 +-
48 files changed, 120 insertions(+), 131 deletions(-)
diff --git a/cloud/develop-overview.mdx b/cloud/develop-overview.mdx
index 4b3f52b7..6a101be5 100644
--- a/cloud/develop-overview.mdx
+++ b/cloud/develop-overview.mdx
@@ -12,7 +12,7 @@ Developers can refer to the user documentation for RisingWave to develop streami
title="RisingWave user docs"
icon="arrow-right"
iconType="solid"
- href="/docs/current/intro/"
+ href="/cloud/intro"
horizontal
/>
See how RisingWave can integrate with your existing data stack. Vote for your favorite data tools and streaming services to help us prioritize the integration development.
- Connect to and ingest data from external sources such as databases and message brokers. See supported data sources.
+ Connect to and ingest data from external sources such as databases and message brokers. See supported data sources. Stream processed data out of RisingWave to message brokers and databases. See supported data destinations.
@@ -63,20 +63,20 @@ Select the version of the corresponding docs when using the RisingWave user docs
SQL syntax and functionality supported by RisingWave. While RisingWave is wire-compatible with PostgreSQL, it has some unique features and notable differences.
diff --git a/cloud/monitor-materialized-views.mdx b/cloud/monitor-materialized-views.mdx
index 23638bb8..c42e1e70 100644
--- a/cloud/monitor-materialized-views.mdx
+++ b/cloud/monitor-materialized-views.mdx
@@ -5,18 +5,11 @@ mode: wide
---
1. Go to the [project details page](/cloud/check-status-and-metrics#check-project-details).
+
2. Select the **Workspace** \> **Materialized Views** tab.
-
-
-
3. Click on a materialized view to see the details.
You can view the directed acyclic graph of streaming executors that maintain the materialized view.
-
-
-
+
4. You can click **< > SQL** to see the query defined in the materialized view (i.e. the `AS` clause).
-
-
-
diff --git a/cloud/update-database-version.mdx b/cloud/update-database-version.mdx
index 5199a3b2..e13aa4cf 100644
--- a/cloud/update-database-version.mdx
+++ b/cloud/update-database-version.mdx
@@ -1,6 +1,6 @@
---
title: "Update RisingWave version"
-description: "When a newer version of RisingWave is available, you can update the database version of your project to the latest. See the [Release Notes of RisingWave](/release-notes/) for feature updates of each version."
+description: "When a newer version of RisingWave is available, you can update the database version of your project to the latest. See the [Release Notes of RisingWave](/changelog/release-notes) for feature updates of each version."
---
## Prerequisite
diff --git a/integrations/sources/hivemq.mdx b/integrations/sources/hivemq.mdx
index 4a377f06..ace0ba6e 100644
--- a/integrations/sources/hivemq.mdx
+++ b/integrations/sources/hivemq.mdx
@@ -63,7 +63,7 @@ For detailed setup, refer to the [HiveMQ Quick Start Guide](https://docs.hivemq.
## Set Up a RisingWave cluster
-To ingest data into RisingWave, you'll need to create a RisingWave cluster. Sign up for a free plan at [RisingWave Cloud](https://cloud.risingwave.com/) to explore its features. You can refer to the [RisingWave Documentation](https://docs.risingwave.com/docs/current/intro/) for comprehensive, step-by-step instructions. For further assistance or to join the community, connect with us on [Slack](https://www.risingwave.com/slack).
+To ingest data into RisingWave, you'll need to create a RisingWave cluster. Sign up for a free plan at [RisingWave Cloud](https://cloud.risingwave.com/) to explore its features. You can refer to the [RisingWave Documentation](/cloud/intro) for comprehensive, step-by-step instructions. For further assistance or to join the community, connect with us on [Slack](https://www.risingwave.com/slack).
![RisingWave Cloud Sign-Up](/images/risingwave_cloud_sign_up.png)
diff --git a/python-sdk/intro.mdx b/python-sdk/intro.mdx
index 34fbd798..836c4178 100644
--- a/python-sdk/intro.mdx
+++ b/python-sdk/intro.mdx
@@ -11,7 +11,7 @@ This SDK provides a simple way to perform ad-hoc queries, subscribe to changes,
[risingwave-py](https://pypi.org/project/risingwave-py/) is a RisingWave Python SDK that provides the following capabilities:
- Interact with RisingWave via Pandas DataFrame.
- Subscribe and process changes from RisingWave tables or materialized views.
-- Run [SQL commands](../sql/overview) supported in RisingWave.
+- Run [SQL commands](/sql/commands/overview) supported in RisingWave.
### Run RisingWave
@@ -96,7 +96,7 @@ rw.execute("""
)""")
```
-For supported sources and the SQL syntax, see [this topic](https://docs.risingwave.com/docs/current/data-ingestion/).
+For supported sources and the SQL syntax, see [this topic](https://docs.risingwave.com/ingestion/overview).
### Query from RisingWave
diff --git a/sql/commands/sql-alter-materialized-view.mdx b/sql/commands/sql-alter-materialized-view.mdx
index 70a297c6..484490e9 100644
--- a/sql/commands/sql-alter-materialized-view.mdx
+++ b/sql/commands/sql-alter-materialized-view.mdx
@@ -6,7 +6,7 @@ description: "The `ALTER MATERIALIZED VIEW` command modifies the metadata of a m
INFO
-To modify the SQL definition of a materialized view, please refer to [Alter a streaming job](/docs/current/alter-streaming/).
+To modify the SQL definition of a materialized view, please refer to [Alter a streaming job](/operate/alter-streaming).
## Syntax
diff --git a/sql/commands/sql-alter-sink.mdx b/sql/commands/sql-alter-sink.mdx
index f128eb82..f042e92a 100644
--- a/sql/commands/sql-alter-sink.mdx
+++ b/sql/commands/sql-alter-sink.mdx
@@ -6,7 +6,7 @@ description: The `ALTER SINK` command modifies the metadata of a sink.
INFO
-To modify the SQL definition of a sink, please refer to [Alter a streaming job](/docs/current/alter-streaming/).
+To modify the SQL definition of a sink, please refer to [Alter a streaming job](/operate/alter-streaming).
## Syntax
diff --git a/sql/commands/sql-create-aggregate.mdx b/sql/commands/sql-create-aggregate.mdx
index 65dd9f5b..ee6847d5 100644
--- a/sql/commands/sql-create-aggregate.mdx
+++ b/sql/commands/sql-create-aggregate.mdx
@@ -72,7 +72,7 @@ def finish(state):
$$;
```
-For more details, see [Use UDFs in Python](/docs/current/udf-python-embedded/).
+For more details, see [Use UDFs in Python](/sql/udfs/embedded-python-udfs).
### JavaScript
@@ -108,7 +108,7 @@ create aggregate weighted_avg(value int, weight int) returns float language java
$$;
```
-For more details, see [Use UDFs in JavaScript](/docs/current/udf-javascript/).
+For more details, see [Use UDFs in JavaScript](/sql/udfs/use-udfs-in-javascript).
### Using UDAFs
diff --git a/sql/commands/sql-create-function.mdx b/sql/commands/sql-create-function.mdx
index b3b670ab..2e8fc472 100644
--- a/sql/commands/sql-create-function.mdx
+++ b/sql/commands/sql-create-function.mdx
@@ -35,14 +35,14 @@ CREATE FUNCTION function_name ( argument_type [, ...] )
### Examples
-Use `CREATE FUNCTION` to declare a UDF defined by Python. For more details, see [Use UDFs in Python](/docs/current/udf-python/).
+Use `CREATE FUNCTION` to declare a UDF defined by Python. For more details, see [Use UDFs in Python](/sql/udfs/use-udfs-in-python).
```sql
CREATE FUNCTION gcd(int, int) RETURNS int
LANGUAGE python AS gcd USING LINK 'http://localhost:8815'; -- If you are running RisingWave using Docker, replace the address with 'http://host.docker.internal:8815'.
```
-Use `CREATE FUNCTION` to declare a UDF defined by Java. For more details, see [Use UDFs in Java](/docs/current/udf-java/).
+Use `CREATE FUNCTION` to declare a UDF defined by Java. For more details, see [Use UDFs in Java](/sql/udfs/use-udfs-in-java).
```sql
CREATE FUNCTION gcd(int, int) RETURNS int
@@ -66,7 +66,7 @@ def gcd(a, b):
$$;
```
-For more details, see [Embedded Python UDFs](/docs/current/udf-python-embedded/).
+For more details, see [Embedded Python UDFs](/sql/udfs/embedded-python-udfs).
```sql Embedded UDFs
# Embedded JavaScript UDF
@@ -80,7 +80,7 @@ create function gcd(a int, b int) returns int language javascript as $$
$$;
```
-For more details, see [Use UDFs in JavaScript](/docs/current/udf-javascript/).
+For more details, see [Use UDFs in JavaScript](/sql/udfs/use-udfs-in-javascript).
```sql Embedded UDFs
# Embedded Rust UDF
@@ -96,13 +96,13 @@ create function gcd(int, int) returns int language rust as $$
$$;
```
-For more details, see [Use UDFs in Rust](/docs/current/udf-rust/).
+For more details, see [Use UDFs in Rust](/sql/udfs/use-udfs-in-rust).
## SQL UDFs
SQL UDFs in RisingWave are designed to expand directly into expressions at the frontend, resulting in minimal performance difference compared to manually calling multiple functions.
-The `CREATE FUNCTION` command is used to define SQL UDFs. You can read our guide on [SQL UDFs](/docs/current/ql-udfs/) for more details.
+The `CREATE FUNCTION` command is used to define SQL UDFs. You can read our guide on [SQL UDFs](/sql/udfs/sql-udfs) for more details.
```sql Syntax of SQL UDFs
CREATE FUNCTION function_name ( argument_type [, ...] )
diff --git a/sql/commands/sql-create-mv.mdx b/sql/commands/sql-create-mv.mdx
index a2b4b0d4..dfe18dfa 100644
--- a/sql/commands/sql-create-mv.mdx
+++ b/sql/commands/sql-create-mv.mdx
@@ -29,10 +29,6 @@ Names and unquoted identifiers are case-insensitive. Therefore, you must double-
The `ORDER BY` clause in the `CREATE MATERIALIZED VIEW` statement is allowed but not considered as part of the definition of the materialized view. It's only used in the initial creation of the materialized view and not during refreshes.
-## Examples
-
-Refer to this [tutorial](/docs/current/server-performance-anomaly-detection/) for examples of creating materialized views based on external sources or existing materialized views.
-
## See also
diff --git a/sql/commands/sql-create-source.mdx b/sql/commands/sql-create-source.mdx
index ec1ff3ef..e033b403 100644
--- a/sql/commands/sql-create-source.mdx
+++ b/sql/commands/sql-create-source.mdx
@@ -5,7 +5,7 @@ description: "A source is a resource that RisingWave can read data from. You can
For the full list of the sources we support, see [Supported sources](/ingestion/supported-sources-and-formats#supported-sources).
-If you choose to persist the data from the source in RisingWave, use the [CREATE TABLE](/sql/commands/sql-create-table) command with connector settings. Or if you need to create the primary key (which is required by some formats like FORMAT UPSERT/DEBEZIUM), you have to use `CREATE TABLE` too. For more details about the differences between sources and tables, see [here](/docs/current/data-ingestion/#table-with-connectors).
+If you choose to persist the data from the source in RisingWave, use the [CREATE TABLE](/sql/commands/sql-create-table) command with connector settings. If you need to define a primary key (required by some formats, such as FORMAT UPSERT/DEBEZIUM), you must also use `CREATE TABLE`. For more details about the differences between sources and tables, see [here](/ingestion/overview#table-with-connectors).
Regardless of whether the data is persisted in RisingWave, you can create materialized views to perform analysis or data transformations.
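For example, a minimal Kafka source sketch (the connector parameters shown are illustrative):

```sql
CREATE SOURCE order_events (
  order_id INT,
  amount DOUBLE PRECISION
)
WITH (
  connector = 'kafka',
  topic = 'order_events',
  properties.bootstrap.server = 'broker:9092',
  scan.startup.mode = 'earliest'
)
FORMAT PLAIN ENCODE JSON;
```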
@@ -175,7 +175,7 @@ Shared sources do not support `ALTER SOURCE`. Use non-shared sources if you requ
title="Overview of data ingestion"
icon="database"
iconType="solid"
- href="/docs/current/data-ingestion/"
+ href="/ingestion/overview"
/>
-If you choose not to persist the data from the source in RisingWave, use [CREATE SOURCE](/sql/commands/sql-create-source) instead. For more details about the differences between sources and tables, see [here](/docs/current/data-ingestion/#table-with-connectors).
+If you choose not to persist the data from the source in RisingWave, use [CREATE SOURCE](/sql/commands/sql-create-source) instead. For more details about the differences between sources and tables, see [here](/ingestion/overview#table-with-connectors).
## Syntax
@@ -77,7 +77,7 @@ CREATE TABLE [ IF NOT EXISTS ] table_name (
| APPEND ONLY | When this option is specified, the table will be created as an append-only table. An append-only table cannot have primary keys. UPDATE and DELETE statements are not valid for append-only tables. Note that append-only tables are in the [public preview stage](/changelog/product-lifecycle#features-in-the-public-preview-stage). |
| ON CONFLICT | Specify the alternative action when the newly inserted record brings a violation of PRIMARY KEY constraint on the table. See [PK conflict behavior](#pk-conflict-behavior) below for more information. |
| **INCLUDE** clause | Extract fields not included in the payload as separate columns. For more details on its usage, see [INCLUDE clause](/ingestion/ingest-additional-fields-with-include-clause). |
-| **WITH** clause | Specify the connector settings here if trying to store all the source data. See the [Data ingestion](/docs/current/data-ingestion/) page for the full list of supported source as well as links to specific connector pages detailing the syntax for each source. |
+| **WITH** clause | Specify the connector settings here if trying to store all the source data. See the [Data ingestion](/ingestion/overview) page for the full list of supported sources as well as links to specific connector pages detailing the syntax for each source. |
| **FORMAT** and **ENCODE** options | Specify the data format and the encoding format of the source data. To learn about the supported data formats, see [Data formats](/ingestion/supported-sources-and-formats#supported-formats). |
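As a sketch combining these clauses (connector parameters and column names are illustrative):

```sql
CREATE TABLE IF NOT EXISTS orders (
  order_id INT PRIMARY KEY,
  amount DOUBLE PRECISION
)
INCLUDE timestamp AS kafka_ts    -- extract the message timestamp as an extra column
WITH (
  connector = 'kafka',
  topic = 'orders',
  properties.bootstrap.server = 'broker:9092'
)
FORMAT PLAIN ENCODE JSON;
```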
diff --git a/sql/commands/sql-select.mdx b/sql/commands/sql-select.mdx
index fc84423c..faadd56f 100644
--- a/sql/commands/sql-select.mdx
+++ b/sql/commands/sql-select.mdx
@@ -32,14 +32,14 @@ Where `from_item` can be:
| Parameter or clause | Description |
| :--------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **WITH** clause | Provides a way to write supplemental statements for a larger query. For more information, see [WITH clause](/sql/query-syntax/with-clause). |
-| **DISTINCT** clause | This clause eliminates duplicate rows from the result. SELECT DISTINCT eliminates duplicate rows based on **all selected columns**. SELECT DISTINCT ON allows you to specify expressions or columns and returns only the first row for each unique combination. It requires the use of the ORDER BY clause to determine the first row, and the DISTINCT ON expression must match the leftmost ORDER BY expression. The ORDER BY clause will normally contain additional expressions that determine the desired precedence of rows within each DISTINCT ON group. In this case, this expression can be an alternative with group [topN](/docs/current/sql-pattern-topn/) when "N=1". See [examples of this clause](#distinct-clause) below to know more about it. |
+| **DISTINCT** clause | This clause eliminates duplicate rows from the result. SELECT DISTINCT eliminates duplicate rows based on **all selected columns**. SELECT DISTINCT ON allows you to specify expressions or columns and returns only the first row for each unique combination. It requires the use of the ORDER BY clause to determine the first row, and the DISTINCT ON expression must match the leftmost ORDER BY expression. The ORDER BY clause will normally contain additional expressions that determine the desired precedence of rows within each DISTINCT ON group. In this case, DISTINCT ON can serve as an alternative to group [topN](/processing/sql/top-n-by-group) when N=1. See [examples of this clause](#distinct-clause) below. |
| **EXCEPT** clause | Exclude one or more columns from the result set. By specifying _except\_column_, the query will return all columns in the result set except those specified. |
| _expression_ | A column or an expression. |
-| **VALUES** clause | This clause generates one or more rows of data as a table expression. For details, see [VALUES clause](/docs/current/sql/query-syntax/query-syntax-values-clause/). |
+| **VALUES** clause | This clause generates one or more rows of data as a table expression. For details, see [VALUES clause](/sql/query-syntax/values-clause). |
| _alias_ | A temporary alternative name for a table or materialized view in a query. |
| _table\_name_ | A table or materialized view. |
| _grouping\_expression_ | Values can be: input column names; input column expressions without subqueries or correlated columns. |
-| **ORDER BY** clause | By default, sorting is in ascending (ASC) order, with NULL values treated as the largest. For more information, see [ORDER BY clause](/docs/current/query-syntax-order-by-clause/). |
+| **ORDER BY** clause | By default, sorting is in ascending (ASC) order, with NULL values treated as the largest. For more information, see [ORDER BY clause](/sql/query-syntax/order-by-clause). |
| _sort\_expression_ | Values can be: output column names; output column ordinal numbers; hidden select expressions. |
| _count\_number_ | The number of results you want to get. |
| **OFFSET** clause | The OFFSET clause can only be used with the LIMIT and ORDER BY clauses. |
@@ -48,11 +48,11 @@ Where `from_item` can be:
| _join\_condition_ | Conditions for the ON clause that must be met before the two from\_items can be joined. |
| _window\_type_ | The type of the time window function. Possible values are HOP and TUMBLE. |
| _interval\_expression_ | The interval expression, in the format of INTERVAL '\'. For example: INTERVAL '2 MINUTES'. The standard SQL format, which places time units outside of quotation marks (for example, INTERVAL '2' MINUTE), is also supported. |
-| **FROM** clause | Specifies the source of the data on which the query should operate. For more information, see [FROM clause](/docs/current/query-syntax-from-clause/). |
-| **GROUP BY** clause | Groups rows in a table with identical data, thus eliminating redundancy in the output and aggregates that apply to these groups. For more information, see [GROUP BY clause](/docs/current/query-syntax-group-by-clause/). |
-| **HAVING** clause | Eliminates group rows that do not satisfy a given condition. For more information, see [HAVING clause](/docs/current/query-syntax-having-clause/). |
-| **LIMIT** clause | When the ORDER BY clause is not present, the LIMIT clause cannot be used as part of a materialized view. For more information, see [LIMIT clause](/docs/current/query-syntax-limit-clause/). |
-| **WHERE** clause | Specifies any conditions or filters to apply to your data. For more information, see [WHERE clause](/docs/current/query-syntax-where-clause/). |
+| **FROM** clause | Specifies the source of the data on which the query should operate. For more information, see [FROM clause](/sql/query-syntax/from-clause). |
+| **GROUP BY** clause | Groups rows in a table with identical data, thus eliminating redundancy in the output and aggregates that apply to these groups. For more information, see [GROUP BY clause](/sql/query-syntax/group-by-clause). |
+| **HAVING** clause | Eliminates group rows that do not satisfy a given condition. For more information, see [HAVING clause](/sql/query-syntax/having-clause). |
+| **LIMIT** clause | When the ORDER BY clause is not present, the LIMIT clause cannot be used as part of a materialized view. For more information, see [LIMIT clause](/sql/query-syntax/limit-clause). |
+| **WHERE** clause | Specifies any conditions or filters to apply to your data. For more information, see [WHERE clause](/sql/query-syntax/where-clause). |
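+
+As an illustration of `DISTINCT ON` (table and column names are hypothetical), the query below keeps only the latest order per customer; `customer_id` is the leftmost `ORDER BY` expression, as required:
+
+```sql
+SELECT DISTINCT ON (customer_id) customer_id, order_id, order_date
+FROM orders
+ORDER BY customer_id, order_date DESC;
+```
+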
## Examples
diff --git a/sql/commands/sql-show-columns.mdx b/sql/commands/sql-show-columns.mdx
index 7a230897..4658a8fb 100644
--- a/sql/commands/sql-show-columns.mdx
+++ b/sql/commands/sql-show-columns.mdx
@@ -15,7 +15,7 @@ SHOW COLUMNS FROM relation_name [ LIKE_expression ];
| Parameter or clause | Description |
| :------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| _relation\_name_ | The name of the table, source, sink, view, or materialized view from which the columns will be listed. |
-| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/docs/current/sql-function-string/#like-pattern-matching-expressions). |
+| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/sql/functions/string#like-pattern-matching-expressions). |
## Examples
diff --git a/sql/commands/sql-show-connections.mdx b/sql/commands/sql-show-connections.mdx
index 5cdebe6f..4d5ab353 100644
--- a/sql/commands/sql-show-connections.mdx
+++ b/sql/commands/sql-show-connections.mdx
@@ -13,7 +13,7 @@ SHOW CONNECTIONS [ LIKE_expression ];
| Parameter or clause | Description |
| :------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/docs/current/sql-function-string/#like-pattern-matching-expressions). |
+| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/sql/functions/string#like-pattern-matching-expressions). |
## Example
diff --git a/sql/commands/sql-show-databases.mdx b/sql/commands/sql-show-databases.mdx
index e2cf5efc..f746d3ca 100644
--- a/sql/commands/sql-show-databases.mdx
+++ b/sql/commands/sql-show-databases.mdx
@@ -13,7 +13,7 @@ SHOW DATABASES [ LIKE_expression ];
| Parameter or clause | Description |
| :------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/docs/current/sql-function-string/#like-pattern-matching-expressions). |
+| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/sql/functions/string#like-pattern-matching-expressions). |
## Example
diff --git a/sql/commands/sql-show-functions.mdx b/sql/commands/sql-show-functions.mdx
index 51bae471..1b524447 100644
--- a/sql/commands/sql-show-functions.mdx
+++ b/sql/commands/sql-show-functions.mdx
@@ -13,7 +13,7 @@ SHOW FUNCTIONS [ LIKE_expression ];
| Parameter or clause | Description |
| :------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/docs/current/sql-function-string/#like-pattern-matching-expressions). |
+| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/sql/functions/string#like-pattern-matching-expressions). |
## Example
diff --git a/sql/commands/sql-show-indexes.mdx b/sql/commands/sql-show-indexes.mdx
index b66ae5ce..6dee1f0a 100644
--- a/sql/commands/sql-show-indexes.mdx
+++ b/sql/commands/sql-show-indexes.mdx
@@ -14,7 +14,7 @@ SHOW INDEXES FROM table_name [ LIKE_expression ];
| Parameter | Description |
| :--------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| _table\_name_ | The table from which indexes will be displayed. |
-| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/docs/current/sql-function-string/#like-pattern-matching-expressions). |
+| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/sql/functions/string#like-pattern-matching-expressions). |
## Example[](#example "Direct link to Example")
diff --git a/sql/commands/sql-show-internal-tables.mdx b/sql/commands/sql-show-internal-tables.mdx
index b3df10d8..da7455a8 100644
--- a/sql/commands/sql-show-internal-tables.mdx
+++ b/sql/commands/sql-show-internal-tables.mdx
@@ -16,7 +16,7 @@ SHOW INTERNAL TABLES [ FROM schema_name ] [ LIKE_expression ];
| Parameter | Description |
| :--------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| _schema\_name_ | The schema in which tables will be listed. If not given, tables from the default schema, public, will be listed. |
-| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/docs/current/sql-function-string/#like-pattern-matching-expressions). |
+| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/sql/functions/string#like-pattern-matching-expressions). |
## Example
diff --git a/sql/commands/sql-show-jobs.mdx b/sql/commands/sql-show-jobs.mdx
index 6780f96c..d10ca7a4 100644
--- a/sql/commands/sql-show-jobs.mdx
+++ b/sql/commands/sql-show-jobs.mdx
@@ -15,7 +15,7 @@ SHOW JOBS [ LIKE_expression ];
| Parameter | Description |
| :--------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/docs/current/sql-function-string/#like-pattern-matching-expressions). |
+| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/sql/functions/string#like-pattern-matching-expressions). |
## Example
```sql
diff --git a/sql/commands/sql-show-mv.mdx b/sql/commands/sql-show-mv.mdx
index 165b701e..3ed5c957 100644
--- a/sql/commands/sql-show-mv.mdx
+++ b/sql/commands/sql-show-mv.mdx
@@ -14,7 +14,7 @@ SHOW MATERIALIZED VIEWS [ FROM schema_name ] [ LIKE_expression ];
| Parameter | Description |
| :--------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| _schema\_name_ | The schema in which the materialized views will be listed. If not given, materialized views from the default schema, public, will be listed |
-| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/docs/current/sql-function-string/#like-pattern-matching-expressions). |
+| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/sql/functions/string#like-pattern-matching-expressions). |
## Example
diff --git a/sql/commands/sql-show-schemas.mdx b/sql/commands/sql-show-schemas.mdx
index 8ca5182d..d3750da3 100644
--- a/sql/commands/sql-show-schemas.mdx
+++ b/sql/commands/sql-show-schemas.mdx
@@ -14,7 +14,7 @@ SHOW SCHEMAS [ LIKE_expression ];
| Parameter or clause | Description |
| :------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/docs/current/sql-function-string/#like-pattern-matching-expressions). |
+| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/sql/functions/string#like-pattern-matching-expressions). |
## Example
diff --git a/sql/commands/sql-show-sinks.mdx b/sql/commands/sql-show-sinks.mdx
index 747b1db0..a4aa0bc9 100644
--- a/sql/commands/sql-show-sinks.mdx
+++ b/sql/commands/sql-show-sinks.mdx
@@ -14,7 +14,7 @@ SHOW SINKS [ FROM schema_name ] [ LIKE_expression ];
| Clause | Description |
| :------------------ | :-------------------------- |
| schema\_name |The schema of the sinks to be listed.|
-| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/docs/current/sql-function-string/#like-pattern-matching-expressions). |
+| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/sql/functions/string#like-pattern-matching-expressions). |
## Example
diff --git a/sql/commands/sql-show-sources.mdx b/sql/commands/sql-show-sources.mdx
index f425a44f..bbcf4fff 100644
--- a/sql/commands/sql-show-sources.mdx
+++ b/sql/commands/sql-show-sources.mdx
@@ -14,7 +14,7 @@ SHOW SOURCES [ FROM schema_name ] [ LIKE_expression ];
| Parameter or clause | Description |
| :------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| _schema\_name_ | The schema of the sources to be listed. The default schema is public. |
-| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/docs/current/sql-function-string/#like-pattern-matching-expressions). |
+| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/sql/functions/string#like-pattern-matching-expressions). |
## Examples
diff --git a/sql/commands/sql-show-tables.mdx b/sql/commands/sql-show-tables.mdx
index 43e8c561..dafcfda1 100644
--- a/sql/commands/sql-show-tables.mdx
+++ b/sql/commands/sql-show-tables.mdx
@@ -14,7 +14,7 @@ SHOW TABLES [ FROM schema_name ] [ LIKE_expression ];
| Parameter or clause | Description |
| :------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| _schema\_name_ | The schema in which tables will be listed. If not given, tables from the default schema, public, will be listed. |
-| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/docs/current/sql-function-string/#like-pattern-matching-expressions). |
+| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/sql/functions/string#like-pattern-matching-expressions). |
## Example
diff --git a/sql/commands/sql-show-views.mdx b/sql/commands/sql-show-views.mdx
index 77a7035b..d23c60c9 100644
--- a/sql/commands/sql-show-views.mdx
+++ b/sql/commands/sql-show-views.mdx
@@ -14,7 +14,7 @@ SHOW VIEWS [ FROM schema_name ] [ LIKE_expression ];
| Parameter or clause | Description |
| :------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| _schema\_name_ | The schema from which existing views will be listed. If not given, views from the default schema, "public", will be listed. |
-| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/docs/current/sql-function-string/#like-pattern-matching-expressions). |
+| LIKE\_expression | Filters the output based on names by applying pattern matching. See details in [LIKE pattern matching expressions](/sql/functions/string#like-pattern-matching-expressions). |
## Example
diff --git a/sql/data-types/array-type.mdx b/sql/data-types/array-type.mdx
index 94eebcaa..cff8789b 100644
--- a/sql/data-types/array-type.mdx
+++ b/sql/data-types/array-type.mdx
@@ -145,4 +145,4 @@ SELECT unnest(array[1,2,3,4]);
## Array functions and operators
-For the full list of array functions and operators, see [Array functions and operators](/docs/current/sql-function-array/).
+For the full list of array functions and operators, see [Array functions and operators](/sql/functions/array).
diff --git a/sql/data-types/jsonb.mdx b/sql/data-types/jsonb.mdx
index 74b5892c..bcf66feb 100644
--- a/sql/data-types/jsonb.mdx
+++ b/sql/data-types/jsonb.mdx
@@ -74,7 +74,7 @@ VALUES
To retrieve data from a `JSONB` column, use the `->` or `->>` operators to access the JSON object's properties. The `->` operator returns a `jsonb` value, while the `->>` operator returns a varchar value.
-For details about the JSON operators, see [JSON operators](/docs/current/sql-function-json/#json-operators).
+For details about the JSON operators, see [JSON operators](/sql/functions/json#json-operators).
`JSONB` data types can be cast to other data types such as bool, smallint, int, bigint, decimal, real, and double precision. Casting is performed using the `::data-type` cast notation, such as `::int` for casting to an integer data type.
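For instance, assuming a hypothetical `users` table with a `profile` column of type `JSONB`:

```sql
SELECT
  profile -> 'address'    AS address_json,  -- returns jsonb
  profile ->> 'name'      AS name,          -- returns varchar
  (profile -> 'age')::int AS age            -- cast a jsonb number to int
FROM users;
```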
@@ -106,4 +106,4 @@ The output shows that the `brand_bad` column contains additional double quotes.
## JSONB functions and operators
-For the full list of JSONB functions and operators, see [JSON functions and operators](/docs/current/sql-function-json/).
+For the full list of JSONB functions and operators, see [JSON functions and operators](/sql/functions/json).
diff --git a/sql/data-types/map-type.mdx b/sql/data-types/map-type.mdx
index 9cec42cb..2adce2e8 100644
--- a/sql/data-types/map-type.mdx
+++ b/sql/data-types/map-type.mdx
@@ -63,4 +63,4 @@ SELECT map_insert(MAP {'key1': 1, 'key2': 2, 'key3': 3}, 'key2', 4);
## Map functions and operators
-For the full list of map functions and operators, see [Map functions and operators](/docs/current/sql-function-map/).
+For the full list of map functions and operators, see [Map functions and operators](/sql/functions/map).
diff --git a/sql/data-types/overview.mdx b/sql/data-types/overview.mdx
index 535f6a6b..862b4fc0 100644
--- a/sql/data-types/overview.mdx
+++ b/sql/data-types/overview.mdx
@@ -32,5 +32,5 @@ Scientific notation (e.g., 1e6, 1.25e5, and 1e-4) is supported in SELECT and INS
## Casting
-For details about data type casting, see [Casting](/docs/current/data-type-casting/).
+For details about data type casting, see [Casting](/sql/data-types/casting).
diff --git a/sql/functions/datetime.mdx b/sql/functions/datetime.mdx
index 4f7f021e..e10dfe8d 100644
--- a/sql/functions/datetime.mdx
+++ b/sql/functions/datetime.mdx
@@ -186,7 +186,7 @@ make_timestamp(2024, 1, 31, 1, 45, 30.2) → 2024-01-31 01:45:30.200
### `now`
-Returns the current date and time. For streaming queries, `now()` can only be used with WHERE, HAVING, and ON clauses. For more information, see [Temporal filters](/docs/current/sql-pattern-temporal-filters/). This constraint does not apply to batch queries.
+Returns the current date and time. For streaming queries, `now()` can only be used with WHERE, HAVING, and ON clauses. For more information, see [Temporal filters](/processing/sql/temporal-filters). This constraint does not apply to batch queries.
```bash
now() → timestamptz
diff --git a/sql/functions/overview.mdx b/sql/functions/overview.mdx
index 567b2a3a..e767070c 100644
--- a/sql/functions/overview.mdx
+++ b/sql/functions/overview.mdx
@@ -6,20 +6,20 @@ mode: wide
---
- Operators for logical operations.
- Functions and operators for comparing values.
- Functions and operators for mathematical operations.
- Functions and operators for string manipulation.
- Functions and operators for binary string operations.
- Functions and operators for date and time operations.
- Functions for cryptographic operations.
- Functions and operators for JSON data.
- Functions and operators for array operations.
- Functions and operators for map data structures.
- Expressions for conditional logic, such as CASE statements.
- Functions that compute a single result from a set of input values.
- Functions that operate on a set of rows related to the current row.
- Functions that return a set of rows, such as generate_series().
- Functions for system administration tasks in RisingWave and PostgreSQL.
- Functions to retrieve system information about database, schema, user, role, session, etc.
+ Operators for logical operations.
+ Functions and operators for comparing values.
+ Functions and operators for mathematical operations.
+ Functions and operators for string manipulation.
+ Functions and operators for binary string operations.
+ Functions and operators for date and time operations.
+ Functions for cryptographic operations.
+ Functions and operators for JSON data.
+ Functions and operators for array operations.
+ Functions and operators for map data structures.
+ Expressions for conditional logic, such as CASE statements.
+ Functions that compute a single result from a set of input values.
+ Functions that operate on a set of rows related to the current row.
+ Functions that return a set of rows, such as generate_series().
+ Functions for system administration tasks in RisingWave and PostgreSQL.
+ Functions to retrieve system information about database, schema, user, role, session, etc.
diff --git a/sql/functions/set-returning.mdx b/sql/functions/set-returning.mdx
index a9439ab6..82047afd 100644
--- a/sql/functions/set-returning.mdx
+++ b/sql/functions/set-returning.mdx
@@ -177,7 +177,7 @@ The result looks like this:
## \_pg\_expandarray()
-The `_pg_expandarray` function takes an array as input and expands it into a set of rows, providing values and their corresponding indices within the array. Ensure that [information\_schema](/docs/current/information-schema/) is in the search path to access the `_pg_expandarray` function.
+The `_pg_expandarray` function takes an array as input and expands it into a set of rows, providing values and their corresponding indices within the array. Ensure that [information\_schema](/sql/system-catalogs/information-schema) is in the search path to access the `_pg_expandarray` function.
Example:
diff --git a/sql/functions/window-functions.mdx b/sql/functions/window-functions.mdx
index 81df1ad2..c517e224 100644
--- a/sql/functions/window-functions.mdx
+++ b/sql/functions/window-functions.mdx
@@ -20,7 +20,7 @@ row_number() → integer
```
-We recommend using `row_number()` only for top-N pattern queries. For details about this pattern, see [Top-N by group](/docs/current/sql-pattern-topn/).
+We recommend using `row_number()` only for top-N pattern queries. For details about this pattern, see [Top-N by group](/processing/sql/top-n-by-group).
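+
+A hedged sketch of the pattern (table and column names are hypothetical):
+
+```sql
+SELECT * FROM (
+  SELECT *,
+    row_number() OVER (PARTITION BY category ORDER BY price DESC) AS rk
+  FROM products
+) WHERE rk <= 3;   -- top 3 products per category
+```
+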
### `rank()`
diff --git a/sql/overview.mdx b/sql/overview.mdx
index 60f65f36..22b4a62b 100644
--- a/sql/overview.mdx
+++ b/sql/overview.mdx
@@ -4,25 +4,25 @@ mod: wide
sidebarTitle: Overview
---
-
+
84 items
-
+
13 items
-
+
8 items
-
+
16 items
-
+
Naming restrictions
-
+
3 items
-
+
RisingWave supported commands
diff --git a/sql/query-syntax/overview.mdx b/sql/query-syntax/overview.mdx
index 2900c2c3..4faae76d 100644
--- a/sql/query-syntax/overview.mdx
+++ b/sql/query-syntax/overview.mdx
@@ -6,17 +6,17 @@ description: Syntax and usage of common query clauses.
- Introduction of various types of literals.
- Specify the value returned by a particular column.
- Create a generated column when creating a table or source.
- Specify the source of the data on which the query should operate.
- Specify any conditions or filters to apply to your data.
- Group rows in a table with identical data
- Eliminate group rows that do not satisfy a given condition.
- Sort the result set of a query in ascending or descending order.
- Restrict the number of rows fetched.
- Generate one or more rows of data as a table expression
- The results of two queries can be combined using the set operations UNION and INTERSECT.
- Write supplemental statements for a larger query.
- Used with set functions in the FROM clause of a query.
+ Introduction of various types of literals.
+ Specify the value returned by a particular column.
+ Create a generated column when creating a table or source.
+ Specify the source of the data on which the query should operate.
+ Specify any conditions or filters to apply to your data.
+ Group rows in a table with identical data
+ Eliminate group rows that do not satisfy a given condition.
+ Sort the result set of a query in ascending or descending order.
+ Restrict the number of rows fetched.
+ Generate one or more rows of data as a table expression
+ The results of two queries can be combined using the set operations UNION and INTERSECT.
+ Write supplemental statements for a larger query.
+ Used with set functions in the FROM clause of a query.
diff --git a/sql/query-syntax/value-exp.mdx b/sql/query-syntax/value-exp.mdx
index 0254b550..d2be4b0a 100644
--- a/sql/query-syntax/value-exp.mdx
+++ b/sql/query-syntax/value-exp.mdx
@@ -28,7 +28,7 @@ The `DISTINCT` keyword, which is only available in the second form, cannot be us
AGGREGATE:function_name
```
-where the `AGGREGATE:` prefix converts a [builtin array function](/docs/current/sql-function-array/) (e.g. `array_sum`) or an [user-defined function](/sql/udfs/user-defined-functions), to an aggregate function. The function being converted must accept exactly one argument of an [array type](/sql/data-types/array-type). After the conversion, a function like `foo ( array of T ) -> U` becomes an aggregate function like `AGGREGATE:foo ( T ) -> U`.
+where the `AGGREGATE:` prefix converts a [builtin array function](/sql/functions/array) (e.g. `array_sum`) or a [user-defined function](/sql/udfs/user-defined-functions) to an aggregate function. The function being converted must accept exactly one argument of an [array type](/sql/data-types/array-type). After the conversion, a function like `foo ( array of T ) -> U` becomes an aggregate function like `AGGREGATE:foo ( T ) -> U`.
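+
+For example, a sketch assuming a hypothetical table `t` with an `int` column `v`:
+
+```sql
+-- array_sum is a builtin array function; the prefix turns it into an aggregate
+SELECT AGGREGATE:array_sum(v) FROM t;
+```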
## Window function calls
@@ -57,7 +57,7 @@ For ranking window functions like `row_number`, `rank` and `dense_rank`, `ORDER
When operating in the [Emit on window close](/processing/emit-on-window-close) mode for a streaming query, the `ORDER BY` clause is required for all window functions. Please ensure that you specify exactly one column in the `ORDER BY` clause. This column, generally a timestamp column, must have a watermark defined for it. Note that when using the timestamp column from this streaming query in another streaming query, the watermark information associated with the column is not retained.
-`window_function_name` is one of the window functions listed on [Window functions](/docs/current/sql-function-window-functions/).
+`window_function_name` is one of the window functions listed on [Window functions](/sql/functions/window-functions).
`frame_clause` can be one of:
diff --git a/sql/query-syntax/with-ordinality-clause.mdx b/sql/query-syntax/with-ordinality-clause.mdx
index d8182ed0..23bc1558 100644
--- a/sql/query-syntax/with-ordinality-clause.mdx
+++ b/sql/query-syntax/with-ordinality-clause.mdx
@@ -5,7 +5,7 @@ description: "The `WITH ORDINALITY` clause can be used with set functions in the
An additional integer column will be added to the table, which numbers the rows returned by the function, starting from 1\. By default, the generated column is named `ordinality`.
-See [Set-returning functions](/docs/current/sql-function-set-returning/) for a list of supported set functions.
+See [Set-returning functions](/sql/functions/set-returning) for a list of supported set functions.
Here is a simple example of how the `WITH ORDINALITY` clause works.
diff --git a/sql/system-catalogs/overview.mdx b/sql/system-catalogs/overview.mdx
index c6399ae8..7524bbf9 100644
--- a/sql/system-catalogs/overview.mdx
+++ b/sql/system-catalogs/overview.mdx
@@ -6,7 +6,7 @@ mode: wide
---
- A set of views containing information about objects defined in the current database.
- System catalogs and views of PostgreSQL supported by RisingWave.
- System tables and views providing metadata about relations and cluster job status in RisingWave.
+ A set of views containing information about objects defined in the current database.
+ System catalogs and views of PostgreSQL supported by RisingWave.
+ System tables and views providing metadata about relations and cluster job status in RisingWave.
diff --git a/sql/system-catalogs/pg-catalog.mdx b/sql/system-catalogs/pg-catalog.mdx
index 61b03b55..33436d57 100644
--- a/sql/system-catalogs/pg-catalog.mdx
+++ b/sql/system-catalogs/pg-catalog.mdx
@@ -3,7 +3,7 @@ title: "PostgreSQL catalogs"
description: "RisingWave supports these system catalogs and views of PostgreSQL."
---
-For information about RisingWave and PostgreSQL system functions, see [System administration functions](/sql/functions/sys-admin) and [System information functions](/docs/current/sql-function-sys-info/).
+For information about RisingWave and PostgreSQL system functions, see [System administration functions](/sql/functions/sys-admin) and [System information functions](/sql/functions/sys-info).
RisingWave does not fully support all PostgreSQL system catalog columns.
diff --git a/sql/udfs/embedded-python-udfs.mdx b/sql/udfs/embedded-python-udfs.mdx
index f9718f36..89e8f793 100644
--- a/sql/udfs/embedded-python-udfs.mdx
+++ b/sql/udfs/embedded-python-udfs.mdx
@@ -4,7 +4,7 @@ description: "You can define embedded Python UDFs, which will be executed in an
sidebarTitle: Python
---
-Currently, embedded Python UDFs only support pure computational logic and do not support accessing external networks or file systems. If you need to access external services or resources, you can use [Python UDFs as external functions](/docs/current/udf-python/).
+Currently, embedded Python UDFs only support pure computational logic and do not support accessing external networks or file systems. If you need to access external services or resources, you can use [Python UDFs as external functions](/sql/udfs/use-udfs-in-python).
## Define your functions
You can create Python UDFs using the `CREATE FUNCTION` command. Refer to the syntax below:
@@ -29,7 +29,7 @@ $$;
The Python code must contain a function that has the same name as declared in the `CREATE FUNCTION` statement. The function's parameters and return type must match those declared in the `CREATE FUNCTION` statement, otherwise, an error may occur when the function is called.
-See the correspondence between SQL types and Python types in the [Data type mapping](/docs/current/udf-python-embedded/#data-type-mapping).
+See the correspondence between SQL types and Python types in the [Data type mapping](/sql/udfs/embedded-python-udfs#data-type-mapping).
Due to the nature of Python, the correctness of the source code cannot be verified when creating a function. We recommend verifying that your implementation is correct by running batch queries before using UDFs in materialized views. If an error occurs when a UDF is executed in a materialized view, all output results will be NULL.
diff --git a/sql/udfs/use-udfs-in-java.mdx b/sql/udfs/use-udfs-in-java.mdx
index 30dbcdd5..e8c3d0bc 100644
--- a/sql/udfs/use-udfs-in-java.mdx
+++ b/sql/udfs/use-udfs-in-java.mdx
@@ -77,7 +77,7 @@ A user-defined scalar function maps zero, one, or multiple scalar values to a ne
In order to define a scalar function, you have to create a new class that implements the `ScalarFunction` interface in `com.risingwave.functions` and implement exactly one evaluation method named `eval(...)`. This method must be declared public and non-static.
-Any data type listed in [Data type mapping](/docs/current/udf-java/#data-type-mapping) can be used as a parameter or return type of an evaluation method.
+Any data type listed in [Data type mapping](/sql/udfs/use-udfs-in-java#data-type-mapping) can be used as a parameter or return type of an evaluation method.
Here's an example of a scalar function that calculates the greatest common divisor (GCD) of two integers:
@@ -110,7 +110,7 @@ A user-defined table function maps zero, one, or multiple scalar values to one o
In order to define a table function, you have to create a new class that implements the `TableFunction` interface in `com.risingwave.functions` and implement exactly one evaluation method named `eval(...)`. This method must be declared public and non-static.
-The return type must be an `Iterator` of any data type listed in [Data type mapping](/docs/current/udf-java/#data-type-mapping).
+The return type must be an `Iterator` of any data type listed in [Data type mapping](/sql/udfs/use-udfs-in-java#data-type-mapping).
Similar to scalar functions, input and output data types are automatically extracted using reflection. This includes the generic argument `T` of the return value, which determines the output data type.
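For context, once a Java UDF server exposing such functions is running, registering one from SQL might look like the following sketch; the link address is a placeholder assumption:

```sql
-- Register an external Java UDF served by a UDF server; replace the
-- placeholder link with the address of your own server.
CREATE FUNCTION gcd(int, int) RETURNS int
AS gcd USING LINK 'http://localhost:8815';
```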
diff --git a/sql/udfs/use-udfs-in-javascript.mdx b/sql/udfs/use-udfs-in-javascript.mdx
index 8ab0f6d2..b8e3e956 100644
--- a/sql/udfs/use-udfs-in-javascript.mdx
+++ b/sql/udfs/use-udfs-in-javascript.mdx
@@ -33,7 +33,7 @@ CREATE FUNCTION gcd(a int, b int) RETURNS int LANGUAGE javascript AS $$
$$;
```
-See the correspondence between SQL types and JavaScript types in the [Data type mapping](/docs/current/udf-javascript/#data-type-mapping). You need to ensure that the type of the return value is either `null` or consistent with the type in the `RETURNS` clause.
+See the correspondence between SQL types and JavaScript types in the [Data type mapping](/sql/udfs/use-udfs-in-javascript#data-type-mapping). You need to ensure that the type of the return value is either `null` or consistent with the type in the `RETURNS` clause.
If the function you defined returns a table, you need to use the `yield` statement to return the data of each row. For example:
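A minimal sketch of such a table function (the `series` name and single-column schema are illustrative):

```sql
-- Each yield emits one row of the result table.
CREATE FUNCTION series(n int) RETURNS TABLE (x int) LANGUAGE javascript AS $$
    for (let i = 0; i < n; i++) {
        yield i;
    }
$$;
```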
diff --git a/sql/udfs/use-udfs-in-rust.mdx b/sql/udfs/use-udfs-in-rust.mdx
index e3e6b461..b856d0be 100644
--- a/sql/udfs/use-udfs-in-rust.mdx
+++ b/sql/udfs/use-udfs-in-rust.mdx
@@ -30,7 +30,7 @@ CREATE FUNCTION gcd(int, int) RETURNS int LANGUAGE rust AS $$
$$;
```
-The Rust code must start with a `fn` function, and the function's name, parameters, and return type must match those declared in the `CREATE FUNCTION` statement. Refer to the [Data type mapping](/docs/current/udf-rust/#data-type-mapping) for details on the correspondence between SQL types and Rust types.
+The Rust code must start with a `fn` function, and the function's name, parameters, and return type must match those declared in the `CREATE FUNCTION` statement. Refer to the [Data type mapping](/sql/udfs/use-udfs-in-rust#data-type-mapping) for details on the correspondence between SQL types and Rust types.
For table functions, your function must return an `impl Iterator<Item = T>` type, where `T` is the type of the returned elements. For example, to generate a sequence from 0 to n-1:
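The sequence example itself is outside this hunk; a sketch matching that description might be:

```sql
-- 0..n is a Rust range, which is itself an Iterator<Item = i32>.
CREATE FUNCTION series(n int) RETURNS TABLE (x int) LANGUAGE rust AS $$
fn series(n: i32) -> impl Iterator<Item = i32> {
    0..n
}
$$;
```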
@@ -61,7 +61,7 @@ $$;
First, define a structure using `struct` and annotate it with `#[derive(StructType)]`. Its fields must match the struct type declared in the `CREATE FUNCTION` statement. Then, define the function and annotate it with the `#[function("...")]` macro. The string in the macro represents the SQL signature of the function, with the tail return type being `struct StructName`. For the specific syntax, see [arrow-udf](https://docs.rs/arrow-udf/0.2.1/arrow%5Fudf/attr.function.html).
-Currently, in `CREATE FUNCTION` statement, Rust code can only use libraries from the standard library, `chrono`, `rust_decimal`, `serde_json`, and does not support other third-party libraries. If you wish to use other libraries, you may consider [compiling WebAssembly modules manually](/docs/current/udf-rust/#alternative-manually-build-your-functions-into-a-webassembly-module).
+Currently, in the `CREATE FUNCTION` statement, Rust code can only use the standard library plus `chrono`, `rust_decimal`, and `serde_json`; other third-party libraries are not supported. If you wish to use other libraries, you may consider [compiling WebAssembly modules manually](/sql/udfs/use-udfs-in-rust#alternative-manually-build-your-functions-into-a-webassembly-module).
## Use your functions in RisingWave
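Tying together the struct-returning pattern described above, a hedged sketch; the `key_value` name and signature mirror the arrow-udf examples and are illustrative:

```sql
CREATE FUNCTION key_value(varchar) RETURNS struct<key varchar, value varchar>
LANGUAGE rust AS $$
// The struct fields match the declared SQL struct type.
#[derive(StructType)]
struct KeyValue<'a> {
    key: &'a str,
    value: &'a str,
}

// The macro string is the function's SQL signature.
#[function("key_value(varchar) -> struct KeyValue")]
fn key_value(kv: &str) -> Option<KeyValue<'_>> {
    let (key, value) = kv.split_once('=')?;
    Some(KeyValue { key, value })
}
$$;
```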
@@ -127,7 +127,7 @@ fn series(n: i32) -> impl Iterator<Item = i32> {
You can find more usages in these [functions](https://docs.rs/arrow%5Fudf/0.2.1/arrow%5Fudf/attr.function.html) and more examples in these [tests](https://github.com/risingwavelabs/arrow-udf/blob/main/arrow-udf/tests/tests.rs).
-See the correspondence between SQL types and Rust types in the [Data type mapping](/docs/current/udf-rust/#data-type-mapping).
+See the correspondence between SQL types and Rust types in the [Data type mapping](/sql/udfs/use-udfs-in-rust#data-type-mapping).
### 3\. Build the project
diff --git a/sql/udfs/user-defined-functions.mdx b/sql/udfs/user-defined-functions.mdx
index 1af3f52d..a3a5ceff 100644
--- a/sql/udfs/user-defined-functions.mdx
+++ b/sql/udfs/user-defined-functions.mdx
@@ -26,8 +26,8 @@ You can create all types of UDFs mentioned above using the [CREATE FUNCTION](/sq
RisingWave supports creating external UDFs with the following programming languages:
-
-
+
+
### Embedded UDFs
@@ -35,9 +35,9 @@ RisingWave supports creating external UDFs with the following programming langua
RisingWave supports creating embedded UDFs with the following programming languages:
-
-
-
+
+
+
### SQL UDFs
@@ -45,7 +45,7 @@ RisingWave supports creating embedded UDFs with the following programming langua
For details about how to create a SQL UDF and its use cases, see:
-
+
## Other ways to categorize UDFs
@@ -57,7 +57,7 @@ The other two dimensions are:
* The input and output of functions.
For this dimension, since common SQL functions include scalar functions, table functions, aggregate functions, and window functions, UDFs can be classified as user-defined scalar functions (abbreviated as UDFs), user-defined table functions (UDTFs), user-defined aggregate functions (UDAFs), and user-defined window functions (UDWFs).
RisingWave supports UDFs, UDTFs, and UDAFs, covering most practical needs. You can find their guides in our documentation as well.
-For example, for UDAFs, you can use the [CREATE AGGREGATE](/sql/commands/sql-create-aggregate) command to create functions. Meanwhile, we also offer dedicated sections for creating UDAFs in [Embedded Python UDFs](/docs/current/udf-python-embedded/#define-your-aggregate-functions) and [Embedded JavaScript UDFs](/docs/current/udf-javascript/#define-your-aggregate-functions).
+For example, for UDAFs, you can use the [CREATE AGGREGATE](/sql/commands/sql-create-aggregate) command to create functions. Meanwhile, we also offer dedicated sections for creating UDAFs in [Embedded Python UDFs](/sql/udfs/embedded-python-udfs#define-your-aggregate-functions) and [Embedded JavaScript UDFs](/sql/udfs/use-udfs-in-javascript#define-your-aggregate-functions).
* The language used to write functions.
RisingWave currently supports using SQL, Python, Java, JavaScript, and Rust to write UDFs.
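To make the UDAF path mentioned in the hunk above concrete, a hedged `CREATE AGGREGATE` sketch in embedded Python; the `create_state`/`accumulate`/`finish` protocol follows the linked UDAF sections, and the names here are illustrative:

```sql
CREATE AGGREGATE weighted_avg(value int, weight int) RETURNS float
LANGUAGE python AS $$
def create_state():
    # Running (weighted sum, total weight).
    return (0, 0)

def accumulate(state, value, weight):
    if value is None or weight is None:
        return state
    s, w = state
    return (s + value * weight, w + weight)

def finish(state):
    s, w = state
    return None if w == 0 else s / w
$$;
```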
diff --git a/troubleshoot/node-failure.mdx b/troubleshoot/node-failure.mdx
index 9328382e..d3fd91bd 100644
--- a/troubleshoot/node-failure.mdx
+++ b/troubleshoot/node-failure.mdx
@@ -7,7 +7,7 @@ description: "When it comes to crafting downtime-sensitive applications, users o
2. Is it feasible to deploy multiple replicas of the nodes to eliminate single points of failure?
3. How can we mitigate the impact of downtime?
-In this topic, we will provide answers to these questions. However, before you continue reading, we suggest that you first explore our [fault tolerance mechanism](/docs/current/fault-tolerance/) to gain a better understanding.
+In this topic, we will provide answers to these questions. However, before you continue reading, we suggest that you first explore our [fault tolerance mechanism](/reference/fault-tolerance) to gain a better understanding.
---
diff --git a/troubleshoot/overview.mdx b/troubleshoot/overview.mdx
index 3bb46447..e6e22b7a 100644
--- a/troubleshoot/overview.mdx
+++ b/troubleshoot/overview.mdx
@@ -41,7 +41,7 @@ We recommend that you deploy a dedicated logging system, such as [Grafana Loki](
## Leverage system catalogs
-System catalogs provide comprehensive information about the definitions and metadata of objects in RisingWave. For a detailed list of system catalogs available, see [System catalogs](/docs/current/pg-catalogs/).
+System catalogs provide comprehensive information about the definitions and metadata of objects in RisingWave. For a detailed list of system catalogs available, see [System catalogs](/sql/system-catalogs/overview).
## Support resources
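To illustrate the system-catalog pointer in the hunk above, a hedged example query; the `rw_catalog.rw_relations` view name is an assumption here:

```sql
-- Inspect relation metadata from a system view while troubleshooting.
SELECT * FROM rw_catalog.rw_relations LIMIT 5;
```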
diff --git a/troubleshoot/troubleshoot-high-latency.mdx b/troubleshoot/troubleshoot-high-latency.mdx
index 937f3cc5..53f69f82 100644
--- a/troubleshoot/troubleshoot-high-latency.mdx
+++ b/troubleshoot/troubleshoot-high-latency.mdx
@@ -51,7 +51,7 @@ For example, the following figure shows a materialized view with extremely high
-To solve the issue, consider rewriting the SQL query to reduce join amplification, such as using better equal conditions on the problematic join to reduce the number of matched rows. See [Maintain wide table with table sinks](/docs/current/multiple-table-sink/) for details.
+To solve the issue, consider rewriting the SQL query to reduce join amplification, for example by adding more selective equality conditions on the problematic join so that fewer rows match. See [Maintain wide table with table sinks](/processing/maintain-wide-table-with-table-sinks) for details.
At the same time, a log of `high_join_amplification` with the problematic join keys will be printed, such as