From 271faac7e8eb71c756746897b1d1db6e86d22ad3 Mon Sep 17 00:00:00 2001 From: Xinhao Xu <84456268+xxhZs@users.noreply.github.com> Date: Mon, 4 Nov 2024 12:07:39 +0800 Subject: [PATCH 01/15] fix(sink): Fix excessive number of starrocks txn (#19217) --- src/connector/src/sink/starrocks.rs | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/src/connector/src/sink/starrocks.rs b/src/connector/src/sink/starrocks.rs index 456415a62e0d..35d1033d8ee1 100644 --- a/src/connector/src/sink/starrocks.rs +++ b/src/connector/src/sink/starrocks.rs @@ -371,7 +371,7 @@ pub struct StarrocksSinkWriter { pk_indices: Vec, is_append_only: bool, client: Option, - txn_client: StarrocksTxnClient, + txn_client: Arc, row_encoder: JsonEncoder, executor_id: u64, curr_txn_label: Option, @@ -432,7 +432,7 @@ impl StarrocksSinkWriter { pk_indices, is_append_only, client: None, - txn_client: StarrocksTxnClient::new(txn_request_builder), + txn_client: Arc::new(StarrocksTxnClient::new(txn_request_builder)), row_encoder: JsonEncoder::new_with_starrocks(schema, None), executor_id, curr_txn_label: None, @@ -527,6 +527,23 @@ impl StarrocksSinkWriter { } } +impl Drop for StarrocksSinkWriter { + fn drop(&mut self) { + if let Some(txn_label) = self.curr_txn_label.take() { + let txn_client = self.txn_client.clone(); + tokio::spawn(async move { + if let Err(e) = txn_client.rollback(txn_label.clone()).await { + tracing::error!( + "starrocks rollback transaction error: {:?}, txn label: {}", + e.as_report(), + txn_label + ); + } + }); + } + } +} + #[async_trait] impl SinkWriter for StarrocksSinkWriter { type CommitMetadata = Option; From f559d3468a16f0a15e429dfb55276b1a07c814e3 Mon Sep 17 00:00:00 2001 From: Bugen Zhao Date: Mon, 4 Nov 2024 12:56:21 +0800 Subject: [PATCH 02/15] chore(meta): bump sea-orm to 1.0 (#19207) Signed-off-by: Bugen Zhao --- Cargo.lock | 122 +++++++----------- Cargo.toml | 8 +- src/meta/model/migration/Cargo.toml | 6 +- src/meta/model/migration/src/lib.rs | 1 + .../migration/src/m20230908_072257_init.rs | 77 +++++++---- .../migration/src/m20231008_020431_hummock.rs | 9 +- .../src/m20240304_074901_subscription.rs | 13 +- ...240506_112555_subscription_partial_ckpt.rs | 13 +- .../migration/src/m20240525_090457_secret.rs | 7 +- ...20240617_070131_index_column_properties.rs | 4 +- ...m20240617_071625_sink_into_table_column.rs | 3 +- ...40618_072634_function_compressed_binary.rs | 3 +- .../m20240630_131430_remove_parallel_unit.rs | 7 +- .../m20240701_060504_hummock_time_travel.rs | 7 +- src/meta/model/migration/src/utils.rs | 17 +++ src/meta/model/src/actor.rs | 2 +- src/meta/model/src/actor_dispatcher.rs | 2 +- src/meta/model/src/catalog_version.rs | 2 +- src/meta/model/src/fragment.rs | 2 +- src/meta/model/src/function.rs | 2 +- src/meta/model/src/lib.rs | 4 +- src/meta/model/src/object.rs | 2 +- src/meta/model/src/secret.rs | 1 - src/meta/model/src/sink.rs | 2 +- src/meta/model/src/table.rs | 4 +- src/meta/model/src/user_privilege.rs | 2 +- src/meta/model/src/worker.rs | 4 +- 27 files changed, 187 insertions(+), 139 deletions(-) create mode 100644 src/meta/model/migration/src/utils.rs diff --git a/Cargo.lock b/Cargo.lock index 633698b84ac3..ea8e7d927645 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3929,7 +3929,7 @@ dependencies = [ name = "delta_btree_map" version = "2.2.0-alpha" dependencies = [ - "educe", + "educe 0.6.0", "enum-as-inner 0.6.0", ] @@ -4454,6 +4454,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "educe" +version = "0.5.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4bd92664bf78c4d3dba9b7cdafce6fa15b13ed3ed16175218196942e99168a8" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "educe" version = "0.6.0" @@ -10648,7 +10660,7 @@ dependencies = [ "criterion", "darwin-libproc", "easy-ext", - "educe", + "educe 0.6.0", "either", "enum-as-inner 0.6.0", "enumflags2", @@ -10740,7 +10752,7 @@ name = "risingwave_common_estimate_size" version = "2.2.0-alpha" dependencies = [ "bytes", - "educe", + "educe 0.6.0", "ethnum", "fixedbitset 0.5.0", "jsonbb", @@ -11055,7 +11067,7 @@ dependencies = [ "rustls-pemfile 2.2.0", "rustls-pki-types", "rw_futures_util", - "sea-schema 0.15.0", + "sea-schema", "serde", "serde_derive", "serde_json", @@ -11230,7 +11242,7 @@ dependencies = [ "const-currying", "downcast-rs", "easy-ext", - "educe", + "educe 0.6.0", "either", "enum-as-inner 0.6.0", "expect-test", @@ -11273,7 +11285,7 @@ dependencies = [ "chrono", "chrono-tz 0.10.0", "criterion", - "educe", + "educe 0.6.0", "expect-test", "fancy-regex", "futures-async-stream", @@ -11340,7 +11352,7 @@ dependencies = [ "downcast-rs", "dyn-clone", "easy-ext", - "educe", + "educe 0.6.0", "either", "enum-as-inner 0.6.0", "expect-test", @@ -11593,7 +11605,7 @@ dependencies = [ "comfy-table", "crepe", "easy-ext", - "educe", + "educe 0.6.0", "either", "enum-as-inner 0.6.0", "expect-test", @@ -11695,6 +11707,7 @@ name = "risingwave_meta_model_migration" version = "2.2.0-alpha" dependencies = [ "async-std", + "easy-ext", "sea-orm", "sea-orm-migration", "serde", @@ -11708,7 +11721,7 @@ version = "2.2.0-alpha" dependencies = [ "anyhow", "clap", - "educe", + "educe 0.6.0", "either", "futures", "hex", @@ -12133,7 +12146,7 @@ dependencies = [ "cfg-if", "criterion", "delta_btree_map", - "educe", + "educe 0.6.0", "either", "enum-as-inner 0.6.0", "expect-test", @@ -12781,9 +12794,9 @@ dependencies = [ [[package]] name = "sea-orm" -version = "0.12.15" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8814e37dc25de54398ee62228323657520b7f29713b8e238649385dbe473ee0" +checksum = "ea1fee0cf8528dbe6eda29d5798afc522a63b75e44c5b15721e6e64af9c7cc4b" dependencies = [ "async-stream", "async-trait", @@ -12794,12 +12807,12 @@ dependencies = [ "ouroboros", "rust_decimal", "sea-orm-macros", - "sea-query 0.30.7", - "sea-query-binder 0.5.0", + "sea-query", + "sea-query-binder", "serde", "serde_json", "sqlx", - "strum 0.25.0", + "strum 0.26.3", "thiserror", "time", "tracing", @@ -12809,16 +12822,16 @@ dependencies = [ [[package]] name = "sea-orm-cli" -version = "0.12.15" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "620bc560062ae251b1366bde43b3f1508445cab5c2c8cbdb397034638ab1b357" +checksum = "5f0b8869c75cf3fbb1bd860abb025033cd2e514c5f4fa43e792697cb1fe6c882" dependencies = [ "chrono", "clap", "dotenvy", "glob", "regex", - "sea-schema 0.14.2", + "sea-schema", "tracing", "tracing-subscriber", "url", @@ -12826,9 +12839,9 @@ dependencies = [ [[package]] name = "sea-orm-macros" -version = "0.12.15" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e115c6b078e013aa963cc2d38c196c2c40b05f03d0ac872fe06b6e0d5265603" +checksum = "8737b566799ed0444f278d13c300c4c6f1a91782f60ff5825a591852d5502030" dependencies = [ "heck 0.4.1", "proc-macro2", @@ -12840,9 +12853,9 @@ dependencies = [ [[package]] name = "sea-orm-migration" -version = "0.12.15" +version = "1.0.1" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8269bc6ff71afd6b78aa4333ac237a69eebd2cdb439036291e64fb4b8db23c" +checksum = "216643749e26ce27ab6c51d3475f2692981d4a902d34455bcd322f412900df5c" dependencies = [ "async-trait", "clap", @@ -12850,20 +12863,20 @@ dependencies = [ "futures", "sea-orm", "sea-orm-cli", - "sea-schema 0.14.2", + "sea-schema", "tracing", "tracing-subscriber", ] [[package]] name = "sea-query" -version = "0.30.7" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4166a1e072292d46dc91f31617c2a1cdaf55a8be4b5c9f4bf2ba248e3ac4999b" +checksum = "b4fd043b8117af233e221f73e3ea8dfbc8e8c3c928017c474296db45c649105c" dependencies = [ "bigdecimal 0.3.1", "chrono", - "derivative", + "educe 0.5.11", "inherent", "ordered-float 3.9.1", "rust_decimal", @@ -12873,42 +12886,22 @@ dependencies = [ "uuid", ] -[[package]] -name = "sea-query" -version = "0.31.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e5073b2cfed767511a57d18115f3b3d8bcb5690bf8c89518caec6cb22c0cd74" -dependencies = [ - "inherent", - "sea-query-derive", -] - [[package]] name = "sea-query-binder" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36bbb68df92e820e4d5aeb17b4acd5cc8b5d18b2c36a4dd6f4626aabfa7ab1b9" +checksum = "754965d4aee6145bec25d0898e5c931e6c22859789ce62fd85a42a15ed5a8ce3" dependencies = [ "bigdecimal 0.3.1", "chrono", "rust_decimal", - "sea-query 0.30.7", + "sea-query", "serde_json", "sqlx", "time", "uuid", ] -[[package]] -name = "sea-query-binder" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "754965d4aee6145bec25d0898e5c931e6c22859789ce62fd85a42a15ed5a8ce3" -dependencies = [ - "sea-query 0.31.0", - "sqlx", -] - [[package]] name = "sea-query-derive" version = "0.4.2" @@ -12923,17 +12916,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "sea-schema" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d148608012d25222442d1ebbfafd1228dbc5221baf4ec35596494e27a2394e" -dependencies = [ - "futures", - "sea-query 0.30.7", - "sea-schema-derive 0.2.0", -] - [[package]] name = "sea-schema" version = "0.15.0" @@ -12941,24 +12923,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad52149fc81836ea7424c3425d8f6ed8ad448dd16d2e4f6a3907ba46f3f2fd78" dependencies = [ "futures", - "sea-query 0.31.0", - "sea-query-binder 0.6.0", - "sea-schema-derive 0.3.0", + "sea-query", + "sea-query-binder", + "sea-schema-derive", "sqlx", ] -[[package]] -name = "sea-schema-derive" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6f686050f76bffc4f635cda8aea6df5548666b830b52387e8bc7de11056d11e" -dependencies = [ - "heck 0.4.1", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "sea-schema-derive" version = "0.3.0" @@ -13711,7 +13681,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2b6b8f606d3c4cdcaf2031c4320b79d7584e454b79562ba3d675f49701c160e" dependencies = [ "async-trait", - "educe", + "educe 0.6.0", "fs-err", "futures", "glob", diff --git a/Cargo.toml b/Cargo.toml index 2000a730297f..17ba5bf07551 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -175,12 +175,12 @@ parking_lot = { version = "0.12", features = [ "arc_lock", "deadlock_detection", ] } -sea-orm = { version = "0.12.15", features = [ - "sqlx-mysql", - "sqlx-postgres", - 
"sqlx-sqlite", +sea-orm = { version = "~1.0", features = [ + "sqlx-all", "runtime-tokio-native-tls", + "with-uuid", ] } +sea-orm-migration = "~1.0" sqlx = { version = "0.7.4", default-features = false, features = [ "bigdecimal", "chrono", diff --git a/src/meta/model/migration/Cargo.toml b/src/meta/model/migration/Cargo.toml index a54753da16fb..9d9fbd5292d2 100644 --- a/src/meta/model/migration/Cargo.toml +++ b/src/meta/model/migration/Cargo.toml @@ -15,11 +15,9 @@ normal = ["workspace-hack"] [dependencies] async-std = { version = "1", features = ["attributes", "tokio1"] } +easy-ext = "1" sea-orm = { workspace = true } +sea-orm-migration = { workspace = true } serde = { version = "1", features = ["derive"] } serde_json = "1" uuid = { version = "1", features = ["v4"] } - -[dependencies.sea-orm-migration] -version = "0.12.14" -features = ["sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", "runtime-tokio-native-tls", "with-uuid"] diff --git a/src/meta/model/migration/src/lib.rs b/src/meta/model/migration/src/lib.rs index 0c8c244c0624..b84a29891eee 100644 --- a/src/meta/model/migration/src/lib.rs +++ b/src/meta/model/migration/src/lib.rs @@ -24,6 +24,7 @@ mod m20240820_081248_add_time_travel_per_table_epoch; mod m20240911_083152_variable_vnode_count; mod m20241016_065621_hummock_gc_history; mod m20241025_062548_singleton_vnode_count; +mod utils; pub struct Migrator; diff --git a/src/meta/model/migration/src/m20230908_072257_init.rs b/src/meta/model/migration/src/m20230908_072257_init.rs index 7840c40cd8f3..d30e9fe1d949 100644 --- a/src/meta/model/migration/src/m20230908_072257_init.rs +++ b/src/meta/model/migration/src/m20230908_072257_init.rs @@ -1,6 +1,7 @@ use sea_orm_migration::prelude::{Index as MigrationIndex, Table as MigrationTable, *}; use crate::sea_orm::{DatabaseBackend, DbBackend, Statement}; +use crate::utils::ColumnDefExt; use crate::{assert_not_has_tables, drop_tables}; #[derive(DeriveMigrationName)] @@ -140,7 +141,7 @@ impl MigrationTrait for Migration { .col(ColumnDef::new(User::CanCreateDb).boolean().not_null()) .col(ColumnDef::new(User::CanCreateUser).boolean().not_null()) .col(ColumnDef::new(User::CanLogin).boolean().not_null()) - .col(ColumnDef::new(User::AuthInfo).binary()) + .col(ColumnDef::new(User::AuthInfo).rw_binary(manager)) .to_owned(), ) .await?; @@ -378,10 +379,14 @@ impl MigrationTrait for Migration { ) .col( ColumnDef::new(Fragment::StreamNode) - .blob(BlobSize::Long) + .rw_binary(manager) + .not_null(), + ) + .col( + ColumnDef::new(Fragment::VnodeMapping) + .rw_binary(manager) .not_null(), ) - .col(ColumnDef::new(Fragment::VnodeMapping).binary().not_null()) .col(ColumnDef::new(Fragment::StateTableIds).json_binary()) .col(ColumnDef::new(Fragment::UpstreamFragmentId).json_binary()) .foreign_key( @@ -407,12 +412,16 @@ impl MigrationTrait for Migration { ) .col(ColumnDef::new(Actor::FragmentId).integer().not_null()) .col(ColumnDef::new(Actor::Status).string().not_null()) - .col(ColumnDef::new(Actor::Splits).binary()) + .col(ColumnDef::new(Actor::Splits).rw_binary(manager)) .col(ColumnDef::new(Actor::ParallelUnitId).integer().not_null()) .col(ColumnDef::new(Actor::WorkerId).integer().not_null()) .col(ColumnDef::new(Actor::UpstreamActorIds).json_binary()) - .col(ColumnDef::new(Actor::VnodeBitmap).binary()) - .col(ColumnDef::new(Actor::ExprContext).binary().not_null()) + .col(ColumnDef::new(Actor::VnodeBitmap).rw_binary(manager)) + .col( + ColumnDef::new(Actor::ExprContext) + .rw_binary(manager) + .not_null(), + ) .foreign_key( &mut ForeignKey::create() 
.name("FK_actor_fragment_id") @@ -454,7 +463,7 @@ impl MigrationTrait for Migration { .json_binary() .not_null(), ) - .col(ColumnDef::new(ActorDispatcher::HashMapping).binary()) + .col(ColumnDef::new(ActorDispatcher::HashMapping).rw_binary(manager)) .col( ColumnDef::new(ActorDispatcher::DispatcherId) .integer() @@ -495,7 +504,11 @@ impl MigrationTrait for Migration { .primary_key(), ) .col(ColumnDef::new(Connection::Name).string().not_null()) - .col(ColumnDef::new(Connection::Info).binary().not_null()) + .col( + ColumnDef::new(Connection::Info) + .rw_binary(manager) + .not_null(), + ) .foreign_key( &mut ForeignKey::create() .name("FK_connection_object_id") @@ -514,7 +527,11 @@ impl MigrationTrait for Migration { .col(ColumnDef::new(Source::SourceId).integer().primary_key()) .col(ColumnDef::new(Source::Name).string().not_null()) .col(ColumnDef::new(Source::RowIdIndex).integer()) - .col(ColumnDef::new(Source::Columns).binary().not_null()) + .col( + ColumnDef::new(Source::Columns) + .rw_binary(manager) + .not_null(), + ) .col(ColumnDef::new(Source::PkColumnIds).json_binary().not_null()) .col( ColumnDef::new(Source::WithProperties) @@ -522,8 +539,12 @@ impl MigrationTrait for Migration { .not_null(), ) .col(ColumnDef::new(Source::Definition).text().not_null()) - .col(ColumnDef::new(Source::SourceInfo).binary()) - .col(ColumnDef::new(Source::WatermarkDescs).binary().not_null()) + .col(ColumnDef::new(Source::SourceInfo).rw_binary(manager)) + .col( + ColumnDef::new(Source::WatermarkDescs) + .rw_binary(manager) + .not_null(), + ) .col(ColumnDef::new(Source::OptionalAssociatedTableId).integer()) .col(ColumnDef::new(Source::ConnectionId).integer()) .col(ColumnDef::new(Source::Version).big_integer().not_null()) @@ -562,8 +583,8 @@ impl MigrationTrait for Migration { .col(ColumnDef::new(Table::OptionalAssociatedSourceId).integer()) .col(ColumnDef::new(Table::TableType).string().not_null()) .col(ColumnDef::new(Table::BelongsToJobId).integer()) - .col(ColumnDef::new(Table::Columns).binary().not_null()) - .col(ColumnDef::new(Table::Pk).binary().not_null()) + .col(ColumnDef::new(Table::Columns).rw_binary(manager).not_null()) + .col(ColumnDef::new(Table::Pk).rw_binary(manager).not_null()) .col( ColumnDef::new(Table::DistributionKey) .json_binary() @@ -593,14 +614,14 @@ impl MigrationTrait for Migration { ) .col(ColumnDef::new(Table::DistKeyInPk).json_binary().not_null()) .col(ColumnDef::new(Table::DmlFragmentId).integer()) - .col(ColumnDef::new(Table::Cardinality).binary()) + .col(ColumnDef::new(Table::Cardinality).rw_binary(manager)) .col( ColumnDef::new(Table::CleanedByWatermark) .boolean() .not_null(), ) .col(ColumnDef::new(Table::Description).string()) - .col(ColumnDef::new(Table::Version).binary()) + .col(ColumnDef::new(Table::Version).rw_binary(manager)) .col(ColumnDef::new(Table::RetentionSeconds).integer()) .col( ColumnDef::new(Table::IncomingSinks) @@ -655,8 +676,8 @@ impl MigrationTrait for Migration { .table(Sink::Table) .col(ColumnDef::new(Sink::SinkId).integer().primary_key()) .col(ColumnDef::new(Sink::Name).string().not_null()) - .col(ColumnDef::new(Sink::Columns).binary().not_null()) - .col(ColumnDef::new(Sink::PlanPk).binary().not_null()) + .col(ColumnDef::new(Sink::Columns).rw_binary(manager).not_null()) + .col(ColumnDef::new(Sink::PlanPk).rw_binary(manager).not_null()) .col( ColumnDef::new(Sink::DistributionKey) .json_binary() @@ -669,7 +690,7 @@ impl MigrationTrait for Migration { .col(ColumnDef::new(Sink::ConnectionId).integer()) .col(ColumnDef::new(Sink::DbName).string().not_null()) 
.col(ColumnDef::new(Sink::SinkFromName).string().not_null()) - .col(ColumnDef::new(Sink::SinkFormatDesc).binary()) + .col(ColumnDef::new(Sink::SinkFormatDesc).rw_binary(manager)) .col(ColumnDef::new(Sink::TargetTable).integer()) .foreign_key( &mut ForeignKey::create() @@ -704,7 +725,7 @@ impl MigrationTrait for Migration { .col(ColumnDef::new(View::Name).string().not_null()) .col(ColumnDef::new(View::Properties).json_binary().not_null()) .col(ColumnDef::new(View::Definition).text().not_null()) - .col(ColumnDef::new(View::Columns).binary().not_null()) + .col(ColumnDef::new(View::Columns).rw_binary(manager).not_null()) .foreign_key( &mut ForeignKey::create() .name("FK_view_object_id") @@ -724,7 +745,11 @@ impl MigrationTrait for Migration { .col(ColumnDef::new(Index::Name).string().not_null()) .col(ColumnDef::new(Index::IndexTableId).integer().not_null()) .col(ColumnDef::new(Index::PrimaryTableId).integer().not_null()) - .col(ColumnDef::new(Index::IndexItems).binary().not_null()) + .col( + ColumnDef::new(Index::IndexItems) + .rw_binary(manager) + .not_null(), + ) .col(ColumnDef::new(Index::IndexColumnsLen).integer().not_null()) .foreign_key( &mut ForeignKey::create() @@ -760,8 +785,16 @@ impl MigrationTrait for Migration { .col(ColumnDef::new(Function::FunctionId).integer().primary_key()) .col(ColumnDef::new(Function::Name).string().not_null()) .col(ColumnDef::new(Function::ArgNames).string().not_null()) - .col(ColumnDef::new(Function::ArgTypes).binary().not_null()) - .col(ColumnDef::new(Function::ReturnType).binary().not_null()) + .col( + ColumnDef::new(Function::ArgTypes) + .rw_binary(manager) + .not_null(), + ) + .col( + ColumnDef::new(Function::ReturnType) + .rw_binary(manager) + .not_null(), + ) .col(ColumnDef::new(Function::Language).string().not_null()) .col(ColumnDef::new(Function::Link).string()) .col(ColumnDef::new(Function::Identifier).string()) diff --git a/src/meta/model/migration/src/m20231008_020431_hummock.rs b/src/meta/model/migration/src/m20231008_020431_hummock.rs index 42fe0c424f99..ea499d67182e 100644 --- a/src/meta/model/migration/src/m20231008_020431_hummock.rs +++ b/src/meta/model/migration/src/m20231008_020431_hummock.rs @@ -1,5 +1,6 @@ use sea_orm_migration::prelude::*; +use crate::utils::ColumnDefExt; use crate::{assert_not_has_tables, drop_tables}; #[derive(DeriveMigrationName)] @@ -32,7 +33,7 @@ impl MigrationTrait for Migration { ) .col( ColumnDef::new(CompactionTask::Task) - .blob(BlobSize::Long) + .rw_binary(manager) .not_null(), ) .col( @@ -54,7 +55,7 @@ impl MigrationTrait for Migration { .not_null() .primary_key(), ) - .col(ColumnDef::new(CompactionConfig::Config).blob(BlobSize::Long)) + .col(ColumnDef::new(CompactionConfig::Config).rw_binary(manager)) .to_owned(), ) .await?; @@ -69,7 +70,7 @@ impl MigrationTrait for Migration { .not_null() .primary_key(), ) - .col(ColumnDef::new(CompactionStatus::Status).blob(BlobSize::Long)) + .col(ColumnDef::new(CompactionStatus::Status).rw_binary(manager)) .to_owned(), ) .await?; @@ -142,7 +143,7 @@ impl MigrationTrait for Migration { .boolean() .not_null(), ) - .col(ColumnDef::new(HummockVersionDelta::FullVersionDelta).blob(BlobSize::Long)) + .col(ColumnDef::new(HummockVersionDelta::FullVersionDelta).rw_binary(manager)) .to_owned(), ) .await?; diff --git a/src/meta/model/migration/src/m20240304_074901_subscription.rs b/src/meta/model/migration/src/m20240304_074901_subscription.rs index 244c6abe1160..a7c791e828dc 100644 --- a/src/meta/model/migration/src/m20240304_074901_subscription.rs +++ 
b/src/meta/model/migration/src/m20240304_074901_subscription.rs @@ -1,5 +1,6 @@ use sea_orm_migration::prelude::{Table as MigrationTable, *}; +use crate::utils::ColumnDefExt; use crate::{assert_not_has_tables, drop_tables}; #[derive(DeriveMigrationName)] @@ -19,8 +20,16 @@ impl MigrationTrait for Migration { .primary_key(), ) .col(ColumnDef::new(Subscription::Name).string().not_null()) - .col(ColumnDef::new(Subscription::Columns).binary().not_null()) - .col(ColumnDef::new(Subscription::PlanPk).binary().not_null()) + .col( + ColumnDef::new(Subscription::Columns) + .rw_binary(manager) + .not_null(), + ) + .col( + ColumnDef::new(Subscription::PlanPk) + .rw_binary(manager) + .not_null(), + ) .col( ColumnDef::new(Subscription::DistributionKey) .json_binary() diff --git a/src/meta/model/migration/src/m20240506_112555_subscription_partial_ckpt.rs b/src/meta/model/migration/src/m20240506_112555_subscription_partial_ckpt.rs index 715be0e7cd23..1e0eec2c0bd2 100644 --- a/src/meta/model/migration/src/m20240506_112555_subscription_partial_ckpt.rs +++ b/src/meta/model/migration/src/m20240506_112555_subscription_partial_ckpt.rs @@ -1,6 +1,7 @@ use sea_orm_migration::prelude::{Table as MigrationTable, *}; use crate::drop_tables; +use crate::utils::ColumnDefExt; #[derive(DeriveMigrationName)] pub struct Migration; @@ -67,8 +68,16 @@ impl MigrationTrait for Migration { ) .col(ColumnDef::new(Subscription::Name).string().not_null()) .col(ColumnDef::new(Subscription::Definition).string().not_null()) - .col(ColumnDef::new(Subscription::Columns).binary().not_null()) - .col(ColumnDef::new(Subscription::PlanPk).binary().not_null()) + .col( + ColumnDef::new(Subscription::Columns) + .rw_binary(manager) + .not_null(), + ) + .col( + ColumnDef::new(Subscription::PlanPk) + .rw_binary(manager) + .not_null(), + ) .col( ColumnDef::new(Subscription::DistributionKey) .json_binary() diff --git a/src/meta/model/migration/src/m20240525_090457_secret.rs b/src/meta/model/migration/src/m20240525_090457_secret.rs index ed23085c6657..734c0a6d1fd6 100644 --- a/src/meta/model/migration/src/m20240525_090457_secret.rs +++ b/src/meta/model/migration/src/m20240525_090457_secret.rs @@ -1,5 +1,6 @@ use sea_orm_migration::prelude::{Table as MigrationTable, *}; +use crate::utils::ColumnDefExt; use crate::{assert_not_has_tables, drop_tables}; #[derive(DeriveMigrationName)] @@ -21,7 +22,7 @@ impl MigrationTrait for Migration { .primary_key(), ) .col(ColumnDef::new(Secret::Name).string().not_null()) - .col(ColumnDef::new(Secret::Value).binary().not_null()) + .col(ColumnDef::new(Secret::Value).rw_binary(manager).not_null()) .foreign_key( &mut ForeignKey::create() .name("FK_secret_object_id") @@ -42,7 +43,7 @@ impl MigrationTrait for Migration { .alter_table( MigrationTable::alter() .table(Sink::Table) - .add_column(ColumnDef::new(Sink::SecretRef).binary()) + .add_column(ColumnDef::new(Sink::SecretRef).rw_binary(manager)) .to_owned(), ) .await?; @@ -52,7 +53,7 @@ impl MigrationTrait for Migration { .alter_table( MigrationTable::alter() .table(Source::Table) - .add_column(ColumnDef::new(Source::SecretRef).binary()) + .add_column(ColumnDef::new(Source::SecretRef).rw_binary(manager)) .to_owned(), ) .await?; diff --git a/src/meta/model/migration/src/m20240617_070131_index_column_properties.rs b/src/meta/model/migration/src/m20240617_070131_index_column_properties.rs index daff755da999..f8f113e7974d 100644 --- a/src/meta/model/migration/src/m20240617_070131_index_column_properties.rs +++ 
b/src/meta/model/migration/src/m20240617_070131_index_column_properties.rs @@ -1,5 +1,7 @@ use sea_orm_migration::prelude::*; +use crate::utils::ColumnDefExt; + #[derive(DeriveMigrationName)] pub struct Migration; @@ -10,7 +12,7 @@ impl MigrationTrait for Migration { .alter_table( Table::alter() .table(Index::Table) - .add_column(ColumnDef::new(Index::IndexColumnProperties).binary()) + .add_column(ColumnDef::new(Index::IndexColumnProperties).rw_binary(manager)) .to_owned(), ) .await diff --git a/src/meta/model/migration/src/m20240617_071625_sink_into_table_column.rs b/src/meta/model/migration/src/m20240617_071625_sink_into_table_column.rs index 4904dd362324..c4e01c3bcd5b 100644 --- a/src/meta/model/migration/src/m20240617_071625_sink_into_table_column.rs +++ b/src/meta/model/migration/src/m20240617_071625_sink_into_table_column.rs @@ -1,5 +1,6 @@ use sea_orm_migration::prelude::{Table as MigrationTable, *}; +use crate::utils::ColumnDefExt; use crate::SubQueryStatement::SelectStatement; #[derive(DeriveMigrationName)] @@ -12,7 +13,7 @@ impl MigrationTrait for Migration { .alter_table( MigrationTable::alter() .table(Sink::Table) - .add_column(ColumnDef::new(Sink::OriginalTargetColumns).binary()) + .add_column(ColumnDef::new(Sink::OriginalTargetColumns).rw_binary(manager)) .to_owned(), ) .await?; diff --git a/src/meta/model/migration/src/m20240618_072634_function_compressed_binary.rs b/src/meta/model/migration/src/m20240618_072634_function_compressed_binary.rs index 6b4ef6157bcf..92d27da1e138 100644 --- a/src/meta/model/migration/src/m20240618_072634_function_compressed_binary.rs +++ b/src/meta/model/migration/src/m20240618_072634_function_compressed_binary.rs @@ -1,6 +1,7 @@ use sea_orm_migration::prelude::*; use crate::sea_orm::{DatabaseBackend, DbBackend, Statement}; +use crate::utils::ColumnDefExt; #[derive(DeriveMigrationName)] pub struct Migration; @@ -19,7 +20,7 @@ impl MigrationTrait for Migration { Table::alter() .table(Function::Table) .modify_column( - ColumnDef::new(Function::CompressedBinary).blob(BlobSize::Medium), + ColumnDef::new(Function::CompressedBinary).rw_binary(manager), ) .to_owned(), ) diff --git a/src/meta/model/migration/src/m20240630_131430_remove_parallel_unit.rs b/src/meta/model/migration/src/m20240630_131430_remove_parallel_unit.rs index 5acc8dc1a616..dabcb8d3daa7 100644 --- a/src/meta/model/migration/src/m20240630_131430_remove_parallel_unit.rs +++ b/src/meta/model/migration/src/m20240630_131430_remove_parallel_unit.rs @@ -2,6 +2,7 @@ use sea_orm_migration::prelude::*; use serde::{Deserialize, Serialize}; use crate::sea_orm::{FromJsonQueryResult, FromQueryResult, Statement}; +use crate::utils::ColumnDefExt; #[derive(DeriveMigrationName)] pub struct Migration; @@ -75,7 +76,11 @@ impl MigrationTrait for Migration { .alter_table( Table::alter() .table(Fragment::Table) - .add_column(ColumnDef::new(Fragment::VnodeMapping).binary().not_null()) + .add_column( + ColumnDef::new(Fragment::VnodeMapping) + .rw_binary(manager) + .not_null(), + ) .to_owned(), ) .await?; diff --git a/src/meta/model/migration/src/m20240701_060504_hummock_time_travel.rs b/src/meta/model/migration/src/m20240701_060504_hummock_time_travel.rs index 7dec44913dc8..619537078e28 100644 --- a/src/meta/model/migration/src/m20240701_060504_hummock_time_travel.rs +++ b/src/meta/model/migration/src/m20240701_060504_hummock_time_travel.rs @@ -1,6 +1,7 @@ use sea_orm_migration::prelude::*; use crate::drop_tables; +use crate::utils::ColumnDefExt; #[derive(DeriveMigrationName)] pub struct Migration; @@ -26,7 
+27,7 @@ impl MigrationTrait for Migration { ) .col( ColumnDef::new(HummockSstableInfo::SstableInfo) - .blob(BlobSize::Long) + .rw_binary(manager) .null(), ) .to_owned(), @@ -46,7 +47,7 @@ impl MigrationTrait for Migration { ) .col( ColumnDef::new(HummockTimeTravelVersion::Version) - .blob(BlobSize::Long) + .rw_binary(manager) .null(), ) .to_owned(), @@ -66,7 +67,7 @@ impl MigrationTrait for Migration { ) .col( ColumnDef::new(HummockTimeTravelDelta::VersionDelta) - .blob(BlobSize::Long) + .rw_binary(manager) .null(), ) .to_owned(), diff --git a/src/meta/model/migration/src/utils.rs b/src/meta/model/migration/src/utils.rs new file mode 100644 index 000000000000..4f704374befd --- /dev/null +++ b/src/meta/model/migration/src/utils.rs @@ -0,0 +1,17 @@ +use sea_orm::DatabaseBackend; +use sea_orm_migration::prelude::*; + +#[easy_ext::ext(ColumnDefExt)] +impl ColumnDef { + /// Set column type as `longblob` for MySQL, `bytea` for Postgres, and `blob` for Sqlite. + /// + /// Should be preferred over [`binary`](ColumnDef::binary) or [`blob`](ColumnDef::blob) for large binary fields, + /// typically the fields wrapping protobuf or other serialized data. Otherwise, MySQL will return an error + /// when the length exceeds 65535 bytes. + pub fn rw_binary(&mut self, manager: &SchemaManager) -> &mut Self { + match manager.get_database_backend() { + DatabaseBackend::MySql => self.custom(extension::mysql::MySqlType::LongBlob), + DatabaseBackend::Postgres | DatabaseBackend::Sqlite => self.blob(), + } + } +} diff --git a/src/meta/model/src/actor.rs b/src/meta/model/src/actor.rs index cbbbca543679..2278d7e9fd37 100644 --- a/src/meta/model/src/actor.rs +++ b/src/meta/model/src/actor.rs @@ -21,7 +21,7 @@ use crate::{ }; #[derive(Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "String(None)")] +#[sea_orm(rs_type = "String", db_type = "string(None)")] pub enum ActorStatus { #[sea_orm(string_value = "INACTIVE")] Inactive, diff --git a/src/meta/model/src/actor_dispatcher.rs b/src/meta/model/src/actor_dispatcher.rs index 7d40af6967d3..1ef2e251dd75 100644 --- a/src/meta/model/src/actor_dispatcher.rs +++ b/src/meta/model/src/actor_dispatcher.rs @@ -23,7 +23,7 @@ use crate::{ActorId, ActorMapping, FragmentId, I32Array}; #[derive( Hash, Copy, Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize, )] -#[sea_orm(rs_type = "String", db_type = "String(None)")] +#[sea_orm(rs_type = "String", db_type = "string(None)")] pub enum DispatcherType { #[sea_orm(string_value = "HASH")] Hash, diff --git a/src/meta/model/src/catalog_version.rs b/src/meta/model/src/catalog_version.rs index cfcd90c12f6a..3aa797800561 100644 --- a/src/meta/model/src/catalog_version.rs +++ b/src/meta/model/src/catalog_version.rs @@ -16,7 +16,7 @@ use sea_orm::entity::prelude::*; use serde::{Deserialize, Serialize}; #[derive(Clone, Copy, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "String(None)")] +#[sea_orm(rs_type = "String", db_type = "string(None)")] pub enum VersionCategory { #[sea_orm(string_value = "NOTIFICATION")] Notification, diff --git a/src/meta/model/src/fragment.rs b/src/meta/model/src/fragment.rs index 9a365f37377d..45f59ed3c0ae 100644 --- a/src/meta/model/src/fragment.rs +++ b/src/meta/model/src/fragment.rs @@ -33,7 +33,7 @@ pub struct Model { } #[derive(Copy, Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", 
db_type = "String(None)")] +#[sea_orm(rs_type = "String", db_type = "string(None)")] pub enum DistributionType { #[sea_orm(string_value = "SINGLE")] Single, diff --git a/src/meta/model/src/function.rs b/src/meta/model/src/function.rs index 5589c1daa023..0fea52c6c348 100644 --- a/src/meta/model/src/function.rs +++ b/src/meta/model/src/function.rs @@ -21,7 +21,7 @@ use serde::{Deserialize, Serialize}; use crate::{DataType, DataTypeArray, FunctionId}; #[derive(Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "String(None)")] +#[sea_orm(rs_type = "String", db_type = "string(None)")] pub enum FunctionKind { #[sea_orm(string_value = "Scalar")] Scalar, diff --git a/src/meta/model/src/lib.rs b/src/meta/model/src/lib.rs index 0b99b48914b2..6610484a8918 100644 --- a/src/meta/model/src/lib.rs +++ b/src/meta/model/src/lib.rs @@ -93,7 +93,7 @@ pub type FragmentId = i32; pub type ActorId = i32; #[derive(Clone, Copy, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "String(None)")] +#[sea_orm(rs_type = "String", db_type = "string(None)")] pub enum JobStatus { #[sea_orm(string_value = "INITIAL")] Initial, @@ -125,7 +125,7 @@ impl From for PbStreamJobState { } #[derive(Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "String(None)")] +#[sea_orm(rs_type = "String", db_type = "string(None)")] pub enum CreateType { #[sea_orm(string_value = "BACKGROUND")] Background, diff --git a/src/meta/model/src/object.rs b/src/meta/model/src/object.rs index 4ee0e5a4f4c4..a9a6fbe52488 100644 --- a/src/meta/model/src/object.rs +++ b/src/meta/model/src/object.rs @@ -18,7 +18,7 @@ use serde::{Deserialize, Serialize}; use crate::{DatabaseId, ObjectId, SchemaId, UserId}; #[derive(Clone, Debug, PartialEq, Eq, Copy, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "String(None)")] +#[sea_orm(rs_type = "String", db_type = "string(None)")] pub enum ObjectType { #[sea_orm(string_value = "DATABASE")] Database, diff --git a/src/meta/model/src/secret.rs b/src/meta/model/src/secret.rs index 0d122267bb4b..bea3f87f56bc 100644 --- a/src/meta/model/src/secret.rs +++ b/src/meta/model/src/secret.rs @@ -23,7 +23,6 @@ pub struct Model { #[sea_orm(primary_key, auto_increment = false)] pub secret_id: i32, pub name: String, - #[sea_orm(column_type = "Binary(BlobSize::Blob(None))")] pub value: Vec, } diff --git a/src/meta/model/src/sink.rs b/src/meta/model/src/sink.rs index c5a72fbb748c..39b77fcc6381 100644 --- a/src/meta/model/src/sink.rs +++ b/src/meta/model/src/sink.rs @@ -23,7 +23,7 @@ use crate::{ }; #[derive(Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "String(None)")] +#[sea_orm(rs_type = "String", db_type = "string(None)")] pub enum SinkType { #[sea_orm(string_value = "APPEND_ONLY")] AppendOnly, diff --git a/src/meta/model/src/table.rs b/src/meta/model/src/table.rs index 1d950f277396..b8ba38d438b7 100644 --- a/src/meta/model/src/table.rs +++ b/src/meta/model/src/table.rs @@ -29,7 +29,7 @@ use crate::{ #[derive( Clone, Debug, PartialEq, Hash, Copy, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize, )] -#[sea_orm(rs_type = "String", db_type = "String(None)")] +#[sea_orm(rs_type = "String", db_type = "string(None)")] pub enum TableType { #[sea_orm(string_value = "TABLE")] Table, @@ -65,7 +65,7 @@ impl From 
for TableType { } #[derive(Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "String(None)")] +#[sea_orm(rs_type = "String", db_type = "string(None)")] pub enum HandleConflictBehavior { #[sea_orm(string_value = "OVERWRITE")] Overwrite, diff --git a/src/meta/model/src/user_privilege.rs b/src/meta/model/src/user_privilege.rs index 48f9a38a5504..bebf8485f464 100644 --- a/src/meta/model/src/user_privilege.rs +++ b/src/meta/model/src/user_privilege.rs @@ -19,7 +19,7 @@ use serde::{Deserialize, Serialize}; use crate::{ObjectId, PrivilegeId, UserId}; #[derive(Clone, Debug, Hash, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "String(None)")] +#[sea_orm(rs_type = "String", db_type = "string(None)")] pub enum Action { #[sea_orm(string_value = "INSERT")] Insert, diff --git a/src/meta/model/src/worker.rs b/src/meta/model/src/worker.rs index ee2ab9b22d3c..82b13184eabe 100644 --- a/src/meta/model/src/worker.rs +++ b/src/meta/model/src/worker.rs @@ -21,7 +21,7 @@ use serde::{Deserialize, Serialize}; use crate::{TransactionId, WorkerId}; #[derive(Clone, Debug, Hash, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "String(None)")] +#[sea_orm(rs_type = "String", db_type = "string(None)")] pub enum WorkerType { #[sea_orm(string_value = "FRONTEND")] Frontend, @@ -61,7 +61,7 @@ impl From for PbWorkerType { } #[derive(Clone, Debug, PartialEq, Eq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "String(None)")] +#[sea_orm(rs_type = "String", db_type = "string(None)")] pub enum WorkerStatus { #[sea_orm(string_value = "STARTING")] Starting, From bb8f94dbdf354b2e866d92025ee62e93dd479167 Mon Sep 17 00:00:00 2001 From: StrikeW Date: Mon, 4 Nov 2024 14:46:25 +0800 Subject: [PATCH 03/15] fix(pg-cdc): fix ssl.mode value to debezium (#19239) --- .../source/common/DbzConnectorConfig.java | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/common/DbzConnectorConfig.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/common/DbzConnectorConfig.java index 09f69df349e3..872d6e900c91 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/common/DbzConnectorConfig.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/source/common/DbzConnectorConfig.java @@ -212,6 +212,23 @@ public DbzConnectorConfig( } } + // adapt value of sslmode to the expected value + var sslMode = postgresProps.getProperty("database.sslmode"); + if (sslMode != null) { + switch (sslMode) { + case "disabled": + sslMode = "disable"; + break; + case "preferred": + sslMode = "prefer"; + break; + case "required": + sslMode = "require"; + break; + } + postgresProps.setProperty("database.sslmode", sslMode); + } + dbzProps.putAll(postgresProps); if (isCdcSourceJob) { From d3d2969036c1f423c04dc76488040d8d01364dc3 Mon Sep 17 00:00:00 2001 From: August Date: Mon, 4 Nov 2024 15:04:08 +0800 Subject: [PATCH 04/15] feat: support swap rename syntax for table/mview/view/source/sink and subscription (#19172) Co-authored-by: Bugen Zhao --- e2e_test/ddl/alter_swap_rename.slt | 181 +++++++++++++++ proto/ddl_service.proto | 21 ++ src/frontend/src/catalog/catalog_service.rs | 12 +- 
src/frontend/src/catalog/schema_catalog.rs | 64 +++-- src/frontend/src/handler/alter_swap_rename.rs | 179 ++++++++++++++ src/frontend/src/handler/mod.rs | 77 ++++++ src/frontend/src/test_utils.rs | 8 +- src/meta/service/src/ddl_service.rs | 17 ++ src/meta/src/controller/catalog.rs | 219 ++++++++---------- src/meta/src/controller/utils.rs | 175 +++++++++++++- src/meta/src/rpc/ddl_controller.rs | 54 ++++- src/rpc_client/src/meta_client.rs | 14 ++ src/sqlparser/src/ast/ddl.rs | 35 +++ src/sqlparser/src/keywords.rs | 1 + src/sqlparser/src/parser.rs | 29 ++- 15 files changed, 929 insertions(+), 157 deletions(-) create mode 100644 e2e_test/ddl/alter_swap_rename.slt create mode 100644 src/frontend/src/handler/alter_swap_rename.rs diff --git a/e2e_test/ddl/alter_swap_rename.slt b/e2e_test/ddl/alter_swap_rename.slt new file mode 100644 index 000000000000..4accda425695 --- /dev/null +++ b/e2e_test/ddl/alter_swap_rename.slt @@ -0,0 +1,181 @@ +statement ok +SET RW_IMPLICIT_FLUSH TO true; + +# Create initial tables and views for testing swap +statement ok +CREATE TABLE t1 (v1 INT primary key, v2 STRUCT>); + +statement ok +CREATE TABLE t2 (v1 INT primary key, v2 STRUCT>); + +# Insert some test data +statement ok +INSERT INTO t1 VALUES(1,(1,(1,2))); + +statement ok +INSERT INTO t2 VALUES(2,(2,(2,4))); + +# Create materialized views referencing the tables +statement ok +CREATE MATERIALIZED VIEW mv1 AS SELECT v1, (t.v2).v1 AS v21 FROM t1 t; + +statement ok +CREATE MATERIALIZED VIEW mv2 AS SELECT v1, (t.v2).v1 AS v21 FROM t2 t; + +# Create regular views +statement ok +CREATE VIEW v1 AS SELECT t1.v1 FROM t1; + +statement ok +CREATE VIEW v2 AS SELECT t2.v2 FROM t2; + +# Create sources +statement ok +CREATE SOURCE src1 (v INT) WITH ( + connector = 'datagen', + fields.v.kind = 'sequence', + fields.v.start = '1', + fields.v.end = '5', + datagen.rows.per.second='10', + datagen.split.num = '1' +) FORMAT PLAIN ENCODE JSON; + +statement ok +CREATE SOURCE src2 (v INT) WITH ( + connector = 'datagen', + fields.v.kind = 'sequence', + fields.v.start = '6', + fields.v.end = '10', + datagen.rows.per.second='10', + datagen.split.num = '1' +) FORMAT PLAIN ENCODE JSON; + +# Create sinks +statement ok +CREATE SINK sink1 AS SELECT * FROM mv1 WITH ( + connector = 'blackhole' +); + +statement ok +CREATE SINK sink2 AS SELECT * FROM mv2 WITH ( + connector = 'blackhole' +); + +# Create subscriptions +statement ok +CREATE SUBSCRIPTION sub1 FROM mv1 WITH ( + retention = '1D' +); + +statement ok +CREATE SUBSCRIPTION sub2 FROM mv2 WITH ( + retention = '1D' +); + +# Test table swap +statement ok +ALTER TABLE t1 SWAP WITH t2; + +statement error Permission denied +ALTER TABLE t1 SWAP WITH mv1; + +statement error not found +ALTER TABLE mv1 SWAP WITH mv2; + +query II rowsort +SELECT * FROM t1; +---- +2 (2,"(2,4)") + +query II rowsort +SELECT * FROM t2; +---- +1 (1,"(1,2)") + +# Test materialized view swap +statement ok +ALTER MATERIALIZED VIEW mv1 SWAP WITH mv2; + +# Verify materialized view contents +query II rowsort +SELECT * FROM mv1; +---- +2 2 + +query II rowsort +SELECT * FROM mv2; +---- +1 1 + +# Test view swap +statement ok +ALTER VIEW v1 SWAP WITH v2; + +# Verify view definitions are swapped +query TT +SHOW CREATE VIEW v1; +---- +public.v1 CREATE VIEW v1 AS SELECT t2.v2 FROM t1 AS t2 + +query TT +SHOW CREATE VIEW v2; +---- +public.v2 CREATE VIEW v2 AS SELECT t1.v1 FROM t2 AS t1 + +# Test source swap +statement ok +ALTER SOURCE src1 SWAP WITH src2; + +# Verify source definitions are swapped +query TT +SHOW CREATE SOURCE src1; +---- 
+public.src1 CREATE SOURCE src1 (v INT) WITH (connector = 'datagen', fields.v.kind = 'sequence', fields.v.start = '6', fields.v.end = '10', datagen.rows.per.second = '10', datagen.split.num = '1') FORMAT PLAIN ENCODE JSON + +query TT +SHOW CREATE SOURCE src2; +---- +public.src2 CREATE SOURCE src2 (v INT) WITH (connector = 'datagen', fields.v.kind = 'sequence', fields.v.start = '1', fields.v.end = '5', datagen.rows.per.second = '10', datagen.split.num = '1') FORMAT PLAIN ENCODE JSON + +# Test sink swap +statement ok +ALTER SINK sink1 SWAP WITH sink2; + +# Verify sink definitions are swapped +query TT +SHOW CREATE SINK sink1; +---- +public.sink1 CREATE SINK sink1 AS SELECT * FROM mv1 AS mv2 WITH (connector = 'blackhole') + +query TT +SHOW CREATE SINK sink2; +---- +public.sink2 CREATE SINK sink2 AS SELECT * FROM mv2 AS mv1 WITH (connector = 'blackhole') + +# Test subscription swap +statement ok +ALTER SUBSCRIPTION sub1 SWAP WITH sub2; + +# Verify subscription definitions are swapped +query TT +SHOW CREATE SUBSCRIPTION sub1; +---- +public.sub1 CREATE SUBSCRIPTION sub1 FROM mv1 WITH (retention = '1D') + +query TT +SHOW CREATE SUBSCRIPTION sub2; +---- +public.sub2 CREATE SUBSCRIPTION sub2 FROM mv2 WITH (retention = '1D') + +# Clean up +statement ok +DROP SOURCE src1; + +statement ok +DROP SOURCE src2; + +statement ok +DROP TABLE t1 CASCADE; + +statement ok +DROP TABLE t2 CASCADE; diff --git a/proto/ddl_service.proto b/proto/ddl_service.proto index de860593e810..6467bd6e1d7e 100644 --- a/proto/ddl_service.proto +++ b/proto/ddl_service.proto @@ -268,6 +268,26 @@ message AlterOwnerResponse { WaitVersion version = 2; } +message AlterSwapRenameRequest { + message ObjectNameSwapPair { + uint32 src_object_id = 1; + uint32 dst_object_id = 2; + } + oneof object { + ObjectNameSwapPair schema = 1; + ObjectNameSwapPair table = 2; + ObjectNameSwapPair view = 3; + ObjectNameSwapPair source = 4; + ObjectNameSwapPair sink = 5; + ObjectNameSwapPair subscription = 6; + } +} + +message AlterSwapRenameResponse { + common.Status status = 1; + WaitVersion version = 2; +} + message CreateFunctionRequest { catalog.Function function = 1; } @@ -513,4 +533,5 @@ service DdlService { rpc Wait(WaitRequest) returns (WaitResponse); rpc CommentOn(CommentOnRequest) returns (CommentOnResponse); rpc AutoSchemaChange(AutoSchemaChangeRequest) returns (AutoSchemaChangeResponse); + rpc AlterSwapRename(AlterSwapRenameRequest) returns (AlterSwapRenameResponse); } diff --git a/src/frontend/src/catalog/catalog_service.rs b/src/frontend/src/catalog/catalog_service.rs index 2d3fbd7e0178..271d395181df 100644 --- a/src/frontend/src/catalog/catalog_service.rs +++ b/src/frontend/src/catalog/catalog_service.rs @@ -25,8 +25,9 @@ use risingwave_pb::catalog::{ PbSubscription, PbTable, PbView, }; use risingwave_pb::ddl_service::{ - alter_name_request, alter_owner_request, alter_set_schema_request, create_connection_request, - PbReplaceTablePlan, PbTableJobType, ReplaceTablePlan, TableJobType, WaitVersion, + alter_name_request, alter_owner_request, alter_set_schema_request, alter_swap_rename_request, + create_connection_request, PbReplaceTablePlan, PbTableJobType, ReplaceTablePlan, TableJobType, + WaitVersion, }; use risingwave_pb::meta::PbTableParallelism; use risingwave_pb::stream_plan::StreamFragmentGraph; @@ -197,6 +198,8 @@ pub trait CatalogWriter: Send + Sync { object: alter_set_schema_request::Object, new_schema_id: u32, ) -> Result<()>; + + async fn alter_swap_rename(&self, object: alter_swap_rename_request::Object) -> Result<()>; } 
#[derive(Clone)] @@ -498,6 +501,11 @@ impl CatalogWriter for CatalogWriterImpl { Ok(()) } + + async fn alter_swap_rename(&self, object: alter_swap_rename_request::Object) -> Result<()> { + let version = self.meta_client.alter_swap_rename(object).await?; + self.wait_version(version).await + } } impl CatalogWriterImpl { diff --git a/src/frontend/src/catalog/schema_catalog.rs b/src/frontend/src/catalog/schema_catalog.rs index 2a14799ce9ab..86d0353f2907 100644 --- a/src/frontend/src/catalog/schema_catalog.rs +++ b/src/frontend/src/catalog/schema_catalog.rs @@ -115,10 +115,14 @@ impl SchemaCatalog { let table_ref = Arc::new(table); let old_table = self.table_by_id.get(&id).unwrap(); - // check if table name get updated. - if old_table.name() != name { + // check if the table name gets updated. + if old_table.name() != name + && let Some(t) = self.table_by_name.get(old_table.name()) + && t.id == id + { self.table_by_name.remove(old_table.name()); } + self.table_by_name.insert(name, table_ref.clone()); self.table_by_id.insert(id, table_ref.clone()); table_ref @@ -137,8 +141,11 @@ impl SchemaCatalog { let index: IndexCatalog = IndexCatalog::build_from(prost, index_table, primary_table); let index_ref = Arc::new(index); - // check if index name get updated. - if old_index.name != name { + // check if the index name gets updated. + if old_index.name != name + && let Some(idx) = self.index_by_name.get(&old_index.name) + && idx.id == id + { self.index_by_name.remove(&old_index.name); } self.index_by_name.insert(name, index_ref.clone()); @@ -245,8 +252,11 @@ impl SchemaCatalog { let source_ref = Arc::new(source); let old_source = self.source_by_id.get(&id).unwrap(); - // check if source name get updated. - if old_source.name != name { + // check if the source name gets updated. + if old_source.name != name + && let Some(src) = self.source_by_name.get(&old_source.name) + && src.id == id + { self.source_by_name.remove(&old_source.name); } @@ -294,8 +304,11 @@ impl SchemaCatalog { let sink_ref = Arc::new(sink); let old_sink = self.sink_by_id.get(&id).unwrap(); - // check if sink name get updated. - if old_sink.name != name { + // check if the sink name gets updated. + if old_sink.name != name + && let Some(s) = self.sink_by_name.get(&old_sink.name) + && s.id.sink_id == id + { self.sink_by_name.remove(&old_sink.name); } @@ -331,8 +344,11 @@ impl SchemaCatalog { let subscription_ref = Arc::new(subscription); let old_subscription = self.subscription_by_id.get(&id).unwrap(); - // check if subscription name get updated. - if old_subscription.name != name { + // check if the subscription name gets updated. + if old_subscription.name != name + && let Some(s) = self.subscription_by_name.get(&old_subscription.name) + && s.id.subscription_id == id + { self.subscription_by_name.remove(&old_subscription.name); } @@ -365,8 +381,11 @@ impl SchemaCatalog { let view_ref = Arc::new(view); let old_view = self.view_by_id.get(&id).unwrap(); - // check if view name get updated. - if old_view.name != name { + // check if the view name gets updated. + if old_view.name != name + && let Some(v) = self.view_by_name.get(old_view.name()) + && v.id == id + { self.view_by_name.remove(&old_view.name); } @@ -438,8 +457,11 @@ impl SchemaCatalog { .function_by_name .get_mut(&old_function_by_id.name) .unwrap(); - // check if function name get updated. - if old_function_by_id.name != name { + // check if the function name gets updated. 
+ if old_function_by_id.name != name + && let Some(f) = old_function_by_name.get(&old_function_by_id.arg_types) + && f.id == id + { old_function_by_name.remove(&old_function_by_id.arg_types); if old_function_by_name.is_empty() { self.function_by_name.remove(&old_function_by_id.name); @@ -473,8 +495,11 @@ impl SchemaCatalog { let connection_ref = Arc::new(connection); let old_connection = self.connection_by_id.get(&id).unwrap(); - // check if connection name get updated. - if old_connection.name != name { + // check if the connection name gets updated. + if old_connection.name != name + && let Some(conn) = self.connection_by_name.get(&old_connection.name) + && conn.id == id + { self.connection_by_name.remove(&old_connection.name); } @@ -513,8 +538,11 @@ impl SchemaCatalog { let secret_ref = Arc::new(secret); let old_secret = self.secret_by_id.get(&id).unwrap(); - // check if secret name get updated. - if old_secret.name != name { + // check if the secret name gets updated. + if old_secret.name != name + && let Some(s) = self.secret_by_name.get(&old_secret.name) + && s.id == id + { self.secret_by_name.remove(&old_secret.name); } diff --git a/src/frontend/src/handler/alter_swap_rename.rs b/src/frontend/src/handler/alter_swap_rename.rs new file mode 100644 index 000000000000..a1d23484576f --- /dev/null +++ b/src/frontend/src/handler/alter_swap_rename.rs @@ -0,0 +1,179 @@ +// Copyright 2024 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use pgwire::pg_response::StatementType; +use risingwave_common::bail_not_implemented; +use risingwave_pb::ddl_service::alter_swap_rename_request; +use risingwave_pb::ddl_service::alter_swap_rename_request::ObjectNameSwapPair; +use risingwave_sqlparser::ast::ObjectName; + +use crate::catalog::root_catalog::SchemaPath; +use crate::catalog::CatalogError; +use crate::error::{ErrorCode, Result}; +use crate::handler::{HandlerArgs, RwPgResponse}; +use crate::session::SessionImpl; +use crate::user::UserId; +use crate::Binder; + +/// Check if the session user has the privilege to swap and rename the objects. 
+fn check_swap_rename_privilege( + session_impl: &Arc, + src_owner: UserId, + target_owner: UserId, +) -> Result<()> { + if !session_impl.is_super_user() + && (src_owner != session_impl.user_id() || target_owner != session_impl.user_id()) + { + return Err(ErrorCode::PermissionDenied(format!( + "{} is not super user and not the owner of the objects.", + session_impl.user_name() + )) + .into()); + } + Ok(()) +} + +pub async fn handle_swap_rename( + handler_args: HandlerArgs, + source_object: ObjectName, + target_object: ObjectName, + stmt_type: StatementType, +) -> Result { + let session = handler_args.session; + let db_name = session.database(); + let (src_schema_name, src_obj_name) = + Binder::resolve_schema_qualified_name(db_name, source_object)?; + let search_path = session.config().search_path(); + let user_name = &session.auth_context().user_name; + let src_schema_path = SchemaPath::new(src_schema_name.as_deref(), &search_path, user_name); + let (target_schema_name, target_obj_name) = + Binder::resolve_schema_qualified_name(db_name, target_object)?; + let target_schema_path = + SchemaPath::new(target_schema_name.as_deref(), &search_path, user_name); + + let obj = match stmt_type { + StatementType::ALTER_SCHEMA => { + // TODO: support it until resolves https://github.com/risingwavelabs/risingwave/issues/19028 + bail_not_implemented!("ALTER SCHEMA SWAP WITH is not supported yet"); + } + StatementType::ALTER_TABLE | StatementType::ALTER_MATERIALIZED_VIEW => { + let catalog_reader = session.env().catalog_reader().read_guard(); + let (src_table, _) = catalog_reader.get_created_table_by_name( + db_name, + src_schema_path, + &src_obj_name, + )?; + let (target_table, _) = catalog_reader.get_created_table_by_name( + db_name, + target_schema_path, + &target_obj_name, + )?; + + if src_table.table_type != target_table.table_type { + return Err(ErrorCode::PermissionDenied(format!( + "cannot swap between {} and {}: type mismatch", + src_obj_name, target_obj_name + )) + .into()); + } + if stmt_type == StatementType::ALTER_TABLE && !src_table.is_table() { + return Err(CatalogError::NotFound("table", src_obj_name.to_string()).into()); + } else if stmt_type == StatementType::ALTER_MATERIALIZED_VIEW && !src_table.is_mview() { + return Err( + CatalogError::NotFound("materialized view", src_obj_name.to_string()).into(), + ); + } + + check_swap_rename_privilege(&session, src_table.owner, target_table.owner)?; + + alter_swap_rename_request::Object::Table(ObjectNameSwapPair { + src_object_id: src_table.id.table_id, + dst_object_id: target_table.id.table_id, + }) + } + StatementType::ALTER_VIEW => { + let catalog_reader = session.env().catalog_reader().read_guard(); + let (src_view, _) = + catalog_reader.get_view_by_name(db_name, src_schema_path, &src_obj_name)?; + let (target_view, _) = + catalog_reader.get_view_by_name(db_name, target_schema_path, &target_obj_name)?; + check_swap_rename_privilege(&session, src_view.owner, target_view.owner)?; + + alter_swap_rename_request::Object::View(ObjectNameSwapPair { + src_object_id: src_view.id, + dst_object_id: target_view.id, + }) + } + StatementType::ALTER_SOURCE => { + let catalog_reader = session.env().catalog_reader().read_guard(); + let (src_source, _) = + catalog_reader.get_source_by_name(db_name, src_schema_path, &src_obj_name)?; + let (target_source, _) = + catalog_reader.get_source_by_name(db_name, target_schema_path, &target_obj_name)?; + check_swap_rename_privilege(&session, src_source.owner, target_source.owner)?; + + 
alter_swap_rename_request::Object::Source(ObjectNameSwapPair { + src_object_id: src_source.id, + dst_object_id: target_source.id, + }) + } + StatementType::ALTER_SINK => { + let catalog_reader = session.env().catalog_reader().read_guard(); + let (src_sink, _) = + catalog_reader.get_sink_by_name(db_name, src_schema_path, &src_obj_name)?; + let (target_sink, _) = + catalog_reader.get_sink_by_name(db_name, target_schema_path, &target_obj_name)?; + check_swap_rename_privilege( + &session, + src_sink.owner.user_id, + target_sink.owner.user_id, + )?; + + alter_swap_rename_request::Object::Sink(ObjectNameSwapPair { + src_object_id: src_sink.id.sink_id, + dst_object_id: target_sink.id.sink_id, + }) + } + StatementType::ALTER_SUBSCRIPTION => { + let catalog_reader = session.env().catalog_reader().read_guard(); + let (src_subscription, _) = + catalog_reader.get_subscription_by_name(db_name, src_schema_path, &src_obj_name)?; + let (target_subscription, _) = catalog_reader.get_subscription_by_name( + db_name, + target_schema_path, + &target_obj_name, + )?; + check_swap_rename_privilege( + &session, + src_subscription.owner.user_id, + target_subscription.owner.user_id, + )?; + + alter_swap_rename_request::Object::Subscription(ObjectNameSwapPair { + src_object_id: src_subscription.id.subscription_id, + dst_object_id: target_subscription.id.subscription_id, + }) + } + _ => { + unreachable!("handle_swap_rename: unsupported statement type") + } + }; + + let catalog_writer = session.catalog_writer()?; + catalog_writer.alter_swap_rename(obj).await?; + + Ok(RwPgResponse::empty_result(stmt_type)) +} diff --git a/src/frontend/src/handler/mod.rs b/src/frontend/src/handler/mod.rs index d9a190ca319e..9cf94a37c65b 100644 --- a/src/frontend/src/handler/mod.rs +++ b/src/frontend/src/handler/mod.rs @@ -46,6 +46,7 @@ mod alter_set_schema; mod alter_source_column; mod alter_source_with_sr; mod alter_streaming_rate_limit; +mod alter_swap_rename; mod alter_system; mod alter_table_column; mod alter_table_with_sr; @@ -637,6 +638,18 @@ pub async fn handle( ) .await } + Statement::AlterSchema { + name, + operation: AlterSchemaOperation::SwapRenameSchema { target_schema }, + } => { + alter_swap_rename::handle_swap_rename( + handler_args, + name, + target_schema, + StatementType::ALTER_SCHEMA, + ) + .await + } Statement::AlterTable { name, operation: @@ -720,6 +733,18 @@ pub async fn handle( ) .await } + Statement::AlterTable { + name, + operation: AlterTableOperation::SwapRenameTable { target_table }, + } => { + alter_swap_rename::handle_swap_rename( + handler_args, + name, + target_table, + StatementType::ALTER_TABLE, + ) + .await + } Statement::AlterIndex { name, operation: AlterIndexOperation::RenameIndex { index_name }, @@ -837,6 +862,19 @@ pub async fn handle( ) .await } + Statement::AlterView { + materialized, + name, + operation: AlterViewOperation::SwapRenameView { target_view }, + } => { + let statement_type = if materialized { + StatementType::ALTER_MATERIALIZED_VIEW + } else { + StatementType::ALTER_VIEW + }; + alter_swap_rename::handle_swap_rename(handler_args, name, target_view, statement_type) + .await + } Statement::AlterSink { name, operation: AlterSinkOperation::RenameSink { sink_name }, @@ -884,6 +922,18 @@ pub async fn handle( ) .await } + Statement::AlterSink { + name, + operation: AlterSinkOperation::SwapRenameSink { target_sink }, + } => { + alter_swap_rename::handle_swap_rename( + handler_args, + name, + target_sink, + StatementType::ALTER_SINK, + ) + .await + } Statement::AlterSubscription { name, 
operation: AlterSubscriptionOperation::RenameSubscription { subscription_name }, @@ -913,6 +963,21 @@ pub async fn handle( ) .await } + Statement::AlterSubscription { + name, + operation: + AlterSubscriptionOperation::SwapRenameSubscription { + target_subscription, + }, + } => { + alter_swap_rename::handle_swap_rename( + handler_args, + name, + target_subscription, + StatementType::ALTER_SUBSCRIPTION, + ) + .await + } Statement::AlterSource { name, operation: AlterSourceOperation::RenameSource { source_name }, @@ -969,6 +1034,18 @@ pub async fn handle( ) .await } + Statement::AlterSource { + name, + operation: AlterSourceOperation::SwapRenameSource { target_source }, + } => { + alter_swap_rename::handle_swap_rename( + handler_args, + name, + target_source, + StatementType::ALTER_SOURCE, + ) + .await + } Statement::AlterFunction { name, args, diff --git a/src/frontend/src/test_utils.rs b/src/frontend/src/test_utils.rs index ae5201427b56..d94b1dd2652d 100644 --- a/src/frontend/src/test_utils.rs +++ b/src/frontend/src/test_utils.rs @@ -44,8 +44,8 @@ use risingwave_pb::catalog::{ use risingwave_pb::common::WorkerNode; use risingwave_pb::ddl_service::alter_owner_request::Object; use risingwave_pb::ddl_service::{ - alter_name_request, alter_set_schema_request, create_connection_request, DdlProgress, - PbTableJobType, ReplaceTablePlan, TableJobType, + alter_name_request, alter_set_schema_request, alter_swap_rename_request, + create_connection_request, DdlProgress, PbTableJobType, ReplaceTablePlan, TableJobType, }; use risingwave_pb::hummock::write_limits::WriteLimit; use risingwave_pb::hummock::{ @@ -645,6 +645,10 @@ impl CatalogWriter for MockCatalogWriter { ) -> Result<()> { todo!() } + + async fn alter_swap_rename(&self, _object: alter_swap_rename_request::Object) -> Result<()> { + todo!() + } } impl MockCatalogWriter { diff --git a/src/meta/service/src/ddl_service.rs b/src/meta/service/src/ddl_service.rs index 71b45b1887ef..1d0cc36fff3f 100644 --- a/src/meta/service/src/ddl_service.rs +++ b/src/meta/service/src/ddl_service.rs @@ -1013,6 +1013,23 @@ impl DdlService for DdlServiceImpl { Ok(Response::new(AutoSchemaChangeResponse {})) } + + async fn alter_swap_rename( + &self, + request: Request, + ) -> Result, Status> { + let req = request.into_inner(); + + let version = self + .ddl_controller + .run_command(DdlCommand::AlterSwapRename(req.object.unwrap())) + .await?; + + Ok(Response::new(AlterSwapRenameResponse { + status: None, + version, + })) + } } fn add_auto_schema_change_fail_event_log( diff --git a/src/meta/src/controller/catalog.rs b/src/meta/src/controller/catalog.rs index bcff662f94f0..1f7324d97493 100644 --- a/src/meta/src/controller/catalog.rs +++ b/src/meta/src/controller/catalog.rs @@ -63,8 +63,10 @@ use tokio::sync::oneshot::Sender; use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use tracing::info; -use super::utils::{check_subscription_name_duplicate, get_fragment_ids_by_jobs}; -use crate::controller::rename::{alter_relation_rename, alter_relation_rename_refs}; +use super::utils::{ + check_subscription_name_duplicate, get_fragment_ids_by_jobs, rename_relation, + rename_relation_refer, +}; use crate::controller::utils::{ build_relation_group_for_delete, check_connection_name_duplicate, check_database_name_duplicate, check_function_signature_duplicate, @@ -2483,139 +2485,104 @@ impl CatalogController { ) .await?; - let mut to_update_relations = vec![]; // rename relation. - macro_rules! 
rename_relation { - ($entity:ident, $table:ident, $identity:ident, $object_id:expr) => {{ - let (mut relation, obj) = $entity::find_by_id($object_id) - .find_also_related(Object) - .one(&txn) - .await? - .unwrap(); - let obj = obj.unwrap(); - let old_name = relation.name.clone(); - relation.name = object_name.into(); - if obj.obj_type != ObjectType::View { - relation.definition = alter_relation_rename(&relation.definition, object_name); - } - let active_model = $table::ActiveModel { - $identity: Set(relation.$identity), - name: Set(object_name.into()), - definition: Set(relation.definition.clone()), - ..Default::default() - }; - active_model.update(&txn).await?; - to_update_relations.push(PbRelation { - relation_info: Some(PbRelationInfo::$entity(ObjectModel(relation, obj).into())), - }); - old_name - }}; - } + let (mut to_update_relations, old_name) = + rename_relation(&txn, object_type, object_id, object_name).await?; + // rename referring relation name. + to_update_relations.extend( + rename_relation_refer(&txn, object_type, object_id, object_name, &old_name).await?, + ); - // TODO: check is there any thing to change for shared source? - let old_name = match object_type { - ObjectType::Table => rename_relation!(Table, table, table_id, object_id), - ObjectType::Source => rename_relation!(Source, source, source_id, object_id), - ObjectType::Sink => rename_relation!(Sink, sink, sink_id, object_id), - ObjectType::Subscription => { - rename_relation!(Subscription, subscription, subscription_id, object_id) - } - ObjectType::View => rename_relation!(View, view, view_id, object_id), - ObjectType::Index => { - let (mut index, obj) = Index::find_by_id(object_id) - .find_also_related(Object) - .one(&txn) - .await? - .unwrap(); - index.name = object_name.into(); - let index_table_id = index.index_table_id; - let old_name = rename_relation!(Table, table, table_id, index_table_id); - - // the name of index and its associated table is the same. - let active_model = index::ActiveModel { - index_id: Set(index.index_id), - name: Set(object_name.into()), - ..Default::default() - }; - active_model.update(&txn).await?; - to_update_relations.push(PbRelation { - relation_info: Some(PbRelationInfo::Index( - ObjectModel(index, obj.unwrap()).into(), - )), - }); - old_name - } - _ => unreachable!("only relation name can be altered."), - }; + txn.commit().await?; - // rename referring relation name. - macro_rules! rename_relation_ref { - ($entity:ident, $table:ident, $identity:ident, $object_id:expr) => {{ - let (mut relation, obj) = $entity::find_by_id($object_id) - .find_also_related(Object) - .one(&txn) - .await? 
- .unwrap(); - relation.definition = - alter_relation_rename_refs(&relation.definition, &old_name, object_name); - let active_model = $table::ActiveModel { - $identity: Set(relation.$identity), - definition: Set(relation.definition.clone()), - ..Default::default() - }; - active_model.update(&txn).await?; - to_update_relations.push(PbRelation { - relation_info: Some(PbRelationInfo::$entity( - ObjectModel(relation, obj.unwrap()).into(), - )), - }); - }}; - } - let mut objs = get_referring_objects(object_id, &txn).await?; - if object_type == ObjectType::Table { - let incoming_sinks: I32Array = Table::find_by_id(object_id) + let version = self + .notify_frontend( + NotificationOperation::Update, + NotificationInfo::RelationGroup(PbRelationGroup { + relations: to_update_relations, + }), + ) + .await; + + Ok(version) + } + + pub async fn alter_swap_rename( + &self, + object_type: ObjectType, + object_id: ObjectId, + dst_object_id: ObjectId, + ) -> MetaResult { + let inner = self.inner.write().await; + let txn = inner.db.begin().await?; + let dst_name: String = match object_type { + ObjectType::Table => Table::find_by_id(dst_object_id) .select_only() - .column(table::Column::IncomingSinks) + .column(table::Column::Name) .into_tuple() .one(&txn) .await? - .ok_or_else(|| MetaError::catalog_id_not_found("table", object_id))?; + .ok_or_else(|| { + MetaError::catalog_id_not_found(object_type.as_str(), dst_object_id) + })?, + ObjectType::Source => Source::find_by_id(dst_object_id) + .select_only() + .column(source::Column::Name) + .into_tuple() + .one(&txn) + .await? + .ok_or_else(|| { + MetaError::catalog_id_not_found(object_type.as_str(), dst_object_id) + })?, + ObjectType::Sink => Sink::find_by_id(dst_object_id) + .select_only() + .column(sink::Column::Name) + .into_tuple() + .one(&txn) + .await? + .ok_or_else(|| { + MetaError::catalog_id_not_found(object_type.as_str(), dst_object_id) + })?, + ObjectType::View => View::find_by_id(dst_object_id) + .select_only() + .column(view::Column::Name) + .into_tuple() + .one(&txn) + .await? + .ok_or_else(|| { + MetaError::catalog_id_not_found(object_type.as_str(), dst_object_id) + })?, + ObjectType::Subscription => Subscription::find_by_id(dst_object_id) + .select_only() + .column(subscription::Column::Name) + .into_tuple() + .one(&txn) + .await? + .ok_or_else(|| { + MetaError::catalog_id_not_found(object_type.as_str(), dst_object_id) + })?, + _ => { + return Err(MetaError::permission_denied(format!( + "swap rename not supported for object type: {:?}", + object_type + ))); + } + }; - objs.extend( - incoming_sinks - .into_inner() - .into_iter() - .map(|id| PartialObject { - oid: id, - obj_type: ObjectType::Sink, - schema_id: None, - database_id: None, - }), - ); - } + // rename relations. + let (mut to_update_relations, src_name) = + rename_relation(&txn, object_type, object_id, &dst_name).await?; + let (to_update_relations2, _) = + rename_relation(&txn, object_type, dst_object_id, &src_name).await?; + to_update_relations.extend(to_update_relations2); + // rename referring relation name. 
+ to_update_relations.extend( + rename_relation_refer(&txn, object_type, object_id, &dst_name, &src_name).await?, + ); + to_update_relations.extend( + rename_relation_refer(&txn, object_type, dst_object_id, &src_name, &dst_name).await?, + ); - for obj in objs { - match obj.obj_type { - ObjectType::Table => rename_relation_ref!(Table, table, table_id, obj.oid), - ObjectType::Sink => rename_relation_ref!(Sink, sink, sink_id, obj.oid), - ObjectType::Subscription => { - rename_relation_ref!(Subscription, subscription, subscription_id, obj.oid) - } - ObjectType::View => rename_relation_ref!(View, view, view_id, obj.oid), - ObjectType::Index => { - let index_table_id: Option = Index::find_by_id(obj.oid) - .select_only() - .column(index::Column::IndexTableId) - .into_tuple() - .one(&txn) - .await?; - rename_relation_ref!(Table, table, table_id, index_table_id.unwrap()); - } - _ => { - bail!("only table, sink, subscription, view and index depend on other objects.") - } - } - } txn.commit().await?; let version = self diff --git a/src/meta/src/controller/utils.rs b/src/meta/src/controller/utils.rs index 2240f701a47b..7a2158b8584d 100644 --- a/src/meta/src/controller/utils.rs +++ b/src/meta/src/controller/utils.rs @@ -17,8 +17,8 @@ use std::collections::{BTreeSet, HashMap, HashSet}; use anyhow::{anyhow, Context}; use itertools::Itertools; use risingwave_common::bitmap::Bitmap; -use risingwave_common::hash; use risingwave_common::hash::{ActorMapping, WorkerSlotId, WorkerSlotMapping}; +use risingwave_common::{bail, hash}; use risingwave_meta_model::actor::ActorStatus; use risingwave_meta_model::fragment::DistributionType; use risingwave_meta_model::object::ObjectType; @@ -27,7 +27,7 @@ use risingwave_meta_model::{ actor, actor_dispatcher, connection, database, fragment, function, index, object, object_dependency, schema, secret, sink, source, subscription, table, user, user_privilege, view, ActorId, ConnectorSplits, DataTypeArray, DatabaseId, FragmentId, I32Array, ObjectId, - PrivilegeId, SchemaId, SourceId, StreamNode, UserId, VnodeBitmap, WorkerId, + PrivilegeId, SchemaId, SourceId, StreamNode, TableId, UserId, VnodeBitmap, WorkerId, }; use risingwave_meta_model_migration::WithQuery; use risingwave_pb::catalog::{ @@ -49,12 +49,15 @@ use sea_orm::sea_query::{ WithClause, }; use sea_orm::{ - ColumnTrait, ConnectionTrait, DerivePartialModel, EntityTrait, FromQueryResult, JoinType, - Order, PaginatorTrait, QueryFilter, QuerySelect, RelationTrait, Statement, + ColumnTrait, ConnectionTrait, DatabaseTransaction, DerivePartialModel, EntityTrait, + FromQueryResult, JoinType, Order, PaginatorTrait, QueryFilter, QuerySelect, RelationTrait, Set, + Statement, }; use thiserror_ext::AsReport; +use crate::controller::ObjectModel; use crate::{MetaError, MetaResult}; + /// This function will construct a query using recursive cte to find all objects[(id, `obj_type`)] that are used by the given object. /// /// # Examples @@ -1177,6 +1180,170 @@ pub fn extract_external_table_name_from_definition(table_definition: &str) -> Op } } +/// `rename_relation` renames the target relation and its definition, +/// it commits the changes to the transaction and returns the updated relations and the old name. +pub async fn rename_relation( + txn: &DatabaseTransaction, + object_type: ObjectType, + object_id: ObjectId, + object_name: &str, +) -> MetaResult<(Vec, String)> { + use sea_orm::ActiveModelTrait; + + use crate::controller::rename::alter_relation_rename; + + let mut to_update_relations = vec![]; + // rename relation. 
+ macro_rules! rename_relation { + ($entity:ident, $table:ident, $identity:ident, $object_id:expr) => {{ + let (mut relation, obj) = $entity::find_by_id($object_id) + .find_also_related(Object) + .one(txn) + .await? + .unwrap(); + let obj = obj.unwrap(); + let old_name = relation.name.clone(); + relation.name = object_name.into(); + if obj.obj_type != ObjectType::View { + relation.definition = alter_relation_rename(&relation.definition, object_name); + } + let active_model = $table::ActiveModel { + $identity: Set(relation.$identity), + name: Set(object_name.into()), + definition: Set(relation.definition.clone()), + ..Default::default() + }; + active_model.update(txn).await?; + to_update_relations.push(PbRelation { + relation_info: Some(PbRelationInfo::$entity(ObjectModel(relation, obj).into())), + }); + old_name + }}; + } + // TODO: check is there any thing to change for shared source? + let old_name = match object_type { + ObjectType::Table => rename_relation!(Table, table, table_id, object_id), + ObjectType::Source => rename_relation!(Source, source, source_id, object_id), + ObjectType::Sink => rename_relation!(Sink, sink, sink_id, object_id), + ObjectType::Subscription => { + rename_relation!(Subscription, subscription, subscription_id, object_id) + } + ObjectType::View => rename_relation!(View, view, view_id, object_id), + ObjectType::Index => { + let (mut index, obj) = Index::find_by_id(object_id) + .find_also_related(Object) + .one(txn) + .await? + .unwrap(); + index.name = object_name.into(); + let index_table_id = index.index_table_id; + let old_name = rename_relation!(Table, table, table_id, index_table_id); + + // the name of index and its associated table is the same. + let active_model = index::ActiveModel { + index_id: sea_orm::ActiveValue::Set(index.index_id), + name: sea_orm::ActiveValue::Set(object_name.into()), + ..Default::default() + }; + active_model.update(txn).await?; + to_update_relations.push(PbRelation { + relation_info: Some(PbRelationInfo::Index( + ObjectModel(index, obj.unwrap()).into(), + )), + }); + old_name + } + _ => unreachable!("only relation name can be altered."), + }; + + Ok((to_update_relations, old_name)) +} + +/// `rename_relation_refer` updates the definition of relations that refer to the target one, +/// it commits the changes to the transaction and returns all the updated relations. +pub async fn rename_relation_refer( + txn: &DatabaseTransaction, + object_type: ObjectType, + object_id: ObjectId, + object_name: &str, + old_name: &str, +) -> MetaResult> { + use sea_orm::ActiveModelTrait; + + use crate::controller::rename::alter_relation_rename_refs; + + let mut to_update_relations = vec![]; + macro_rules! rename_relation_ref { + ($entity:ident, $table:ident, $identity:ident, $object_id:expr) => {{ + let (mut relation, obj) = $entity::find_by_id($object_id) + .find_also_related(Object) + .one(txn) + .await? 
+ .unwrap(); + relation.definition = + alter_relation_rename_refs(&relation.definition, old_name, object_name); + let active_model = $table::ActiveModel { + $identity: Set(relation.$identity), + definition: Set(relation.definition.clone()), + ..Default::default() + }; + active_model.update(txn).await?; + to_update_relations.push(PbRelation { + relation_info: Some(PbRelationInfo::$entity( + ObjectModel(relation, obj.unwrap()).into(), + )), + }); + }}; + } + let mut objs = get_referring_objects(object_id, txn).await?; + if object_type == ObjectType::Table { + let incoming_sinks: I32Array = Table::find_by_id(object_id) + .select_only() + .column(table::Column::IncomingSinks) + .into_tuple() + .one(txn) + .await? + .ok_or_else(|| MetaError::catalog_id_not_found("table", object_id))?; + + objs.extend( + incoming_sinks + .into_inner() + .into_iter() + .map(|id| PartialObject { + oid: id, + obj_type: ObjectType::Sink, + schema_id: None, + database_id: None, + }), + ); + } + + for obj in objs { + match obj.obj_type { + ObjectType::Table => rename_relation_ref!(Table, table, table_id, obj.oid), + ObjectType::Sink => rename_relation_ref!(Sink, sink, sink_id, obj.oid), + ObjectType::Subscription => { + rename_relation_ref!(Subscription, subscription, subscription_id, obj.oid) + } + ObjectType::View => rename_relation_ref!(View, view, view_id, obj.oid), + ObjectType::Index => { + let index_table_id: Option = Index::find_by_id(obj.oid) + .select_only() + .column(index::Column::IndexTableId) + .into_tuple() + .one(txn) + .await?; + rename_relation_ref!(Table, table, table_id, index_table_id.unwrap()); + } + _ => { + bail!("only table, sink, subscription, view and index depend on other objects.") + } + } + } + + Ok(to_update_relations) +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/meta/src/rpc/ddl_controller.rs b/src/meta/src/rpc/ddl_controller.rs index d787be50f0ba..f0cbd1d5a477 100644 --- a/src/meta/src/rpc/ddl_controller.rs +++ b/src/meta/src/rpc/ddl_controller.rs @@ -48,7 +48,8 @@ use risingwave_pb::catalog::{ }; use risingwave_pb::ddl_service::alter_owner_request::Object; use risingwave_pb::ddl_service::{ - alter_name_request, alter_set_schema_request, DdlProgress, TableJobType, WaitVersion, + alter_name_request, alter_set_schema_request, alter_swap_rename_request, DdlProgress, + TableJobType, WaitVersion, }; use risingwave_pb::meta::table_fragments::fragment::FragmentDistributionType; use risingwave_pb::meta::table_fragments::PbFragment; @@ -141,6 +142,7 @@ pub enum DdlCommand { ), DropStreamingJob(StreamingJobId, DropMode, Option), AlterName(alter_name_request::Object, String), + AlterSwapRename(alter_swap_rename_request::Object), ReplaceTable(ReplaceTableInfo), AlterSourceColumn(Source), AlterObjectOwner(Object, UserId), @@ -339,6 +341,7 @@ impl DdlController { DdlCommand::DropSubscription(subscription_id, drop_mode) => { ctrl.drop_subscription(subscription_id, drop_mode).await } + DdlCommand::AlterSwapRename(objects) => ctrl.alter_swap_rename(objects).await, } } .in_current_span(); @@ -1878,6 +1881,55 @@ impl DdlController { .await } + async fn alter_swap_rename( + &self, + object: alter_swap_rename_request::Object, + ) -> MetaResult { + let (obj_type, src_id, dst_id) = match object { + alter_swap_rename_request::Object::Schema(_) => unimplemented!("schema swap"), + alter_swap_rename_request::Object::Table(objs) => { + let (src_id, dst_id) = ( + objs.src_object_id as ObjectId, + objs.dst_object_id as ObjectId, + ); + (ObjectType::Table, src_id, dst_id) + } + 
alter_swap_rename_request::Object::View(objs) => { + let (src_id, dst_id) = ( + objs.src_object_id as ObjectId, + objs.dst_object_id as ObjectId, + ); + (ObjectType::View, src_id, dst_id) + } + alter_swap_rename_request::Object::Source(objs) => { + let (src_id, dst_id) = ( + objs.src_object_id as ObjectId, + objs.dst_object_id as ObjectId, + ); + (ObjectType::Source, src_id, dst_id) + } + alter_swap_rename_request::Object::Sink(objs) => { + let (src_id, dst_id) = ( + objs.src_object_id as ObjectId, + objs.dst_object_id as ObjectId, + ); + (ObjectType::Sink, src_id, dst_id) + } + alter_swap_rename_request::Object::Subscription(objs) => { + let (src_id, dst_id) = ( + objs.src_object_id as ObjectId, + objs.dst_object_id as ObjectId, + ); + (ObjectType::Subscription, src_id, dst_id) + } + }; + + self.metadata_manager + .catalog_controller + .alter_swap_rename(obj_type, src_id, dst_id) + .await + } + async fn alter_owner( &self, object: Object, diff --git a/src/rpc_client/src/meta_client.rs b/src/rpc_client/src/meta_client.rs index 0b096e53ddfa..24375d395a06 100644 --- a/src/rpc_client/src/meta_client.rs +++ b/src/rpc_client/src/meta_client.rs @@ -573,6 +573,19 @@ impl MetaClient { Ok(()) } + pub async fn alter_swap_rename( + &self, + object: alter_swap_rename_request::Object, + ) -> Result { + let request = AlterSwapRenameRequest { + object: Some(object), + }; + let resp = self.inner.alter_swap_rename(request).await?; + Ok(resp + .version + .ok_or_else(|| anyhow!("wait version not set"))?) + } + pub async fn replace_table( &self, source: Option, @@ -2096,6 +2109,7 @@ macro_rules! for_all_meta_rpc { ,{ ddl_client, get_tables, GetTablesRequest, GetTablesResponse } ,{ ddl_client, wait, WaitRequest, WaitResponse } ,{ ddl_client, auto_schema_change, AutoSchemaChangeRequest, AutoSchemaChangeResponse } + ,{ ddl_client, alter_swap_rename, AlterSwapRenameRequest, AlterSwapRenameResponse } ,{ hummock_client, unpin_version_before, UnpinVersionBeforeRequest, UnpinVersionBeforeResponse } ,{ hummock_client, get_current_version, GetCurrentVersionRequest, GetCurrentVersionResponse } ,{ hummock_client, replay_version_delta, ReplayVersionDeltaRequest, ReplayVersionDeltaResponse } diff --git a/src/sqlparser/src/ast/ddl.rs b/src/sqlparser/src/ast/ddl.rs index 15ac6a9d2762..d94cf80cb9f2 100644 --- a/src/sqlparser/src/ast/ddl.rs +++ b/src/sqlparser/src/ast/ddl.rs @@ -38,6 +38,7 @@ pub enum AlterDatabaseOperation { pub enum AlterSchemaOperation { ChangeOwner { new_owner_name: Ident }, RenameSchema { schema_name: ObjectName }, + SwapRenameSchema { target_schema: ObjectName }, } /// An `ALTER TABLE` (`Statement::AlterTable`) operation @@ -110,6 +111,10 @@ pub enum AlterTableOperation { SetBackfillRateLimit { rate_limit: i32, }, + /// `SWAP WITH ` + SwapRenameTable { + target_table: ObjectName, + }, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] @@ -146,6 +151,10 @@ pub enum AlterViewOperation { SetBackfillRateLimit { rate_limit: i32, }, + /// `SWAP WITH ` + SwapRenameView { + target_view: ObjectName, + }, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] @@ -165,6 +174,10 @@ pub enum AlterSinkOperation { parallelism: SetVariableValue, deferred: bool, }, + /// `SWAP WITH ` + SwapRenameSink { + target_sink: ObjectName, + }, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] @@ -173,6 +186,7 @@ pub enum AlterSubscriptionOperation { RenameSubscription { subscription_name: ObjectName }, ChangeOwner { new_owner_name: Ident }, SetSchema { new_schema_name: ObjectName }, + SwapRenameSubscription { target_subscription: ObjectName }, } 
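The AST variants added above and the `Display` and parser changes further down all use the same `SWAP WITH <target>` shape. Below is a minimal round-trip sketch, not part of the patch itself; it assumes the crate's `Parser::parse_sql` entry point, so treat that call as an assumption rather than a confirmed API.

```rust
use risingwave_sqlparser::parser::Parser;

fn main() {
    // New syntax introduced by this patch: swap the names of two tables.
    let sql = "ALTER TABLE t1 SWAP WITH t2";

    // Assumed entry point returning `Vec<Statement>`.
    let stmts = Parser::parse_sql(sql).expect("statement should parse");

    // Per the `Display` impls in this patch, re-rendering the AST is expected
    // to reproduce the original `SWAP WITH` statement verbatim.
    assert_eq!(stmts[0].to_string(), sql);
}
```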
#[derive(Debug, Clone, PartialEq, Eq, Hash)] @@ -185,6 +199,7 @@ pub enum AlterSourceOperation { FormatEncode { format_encode: FormatEncodeOptions }, RefreshSchema, SetSourceRateLimit { rate_limit: i32 }, + SwapRenameSource { target_source: ObjectName }, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] @@ -221,6 +236,9 @@ impl fmt::Display for AlterSchemaOperation { AlterSchemaOperation::RenameSchema { schema_name } => { write!(f, "RENAME TO {}", schema_name) } + AlterSchemaOperation::SwapRenameSchema { target_schema } => { + write!(f, "SWAP WITH {}", target_schema) + } } } } @@ -300,6 +318,9 @@ impl fmt::Display for AlterTableOperation { AlterTableOperation::SetBackfillRateLimit { rate_limit } => { write!(f, "SET BACKFILL_RATE_LIMIT TO {}", rate_limit) } + AlterTableOperation::SwapRenameTable { target_table } => { + write!(f, "SWAP WITH {}", target_table) + } } } } @@ -351,6 +372,9 @@ impl fmt::Display for AlterViewOperation { AlterViewOperation::SetBackfillRateLimit { rate_limit } => { write!(f, "SET BACKFILL_RATE_LIMIT TO {}", rate_limit) } + AlterViewOperation::SwapRenameView { target_view } => { + write!(f, "SWAP WITH {}", target_view) + } } } } @@ -378,6 +402,9 @@ impl fmt::Display for AlterSinkOperation { if *deferred { " DEFERRED" } else { "" } ) } + AlterSinkOperation::SwapRenameSink { target_sink } => { + write!(f, "SWAP WITH {}", target_sink) + } } } } @@ -394,6 +421,11 @@ impl fmt::Display for AlterSubscriptionOperation { AlterSubscriptionOperation::SetSchema { new_schema_name } => { write!(f, "SET SCHEMA {}", new_schema_name) } + AlterSubscriptionOperation::SwapRenameSubscription { + target_subscription, + } => { + write!(f, "SWAP WITH {}", target_subscription) + } } } } @@ -422,6 +454,9 @@ impl fmt::Display for AlterSourceOperation { AlterSourceOperation::SetSourceRateLimit { rate_limit } => { write!(f, "SET SOURCE_RATE_LIMIT TO {}", rate_limit) } + AlterSourceOperation::SwapRenameSource { target_source } => { + write!(f, "SWAP WITH {}", target_source) + } } } } diff --git a/src/sqlparser/src/keywords.rs b/src/sqlparser/src/keywords.rs index 8ec7191f749c..93e47d7a6b11 100644 --- a/src/sqlparser/src/keywords.rs +++ b/src/sqlparser/src/keywords.rs @@ -503,6 +503,7 @@ define_keywords!( SUCCEEDS, SUM, SUPERUSER, + SWAP, SYMMETRIC, SYNC, SYSTEM, diff --git a/src/sqlparser/src/parser.rs b/src/sqlparser/src/parser.rs index b383869c0d3d..d5582f31a64d 100644 --- a/src/sqlparser/src/parser.rs +++ b/src/sqlparser/src/parser.rs @@ -3096,8 +3096,11 @@ impl Parser<'_> { self.expect_keyword(Keyword::TO)?; let schema_name = self.parse_object_name()?; AlterSchemaOperation::RenameSchema { schema_name } + } else if self.parse_keywords(&[Keyword::SWAP, Keyword::WITH]) { + let target_schema = self.parse_object_name()?; + AlterSchemaOperation::SwapRenameSchema { target_schema } } else { - return self.expected("RENAME OR OWNER TO after ALTER SCHEMA"); + return self.expected("RENAME, OWNER TO, OR SWAP WITH after ALTER SCHEMA"); }; Ok(Statement::AlterSchema { @@ -3216,8 +3219,12 @@ impl Parser<'_> { AlterTableOperation::AlterColumn { column_name, op } } else if self.parse_keywords(&[Keyword::REFRESH, Keyword::SCHEMA]) { AlterTableOperation::RefreshSchema + } else if self.parse_keywords(&[Keyword::SWAP, Keyword::WITH]) { + let target_table = self.parse_object_name()?; + AlterTableOperation::SwapRenameTable { target_table } } else { - return self.expected("ADD or RENAME or OWNER TO or SET or DROP after ALTER TABLE"); + return self + .expected("ADD or RENAME or OWNER TO or SET or DROP or SWAP after ALTER 
TABLE"); }; Ok(Statement::AlterTable { name: table_name, @@ -3322,6 +3329,9 @@ impl Parser<'_> { AlterViewOperation::ChangeOwner { new_owner_name: owner_name, } + } else if self.parse_keywords(&[Keyword::SWAP, Keyword::WITH]) { + let target_view = self.parse_object_name()?; + AlterViewOperation::SwapRenameView { target_view } } else if self.parse_keyword(Keyword::SET) { if self.parse_keyword(Keyword::SCHEMA) { let schema_name = self.parse_object_name()?; @@ -3352,7 +3362,7 @@ impl Parser<'_> { } } else { return self.expected(&format!( - "RENAME or OWNER TO or SET after ALTER {}VIEW", + "RENAME or OWNER TO or SET or SWAP after ALTER {}VIEW", if materialized { "MATERIALIZED " } else { "" } )); }; @@ -3401,6 +3411,9 @@ impl Parser<'_> { } else { return self.expected("SCHEMA/PARALLELISM after SET"); } + } else if self.parse_keywords(&[Keyword::SWAP, Keyword::WITH]) { + let target_sink = self.parse_object_name()?; + AlterSinkOperation::SwapRenameSink { target_sink } } else { return self.expected("RENAME or OWNER TO or SET after ALTER SINK"); }; @@ -3434,8 +3447,13 @@ impl Parser<'_> { } else { return self.expected("SCHEMA after SET"); } + } else if self.parse_keywords(&[Keyword::SWAP, Keyword::WITH]) { + let target_subscription = self.parse_object_name()?; + AlterSubscriptionOperation::SwapRenameSubscription { + target_subscription, + } } else { - return self.expected("RENAME or OWNER TO or SET after ALTER SUBSCRIPTION"); + return self.expected("RENAME or OWNER TO or SET or SWAP after ALTER SUBSCRIPTION"); }; Ok(Statement::AlterSubscription { @@ -3482,6 +3500,9 @@ impl Parser<'_> { AlterSourceOperation::FormatEncode { format_encode } } else if self.parse_keywords(&[Keyword::REFRESH, Keyword::SCHEMA]) { AlterSourceOperation::RefreshSchema + } else if self.parse_keywords(&[Keyword::SWAP, Keyword::WITH]) { + let target_source = self.parse_object_name()?; + AlterSourceOperation::SwapRenameSource { target_source } } else { return self.expected( "RENAME, ADD COLUMN, OWNER TO, SET or SOURCE_RATE_LIMIT after ALTER SOURCE", From f3e9a3be1952c5f0e7fcb9dc595a99a25a6528be Mon Sep 17 00:00:00 2001 From: congyi wang <58715567+wcy-fdu@users.noreply.github.com> Date: Mon, 4 Nov 2024 15:16:30 +0800 Subject: [PATCH 05/15] refactor(test): reorganize file connector CI tests (#19230) --- ci/workflows/main-cron.yml | 89 +-------- e2e_test/s3/{fs_sink.py => file_sink.py} | 126 +++++++++++- .../s3/{fs_source_batch.py => file_source.py} | 155 ++++++++++++--- e2e_test/s3/fs_sink_batch.py | 142 -------------- e2e_test/s3/fs_source_v2.py | 180 ------------------ e2e_test/s3/fs_source_v2_new_file.py | 90 --------- e2e_test/s3/posix_fs_source.py | 136 ------------- 7 files changed, 261 insertions(+), 657 deletions(-) rename e2e_test/s3/{fs_sink.py => file_sink.py} (71%) rename e2e_test/s3/{fs_source_batch.py => file_source.py} (55%) delete mode 100644 e2e_test/s3/fs_sink_batch.py delete mode 100644 e2e_test/s3/fs_source_v2.py delete mode 100644 e2e_test/s3/fs_source_v2_new_file.py delete mode 100644 e2e_test/s3/posix_fs_source.py diff --git a/ci/workflows/main-cron.yml b/ci/workflows/main-cron.yml index 52335fa59234..2bc81a73de5b 100644 --- a/ci/workflows/main-cron.yml +++ b/ci/workflows/main-cron.yml @@ -466,29 +466,7 @@ steps: - label: "S3 source check on AWS (json parser)" key: "s3-v2-source-check-aws-json-parser" - command: "ci/scripts/s3-source-test.sh -p ci-release -s fs_source_v2.py -t json" - if: | - !(build.pull_request.labels includes "ci/main-cron/run-selected") && build.env("CI_STEPS") == null - || 
build.pull_request.labels includes "ci/run-s3-source-tests" - || build.env("CI_STEPS") =~ /(^|,)s3-source-tests?(,|$$)/ - depends_on: build - plugins: - - seek-oss/aws-sm#v2.3.1: - env: - S3_SOURCE_TEST_CONF: ci_s3_source_test_aws - - docker-compose#v5.1.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - environment: - - S3_SOURCE_TEST_CONF - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 25 - retry: *auto-retry - - - label: "S3 source new file check on AWS (json)" - key: "s3-v2-source-new-file-check-aws" - command: "ci/scripts/s3-source-test.sh -p ci-release -s fs_source_v2_new_file.py" + command: "ci/scripts/s3-source-test.sh -p ci-release -s file_source.py -t json" if: | !(build.pull_request.labels includes "ci/main-cron/run-selected") && build.env("CI_STEPS") == null || build.pull_request.labels includes "ci/run-s3-source-tests" @@ -510,51 +488,7 @@ steps: - label: "S3 sink on parquet and json file" key: "s3-sink-parquet-and-json-encode" - command: "ci/scripts/s3-source-test.sh -p ci-release -s fs_sink.py" - if: | - !(build.pull_request.labels includes "ci/main-cron/run-selected") && build.env("CI_STEPS") == null - || build.pull_request.labels includes "ci/run-s3-source-tests" - || build.env("CI_STEPS") =~ /(^|,)s3-source-tests?(,|$$)/ - depends_on: build - plugins: - - seek-oss/aws-sm#v2.3.1: - env: - S3_SOURCE_TEST_CONF: ci_s3_source_test_aws - - docker-compose#v5.1.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - environment: - - S3_SOURCE_TEST_CONF - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 25 - retry: *auto-retry - - - label: "S3 sink batching test" - key: "s3-sink-batching-test" - command: "ci/scripts/s3-source-test.sh -p ci-release -s fs_sink_batch.py" - if: | - !(build.pull_request.labels includes "ci/main-cron/run-selected") && build.env("CI_STEPS") == null - || build.pull_request.labels includes "ci/run-s3-source-tests" - || build.env("CI_STEPS") =~ /(^|,)s3-source-tests?(,|$$)/ - depends_on: build - plugins: - - seek-oss/aws-sm#v2.3.1: - env: - S3_SOURCE_TEST_CONF: ci_s3_source_test_aws - - docker-compose#v5.1.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - environment: - - S3_SOURCE_TEST_CONF - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 25 - retry: *auto-retry - - - label: "S3 source batch read on AWS (json parser)" - key: "s3-v2-source-batch-read-check-aws-json-parser" - command: "ci/scripts/s3-source-test.sh -p ci-release -s fs_source_batch.py -t json" + command: "ci/scripts/s3-source-test.sh -p ci-release -s file_sink.py" if: | !(build.pull_request.labels includes "ci/main-cron/run-selected") && build.env("CI_STEPS") == null || build.pull_request.labels includes "ci/run-s3-source-tests" @@ -576,7 +510,7 @@ steps: - label: "S3 source check on AWS (csv parser)" key: "s3-v2-source-check-aws-csv-parser" - command: "ci/scripts/s3-source-test.sh -p ci-release -s fs_source_v2.py -t csv_without_header" + command: "ci/scripts/s3-source-test.sh -p ci-release -s file_source.py -t csv_without_header" if: | !(build.pull_request.labels includes "ci/main-cron/run-selected") && build.env("CI_STEPS") == null || build.pull_request.labels includes "ci/run-s3-source-tests" @@ -596,23 +530,6 @@ steps: timeout_in_minutes: 25 retry: *auto-retry - - label: "PosixFs source on OpenDAL fs engine (csv parser)" - key: "s3-source-test-for-opendal-fs-engine-csv-parser" - command: "ci/scripts/s3-source-test.sh -p ci-release -s posix_fs_source.py -t 
csv_without_header" - if: | - !(build.pull_request.labels includes "ci/main-cron/run-selected") && build.env("CI_STEPS") == null - || build.pull_request.labels includes "ci/run-s3-source-tests" - || build.env("CI_STEPS") =~ /(^|,)s3-source-tests?(,|$$)/ - depends_on: build - plugins: - - docker-compose#v5.1.0: - run: rw-build-env - config: ci/docker-compose.yml - mount-buildkite-agent: true - - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 25 - retry: *auto-retry - - label: "pulsar source check" key: "pulsar-source-tests" command: "ci/scripts/pulsar-source-test.sh -p ci-release" diff --git a/e2e_test/s3/fs_sink.py b/e2e_test/s3/file_sink.py similarity index 71% rename from e2e_test/s3/fs_sink.py rename to e2e_test/s3/file_sink.py index 344b1b807d7e..04042eb7817a 100644 --- a/e2e_test/s3/fs_sink.py +++ b/e2e_test/s3/file_sink.py @@ -10,6 +10,8 @@ from time import sleep from minio import Minio from random import uniform +from time import sleep +import time def gen_data(file_num, item_num_per_file): assert item_num_per_file % 2 == 0, \ @@ -266,9 +268,129 @@ def _assert_eq(field, got, expect): cur.execute(f'drop table test_parquet_sink_table') cur.execute(f'drop sink test_file_sink_json') cur.execute(f'drop table test_json_sink_table') + cur.execute(f'drop table s3_test_parquet') cur.close() conn.close() +def test_file_sink_batching(): + conn = psycopg2.connect( + host="localhost", + port="4566", + user="root", + database="dev" + ) + + # Open a cursor to execute SQL statements + cur = conn.cursor() + + + # Execute a SELECT statement + cur.execute(f'''CREATE TABLE t (v1 int, v2 int);''') + + print('test file sink batching...\n') + cur.execute(f'''CREATE sink test_file_sink_batching as select + v1, v2 from t WITH ( + connector = 's3', + s3.region_name = 'custom', + s3.bucket_name = 'hummock001', + s3.credentials.access = 'hummockadmin', + s3.credentials.secret = 'hummockadmin', + s3.endpoint_url = 'http://hummock001.127.0.0.1:9301', + s3.path = 'test_file_sink_batching/', + s3.file_type = 'parquet', + type = 'append-only', + rollover_seconds = 5, + max_row_count = 5, + force_append_only='true' + ) FORMAT PLAIN ENCODE PARQUET(force_append_only='true');''') + + cur.execute(f'''CREATE TABLE test_file_sink_batching_table( + v1 int, + v2 int, + ) WITH ( + connector = 's3', + match_pattern = 'test_file_sink_batching/*.parquet', + refresh.interval.sec = 1, + s3.region_name = 'custom', + s3.bucket_name = 'hummock001', + s3.credentials.access = 'hummockadmin', + s3.credentials.secret = 'hummockadmin', + s3.endpoint_url = 'http://hummock001.127.0.0.1:9301', + ) FORMAT PLAIN ENCODE PARQUET;''') + + cur.execute(f'''ALTER SINK test_file_sink_batching SET PARALLELISM = 2;''') + + cur.execute(f'''INSERT INTO t VALUES (10, 10);''') + + + cur.execute(f'select count(*) from test_file_sink_batching_table') + # no item will be selectedpsq + result = cur.fetchone() + + def _assert_eq(field, got, expect): + assert got == expect, f'{field} assertion failed: got {got}, expect {expect}.' + def _assert_greater(field, got, expect): + assert got > expect, f'{field} assertion failed: got {got}, expect {expect}.' 
+ + _assert_eq('count(*)', result[0], 0) + print('the rollover_seconds has not reached, count(*) = 0') + + + time.sleep(11) + + cur.execute(f'select count(*) from test_file_sink_batching_table') + result = cur.fetchone() + _assert_eq('count(*)', result[0], 1) + print('the rollover_seconds has reached, count(*) = ', result[0]) + + cur.execute(f''' + INSERT INTO t VALUES (20, 20); + INSERT INTO t VALUES (30, 30); + INSERT INTO t VALUES (40, 40); + INSERT INTO t VALUES (50, 10); + ''') + + cur.execute(f'select count(*) from test_file_sink_batching_table') + # count(*) = 1 + result = cur.fetchone() + _assert_eq('count(*)', result[0], 1) + print('the max row count has not reached, count(*) = ', result[0]) + + cur.execute(f''' + INSERT INTO t VALUES (60, 20); + INSERT INTO t VALUES (70, 30); + INSERT INTO t VALUES (80, 10); + INSERT INTO t VALUES (90, 20); + INSERT INTO t VALUES (100, 30); + INSERT INTO t VALUES (100, 10); + ''') + + time.sleep(10) + + cur.execute(f'select count(*) from test_file_sink_batching_table') + result = cur.fetchone() + _assert_greater('count(*)', result[0], 1) + print('the rollover_seconds has reached, count(*) = ', result[0]) + + cur.execute(f'drop sink test_file_sink_batching;') + cur.execute(f'drop table t;') + cur.execute(f'drop table test_file_sink_batching_table;') + cur.close() + conn.close() + # delete objects + + client = Minio( + "127.0.0.1:9301", + "hummockadmin", + "hummockadmin", + secure=False, + ) + objects = client.list_objects("hummock001", prefix="test_file_sink_batching/", recursive=True) + + for obj in objects: + client.remove_object("hummock001", obj.object_name) + print(f"Deleted: {obj.object_name}") + if __name__ == "__main__": @@ -307,7 +429,9 @@ def _assert_eq(field, got, expect): do_sink(config, FILE_NUM, ITEM_NUM_PER_FILE, run_id) - # clean up s3 files + # clean up s3 files for idx, _ in enumerate(data): client.remove_object("hummock001", _s3(idx)) + # test file sink batching + test_file_sink_batching() \ No newline at end of file diff --git a/e2e_test/s3/fs_source_batch.py b/e2e_test/s3/file_source.py similarity index 55% rename from e2e_test/s3/fs_source_batch.py rename to e2e_test/s3/file_source.py index fc09b0ef4b51..0f3aaaed64a7 100644 --- a/e2e_test/s3/fs_source_batch.py +++ b/e2e_test/s3/file_source.py @@ -5,7 +5,9 @@ import random import psycopg2 +# from handle_incremental_file import upload_to_s3_bucket, check_for_new_files from time import sleep +import time from io import StringIO from minio import Minio from functools import partial @@ -29,6 +31,7 @@ def format_json(data): for file in data ] + def format_csv(data, with_header): csv_files = [] @@ -42,7 +45,7 @@ def format_csv(data, with_header): csv_files.append(ostream.getvalue()) return csv_files -def do_test(config, file_num, item_num_per_file, prefix, fmt): +def do_test(config, file_num, item_num_per_file, prefix, fmt, need_drop_table=True): conn = psycopg2.connect( host="localhost", port="4566", @@ -53,7 +56,7 @@ def do_test(config, file_num, item_num_per_file, prefix, fmt): # Open a cursor to execute SQL statements cur = conn.cursor() - def _source(): + def _table(): return f's3_test_{fmt}' def _encode(): @@ -62,33 +65,42 @@ def _encode(): else: return f"CSV (delimiter = ',', without_header = {str('without' in fmt).lower()})" + def _include_clause(): + if fmt == 'json': + return 'INCLUDE payload as rw_payload' + else: + return '' + # Execute a SELECT statement - cur.execute(f'''CREATE SOURCE {_source()}( + cur.execute(f'''CREATE TABLE {_table()}( id int, name TEXT, sex int, 
mark int, - ) WITH ( + ) + {_include_clause()} + WITH ( connector = 's3', match_pattern = '{prefix}*.{fmt}', s3.region_name = '{config['S3_REGION']}', s3.bucket_name = '{config['S3_BUCKET']}', s3.credentials.access = '{config['S3_ACCESS_KEY']}', s3.credentials.secret = '{config['S3_SECRET_KEY']}', - s3.endpoint_url = 'https://{config['S3_ENDPOINT']}' + s3.endpoint_url = 'https://{config['S3_ENDPOINT']}', + refresh.interval.sec = 1 ) FORMAT PLAIN ENCODE {_encode()};''') total_rows = file_num * item_num_per_file MAX_RETRIES = 40 for retry_no in range(MAX_RETRIES): - cur.execute(f'select count(*) from {_source()}') + cur.execute(f'select count(*) from {_table()}') result = cur.fetchone() if result[0] == total_rows: break - print(f"[retry {retry_no}] Now got {result[0]} rows in source, {total_rows} expected, wait 30s") + print(f"[retry {retry_no}] Now got {result[0]} rows in table, {total_rows} expected, wait 30s") sleep(30) - stmt = f'select count(*), sum(id), sum(sex), sum(mark) from {_source()}' + stmt = f'select count(*), sum(id), sum(sex), sum(mark) from {_table()}' print(f'Execute {stmt}') cur.execute(stmt) result = cur.fetchone() @@ -103,13 +115,35 @@ def _assert_eq(field, got, expect): _assert_eq('sum(sex)', result[2], total_rows / 2) _assert_eq('sum(mark)', result[3], 0) - print('Test pass') + # only do payload check for json format, which enables INCLUDE CLAUSE + if fmt == 'json': + # check rw_payload + print('Check rw_payload') + stmt = f"select id, name, sex, mark, rw_payload from {_table()} limit 1;" + cur.execute(stmt) + result = cur.fetchone() + print("Got one line with rw_payload: ", result) + payload = result[4] + _assert_eq('id', payload['id'], result[0]) + _assert_eq('name', payload['name'], result[1]) + _assert_eq('sex', payload['sex'], result[2]) + _assert_eq('mark', payload['mark'], result[3]) - cur.execute(f'drop source {_source()}') + print('Test pass') + + if need_drop_table: + cur.execute(f'drop table {_table()}') cur.close() conn.close() -def test_empty_source(config, prefix, fmt): +FORMATTER = { + 'json': format_json, + 'csv_with_header': partial(format_csv, with_header=True), + 'csv_without_header': partial(format_csv, with_header=False), + } + + +def test_batch_read(config, file_num, item_num_per_file, prefix, fmt): conn = psycopg2.connect( host="localhost", port="4566", @@ -121,7 +155,7 @@ def test_empty_source(config, prefix, fmt): cur = conn.cursor() def _source(): - return f's3_test_empty_{fmt}' + return f's3_test_{fmt}' def _encode(): if fmt == 'json': @@ -145,6 +179,16 @@ def _encode(): s3.endpoint_url = 'https://{config['S3_ENDPOINT']}' ) FORMAT PLAIN ENCODE {_encode()};''') + total_rows = file_num * item_num_per_file + MAX_RETRIES = 40 + for retry_no in range(MAX_RETRIES): + cur.execute(f'select count(*) from {_source()}') + result = cur.fetchone() + if result[0] == total_rows: + break + print(f"[retry {retry_no}] Now got {result[0]} rows in source, {total_rows} expected, wait 30s") + sleep(30) + stmt = f'select count(*), sum(id), sum(sex), sum(mark) from {_source()}' print(f'Execute {stmt}') cur.execute(stmt) @@ -155,25 +199,61 @@ def _encode(): def _assert_eq(field, got, expect): assert got == expect, f'{field} assertion failed: got {got}, expect {expect}.' 
- _assert_eq('count(*)', result[0], 0) + _assert_eq('count(*)', result[0], total_rows) + _assert_eq('sum(id)', result[1], (total_rows - 1) * total_rows / 2) + _assert_eq('sum(sex)', result[2], total_rows / 2) + _assert_eq('sum(mark)', result[3], 0) - print('Empty source test pass') + print('Test batch read pass') cur.execute(f'drop source {_source()}') cur.close() conn.close() + +def upload_to_s3_bucket(config, minio_client, run_id, files, start_bias): + _local = lambda idx, start_bias: f"data_{idx + start_bias}.{fmt}" + _s3 = lambda idx, start_bias: f"{run_id}_data_{idx + start_bias}.{fmt}" + for idx, file_str in enumerate(files): + with open(_local(idx, start_bias), "w") as f: + f.write(file_str) + os.fsync(f.fileno()) + + minio_client.fput_object( + config["S3_BUCKET"], _s3(idx, start_bias), _local(idx, start_bias) + ) + + +def check_for_new_files(file_num, item_num_per_file, fmt): + conn = psycopg2.connect(host="localhost", port="4566", user="root", database="dev") + + # Open a cursor to execute SQL statements + cur = conn.cursor() + + def _table(): + return f"s3_test_{fmt}" + + total_rows = file_num * item_num_per_file + + MAX_RETRIES = 40 + for retry_no in range(MAX_RETRIES): + cur.execute(f"select count(*) from {_table()}") + result = cur.fetchone() + if result[0] == total_rows: + return True + print( + f"[retry {retry_no}] Now got {result[0]} rows in table, {total_rows} expected, wait 10s" + ) + time.sleep(10) + return False + + if __name__ == "__main__": FILE_NUM = 4001 ITEM_NUM_PER_FILE = 2 data = gen_data(FILE_NUM, ITEM_NUM_PER_FILE) fmt = sys.argv[1] - FORMATTER = { - 'json': format_json, - 'csv_with_header': partial(format_csv, with_header=True), - 'csv_without_header': partial(format_csv, with_header=False), - } assert fmt in FORMATTER, f"Unsupported format: {fmt}" formatted_files = FORMATTER[fmt](data) @@ -201,10 +281,41 @@ def _assert_eq(field, got, expect): ) # do test + print("Test streaming file source...\n") do_test(config, FILE_NUM, ITEM_NUM_PER_FILE, run_id, fmt) + print("Test batch read file source...\n") + test_batch_read(config, FILE_NUM, ITEM_NUM_PER_FILE, run_id, fmt) + + # test file source handle incremental files + data = gen_data(FILE_NUM, ITEM_NUM_PER_FILE) + fmt = "json" + + split_idx = 51 + data_batch1 = data[:split_idx] + data_batch2 = data[split_idx:] + run_id = str(random.randint(1000, 9999)) + print(f"S3 Source New File Test: run ID: {run_id} to buckek") + + formatted_batch1 = FORMATTER[fmt](data_batch1) + upload_to_s3_bucket(config, client, run_id, formatted_batch1, 0) + + # config in do_test that fs source's list interval is 1s + do_test( + config, len(data_batch1), ITEM_NUM_PER_FILE, run_id, fmt, need_drop_table=False + ) + + formatted_batch2 = FORMATTER[fmt](data_batch2) + upload_to_s3_bucket(config, client, run_id, formatted_batch2, split_idx) + + success_flag = check_for_new_files(FILE_NUM, ITEM_NUM_PER_FILE, fmt) + if success_flag: + print("Test(add new file) pass") + else: + print("Test(add new file) fail") + + _s3 = lambda idx, start_bias: f"{run_id}_data_{idx + start_bias}.{fmt}" + # clean up s3 files for idx, _ in enumerate(formatted_files): - client.remove_object(config["S3_BUCKET"], _s3(idx)) - - test_empty_source(config, run_id, fmt) \ No newline at end of file + client.remove_object(config["S3_BUCKET"], _s3(idx, 0)) diff --git a/e2e_test/s3/fs_sink_batch.py b/e2e_test/s3/fs_sink_batch.py deleted file mode 100644 index 125675f61bee..000000000000 --- a/e2e_test/s3/fs_sink_batch.py +++ /dev/null @@ -1,142 +0,0 @@ -import os -import sys 
-import random -import psycopg2 -import json -import time -import pyarrow as pa -import pyarrow.parquet as pq -import pandas as pd -from datetime import datetime, timezone -from time import sleep -from minio import Minio -from random import uniform - -def do_test(config, file_num, item_num_per_file, prefix): - conn = psycopg2.connect( - host="localhost", - port="4566", - user="root", - database="dev" - ) - - # Open a cursor to execute SQL statements - cur = conn.cursor() - - - # Execute a SELECT statement - cur.execute(f'''CREATE TABLE t (v1 int, v2 int);''') - - print('create sink') - cur.execute(f'''CREATE sink test_file_sink_batching as select - v1, v2 from t WITH ( - connector = 's3', - s3.region_name = 'custom', - s3.bucket_name = 'hummock001', - s3.credentials.access = 'hummockadmin', - s3.credentials.secret = 'hummockadmin', - s3.endpoint_url = 'http://hummock001.127.0.0.1:9301', - s3.path = 'test_sink/', - s3.file_type = 'parquet', - type = 'append-only', - rollover_seconds = 5, - max_row_count = 5, - force_append_only='true' - ) FORMAT PLAIN ENCODE PARQUET(force_append_only='true');''') - - cur.execute(f'''CREATE TABLE test_sink_table( - v1 int, - v2 int, - ) WITH ( - connector = 's3', - match_pattern = 'test_sink/*.parquet', - refresh.interval.sec = 1, - s3.region_name = 'custom', - s3.bucket_name = 'hummock001', - s3.credentials.access = 'hummockadmin', - s3.credentials.secret = 'hummockadmin', - s3.endpoint_url = 'http://hummock001.127.0.0.1:9301', - ) FORMAT PLAIN ENCODE PARQUET;''') - - cur.execute(f'''ALTER SINK test_file_sink_batching SET PARALLELISM = 2;''') - - cur.execute(f'''INSERT INTO t VALUES (10, 10);''') - - - cur.execute(f'select count(*) from test_sink_table') - # no item will be selectedpsq - result = cur.fetchone() - - def _assert_eq(field, got, expect): - assert got == expect, f'{field} assertion failed: got {got}, expect {expect}.' - def _assert_greater(field, got, expect): - assert got > expect, f'{field} assertion failed: got {got}, expect {expect}.' 
- - _assert_eq('count(*)', result[0], 0) - print('the rollover_seconds has not reached, count(*) = 0') - - - time.sleep(11) - - cur.execute(f'select count(*) from test_sink_table') - result = cur.fetchone() - _assert_eq('count(*)', result[0], 1) - print('the rollover_seconds has reached, count(*) = ', result[0]) - - cur.execute(f''' - INSERT INTO t VALUES (20, 20); - INSERT INTO t VALUES (30, 30); - INSERT INTO t VALUES (40, 40); - INSERT INTO t VALUES (50, 10); - ''') - - cur.execute(f'select count(*) from test_sink_table') - # count(*) = 1 - result = cur.fetchone() - _assert_eq('count(*)', result[0], 1) - print('the max row count has not reached, count(*) = ', result[0]) - - cur.execute(f''' - INSERT INTO t VALUES (60, 20); - INSERT INTO t VALUES (70, 30); - INSERT INTO t VALUES (80, 10); - INSERT INTO t VALUES (90, 20); - INSERT INTO t VALUES (100, 30); - INSERT INTO t VALUES (100, 10); - ''') - - time.sleep(10) - - cur.execute(f'select count(*) from test_sink_table') - result = cur.fetchone() - _assert_greater('count(*)', result[0], 1) - print('the rollover_seconds has reached, count(*) = ', result[0]) - - cur.execute(f'drop sink test_file_sink_batching;') - cur.execute(f'drop table t;') - cur.execute(f'drop table test_sink_table;') - cur.close() - conn.close() - -if __name__ == "__main__": - FILE_NUM = 10 - ITEM_NUM_PER_FILE = 2000 - - config = json.loads(os.environ["S3_SOURCE_TEST_CONF"]) - client = Minio( - "127.0.0.1:9301", - "hummockadmin", - "hummockadmin", - secure=False, - ) - run_id = str(random.randint(1000, 9999)) - _local = lambda idx: f'data_{idx}.parquet' - _s3 = lambda idx: f"{run_id}_data_{idx}.parquet" - - do_test(config, FILE_NUM, ITEM_NUM_PER_FILE, run_id) - - objects = client.list_objects("hummock001", prefix="test_sink/", recursive=True) - - for obj in objects: - client.remove_object("hummock001", obj.object_name) - print(f"Deleted: {obj.object_name}") diff --git a/e2e_test/s3/fs_source_v2.py b/e2e_test/s3/fs_source_v2.py deleted file mode 100644 index 343b92154085..000000000000 --- a/e2e_test/s3/fs_source_v2.py +++ /dev/null @@ -1,180 +0,0 @@ -import os -import sys -import csv -import json -import random -import psycopg2 - -from time import sleep -from io import StringIO -from minio import Minio -from functools import partial - -def gen_data(file_num, item_num_per_file): - assert item_num_per_file % 2 == 0, \ - f'item_num_per_file should be even to ensure sum(mark) == 0: {item_num_per_file}' - return [ - [{ - 'id': file_id * item_num_per_file + item_id, - 'name': f'{file_id}_{item_id}', - 'sex': item_id % 2, - 'mark': (-1) ** (item_id % 2), - } for item_id in range(item_num_per_file)] - for file_id in range(file_num) - ] - -def format_json(data): - return [ - '\n'.join([json.dumps(item) for item in file]) - for file in data - ] - - -def format_csv(data, with_header): - csv_files = [] - - for file_data in data: - ostream = StringIO() - writer = csv.DictWriter(ostream, fieldnames=file_data[0].keys()) - if with_header: - writer.writeheader() - for item_data in file_data: - writer.writerow(item_data) - csv_files.append(ostream.getvalue()) - return csv_files - -def do_test(config, file_num, item_num_per_file, prefix, fmt, need_drop_table=True): - conn = psycopg2.connect( - host="localhost", - port="4566", - user="root", - database="dev" - ) - - # Open a cursor to execute SQL statements - cur = conn.cursor() - - def _table(): - return f's3_test_{fmt}' - - def _encode(): - if fmt == 'json': - return 'JSON' - else: - return f"CSV (delimiter = ',', without_header = 
{str('without' in fmt).lower()})" - - def _include_clause(): - if fmt == 'json': - return 'INCLUDE payload as rw_payload' - else: - return '' - - # Execute a SELECT statement - cur.execute(f'''CREATE TABLE {_table()}( - id int, - name TEXT, - sex int, - mark int, - ) - {_include_clause()} - WITH ( - connector = 's3', - match_pattern = '{prefix}*.{fmt}', - s3.region_name = '{config['S3_REGION']}', - s3.bucket_name = '{config['S3_BUCKET']}', - s3.credentials.access = '{config['S3_ACCESS_KEY']}', - s3.credentials.secret = '{config['S3_SECRET_KEY']}', - s3.endpoint_url = 'https://{config['S3_ENDPOINT']}', - refresh.interval.sec = 1 - ) FORMAT PLAIN ENCODE {_encode()};''') - - total_rows = file_num * item_num_per_file - MAX_RETRIES = 40 - for retry_no in range(MAX_RETRIES): - cur.execute(f'select count(*) from {_table()}') - result = cur.fetchone() - if result[0] == total_rows: - break - print(f"[retry {retry_no}] Now got {result[0]} rows in table, {total_rows} expected, wait 30s") - sleep(30) - - stmt = f'select count(*), sum(id), sum(sex), sum(mark) from {_table()}' - print(f'Execute {stmt}') - cur.execute(stmt) - result = cur.fetchone() - - print('Got:', result) - - def _assert_eq(field, got, expect): - assert got == expect, f'{field} assertion failed: got {got}, expect {expect}.' - - _assert_eq('count(*)', result[0], total_rows) - _assert_eq('sum(id)', result[1], (total_rows - 1) * total_rows / 2) - _assert_eq('sum(sex)', result[2], total_rows / 2) - _assert_eq('sum(mark)', result[3], 0) - - # only do payload check for json format, which enables INCLUDE CLAUSE - if fmt == 'json': - # check rw_payload - print('Check rw_payload') - stmt = f"select id, name, sex, mark, rw_payload from {_table()} limit 1;" - cur.execute(stmt) - result = cur.fetchone() - print("Got one line with rw_payload: ", result) - payload = result[4] - _assert_eq('id', payload['id'], result[0]) - _assert_eq('name', payload['name'], result[1]) - _assert_eq('sex', payload['sex'], result[2]) - _assert_eq('mark', payload['mark'], result[3]) - - print('Test pass') - - if need_drop_table: - cur.execute(f'drop table {_table()}') - cur.close() - conn.close() - -FORMATTER = { - 'json': format_json, - 'csv_with_header': partial(format_csv, with_header=True), - 'csv_without_header': partial(format_csv, with_header=False), - } - -if __name__ == "__main__": - FILE_NUM = 4001 - ITEM_NUM_PER_FILE = 2 - data = gen_data(FILE_NUM, ITEM_NUM_PER_FILE) - - fmt = sys.argv[1] - assert fmt in FORMATTER, f"Unsupported format: {fmt}" - formatted_files = FORMATTER[fmt](data) - - config = json.loads(os.environ["S3_SOURCE_TEST_CONF"]) - client = Minio( - config["S3_ENDPOINT"], - access_key=config["S3_ACCESS_KEY"], - secret_key=config["S3_SECRET_KEY"], - secure=True, - ) - run_id = str(random.randint(1000, 9999)) - _local = lambda idx: f'data_{idx}.{fmt}' - _s3 = lambda idx: f"{run_id}_data_{idx}.{fmt}" - - # put s3 files - for idx, file_str in enumerate(formatted_files): - with open(_local(idx), "w") as f: - f.write(file_str) - os.fsync(f.fileno()) - - client.fput_object( - config["S3_BUCKET"], - _s3(idx), - _local(idx) - ) - - # do test - do_test(config, FILE_NUM, ITEM_NUM_PER_FILE, run_id, fmt) - - # clean up s3 files - for idx, _ in enumerate(formatted_files): - client.remove_object(config["S3_BUCKET"], _s3(idx)) diff --git a/e2e_test/s3/fs_source_v2_new_file.py b/e2e_test/s3/fs_source_v2_new_file.py deleted file mode 100644 index c90103e15c12..000000000000 --- a/e2e_test/s3/fs_source_v2_new_file.py +++ /dev/null @@ -1,90 +0,0 @@ -from fs_source_v2 
import gen_data, FORMATTER, do_test -import json -import os -import random -import psycopg2 -import time -from minio import Minio - - -def upload_to_s3_bucket(config, minio_client, run_id, files, start_bias): - _local = lambda idx, start_bias: f"data_{idx + start_bias}.{fmt}" - _s3 = lambda idx, start_bias: f"{run_id}_data_{idx + start_bias}.{fmt}" - for idx, file_str in enumerate(files): - with open(_local(idx, start_bias), "w") as f: - f.write(file_str) - os.fsync(f.fileno()) - - minio_client.fput_object( - config["S3_BUCKET"], _s3(idx, start_bias), _local(idx, start_bias) - ) - - -def check_for_new_files(file_num, item_num_per_file, fmt): - conn = psycopg2.connect(host="localhost", port="4566", user="root", database="dev") - - # Open a cursor to execute SQL statements - cur = conn.cursor() - - def _table(): - return f"s3_test_{fmt}" - - total_rows = file_num * item_num_per_file - - MAX_RETRIES = 40 - for retry_no in range(MAX_RETRIES): - cur.execute(f"select count(*) from {_table()}") - result = cur.fetchone() - if result[0] == total_rows: - return True - print( - f"[retry {retry_no}] Now got {result[0]} rows in table, {total_rows} expected, wait 10s" - ) - time.sleep(10) - return False - - -if __name__ == "__main__": - FILE_NUM = 101 - ITEM_NUM_PER_FILE = 2 - data = gen_data(FILE_NUM, ITEM_NUM_PER_FILE) - fmt = "json" - - split_idx = 51 - data_batch1 = data[:split_idx] - data_batch2 = data[split_idx:] - - config = json.loads(os.environ["S3_SOURCE_TEST_CONF"]) - client = Minio( - config["S3_ENDPOINT"], - access_key=config["S3_ACCESS_KEY"], - secret_key=config["S3_SECRET_KEY"], - secure=True, - ) - run_id = str(random.randint(1000, 9999)) - print(f"S3 Source New File Test: run ID: {run_id} to bucket {config['S3_BUCKET']}") - - formatted_batch1 = FORMATTER[fmt](data_batch1) - upload_to_s3_bucket(config, client, run_id, formatted_batch1, 0) - - # config in do_test that fs source's list interval is 1s - do_test( - config, len(data_batch1), ITEM_NUM_PER_FILE, run_id, fmt, need_drop_table=False - ) - - formatted_batch2 = FORMATTER[fmt](data_batch2) - upload_to_s3_bucket(config, client, run_id, formatted_batch2, split_idx) - - success_flag = check_for_new_files(FILE_NUM, ITEM_NUM_PER_FILE, fmt) - if success_flag: - print("Test(add new file) pass") - else: - print("Test(add new file) fail") - - _s3 = lambda idx, start_bias: f"{run_id}_data_{idx + start_bias}.{fmt}" - # clean up s3 files - for idx, _ in enumerate(data): - client.remove_object(config["S3_BUCKET"], _s3(idx, 0)) - - if success_flag == False: - exit(1) diff --git a/e2e_test/s3/posix_fs_source.py b/e2e_test/s3/posix_fs_source.py deleted file mode 100644 index a7cea46fa496..000000000000 --- a/e2e_test/s3/posix_fs_source.py +++ /dev/null @@ -1,136 +0,0 @@ -import os -import sys -import csv -import random -import psycopg2 -import opendal - -from time import sleep -from io import StringIO -from functools import partial - -def gen_data(file_num, item_num_per_file): - assert item_num_per_file % 2 == 0, \ - f'item_num_per_file should be even to ensure sum(mark) == 0: {item_num_per_file}' - return [ - [{ - 'id': file_id * item_num_per_file + item_id, - 'name': f'{file_id}_{item_id}', - 'sex': item_id % 2, - 'mark': (-1) ** (item_id % 2), - } for item_id in range(item_num_per_file)] - for file_id in range(file_num) - ] - -def format_csv(data, with_header): - csv_files = [] - - for file_data in data: - ostream = StringIO() - writer = csv.DictWriter(ostream, fieldnames=file_data[0].keys()) - if with_header: - writer.writeheader() - for 
item_data in file_data: - writer.writerow(item_data) - csv_files.append(ostream.getvalue()) - return csv_files - - -def do_test(file_num, item_num_per_file, prefix, fmt): - conn = psycopg2.connect( - host="localhost", - port="4566", - user="root", - database="dev" - ) - - # Open a cursor to execute SQL statements - cur = conn.cursor() - - def _table(): - return f'posix_fs_test_{fmt}' - - def _encode(): - return f"CSV (delimiter = ',', without_header = {str('without' in fmt).lower()})" - - # Execute a SELECT statement - cur.execute(f'''CREATE TABLE {_table()}( - id int, - name TEXT, - sex int, - mark int, - ) WITH ( - connector = 'posix_fs', - match_pattern = '{prefix}*.{fmt}', - posix_fs.root = '/tmp', - ) FORMAT PLAIN ENCODE {_encode()};''') - - total_rows = file_num * item_num_per_file - MAX_RETRIES = 40 - for retry_no in range(MAX_RETRIES): - cur.execute(f'select count(*) from {_table()}') - result = cur.fetchone() - if result[0] == total_rows: - break - print(f"[retry {retry_no}] Now got {result[0]} rows in table, {total_rows} expected, wait 30s") - sleep(30) - - stmt = f'select count(*), sum(id), sum(sex), sum(mark) from {_table()}' - print(f'Execute {stmt}') - cur.execute(stmt) - result = cur.fetchone() - - print('Got:', result) - - def _assert_eq(field, got, expect): - assert got == expect, f'{field} assertion failed: got {got}, expect {expect}.' - - _assert_eq('count(*)', result[0], total_rows) - _assert_eq('sum(id)', result[1], (total_rows - 1) * total_rows / 2) - _assert_eq('sum(sex)', result[2], total_rows / 2) - _assert_eq('sum(mark)', result[3], 0) - - print('Test pass') - - cur.execute(f'drop table {_table()}') - cur.close() - conn.close() - - -if __name__ == "__main__": - FILE_NUM = 4001 - ITEM_NUM_PER_FILE = 2 - data = gen_data(FILE_NUM, ITEM_NUM_PER_FILE) - - fmt = sys.argv[1] - FORMATTER = { - 'csv_with_header': partial(format_csv, with_header=True), - 'csv_without_header': partial(format_csv, with_header=False), - } - assert fmt in FORMATTER, f"Unsupported format: {fmt}" - formatted_files = FORMATTER[fmt](data) - - run_id = str(random.randint(1000, 9999)) - _local = lambda idx: f'data_{idx}.{fmt}' - _posix = lambda idx: f"{run_id}_data_{idx}.{fmt}" - # put local files - op = opendal.Operator("fs", root="/tmp") - - print("write file to /tmp") - for idx, file_str in enumerate(formatted_files): - with open(_local(idx), "w") as f: - f.write(file_str) - os.fsync(f.fileno()) - file_name = _posix(idx) - file_bytes = file_str.encode('utf-8') - op.write(file_name, file_bytes) - - # do test - print("do test") - do_test(FILE_NUM, ITEM_NUM_PER_FILE, run_id, fmt) - - # clean up local files - print("clean up local files in /tmp") - for idx, _ in enumerate(formatted_files): - file_name = _posix(idx) - op.delete(file_name) From 6b10d92ecbacec65f361823799ef6470630ee316 Mon Sep 17 00:00:00 2001 From: Bugen Zhao Date: Mon, 4 Nov 2024 15:51:01 +0800 Subject: [PATCH 06/15] fix(meta): address bugs for mysql backend & enable e2e tests (#19156) Signed-off-by: Bugen Zhao --- ci/workflows/main-cron.yml | 42 ++++++++++++++++++- ci/workflows/pull-request.yml | 4 +- src/meta/model/migration/README.md | 6 +++ .../migration/src/m20230908_072257_init.rs | 29 ++++++++++--- .../src/m20240304_074901_subscription.rs | 6 ++- ...240506_112555_subscription_partial_ckpt.rs | 12 +++++- .../m20241025_062548_singleton_vnode_count.rs | 19 ++++----- src/meta/model/migration/src/utils.rs | 12 ++++++ src/meta/src/barrier/mod.rs | 5 ++- src/meta/src/hummock/manager/gc.rs | 7 +--- 
src/meta/src/hummock/manager/time_travel.rs | 22 ++-------- 11 files changed, 115 insertions(+), 49 deletions(-) diff --git a/ci/workflows/main-cron.yml b/ci/workflows/main-cron.yml index 2bc81a73de5b..3f3cd705f09f 100644 --- a/ci/workflows/main-cron.yml +++ b/ci/workflows/main-cron.yml @@ -704,6 +704,46 @@ steps: timeout_in_minutes: 25 retry: *auto-retry + - label: "end-to-end test (postgres backend)" + key: e2e-test-postgres-backend + command: "ci/scripts/e2e-test.sh -p ci-release -m ci-3streaming-2serving-3fe-pg-backend" + if: | + !(build.pull_request.labels includes "ci/main-cron/run-selected") && build.env("CI_STEPS") == null + || build.pull_request.labels includes "ci/run-e2e-test-other-backends" + || build.env("CI_STEPS") =~ /(^|,)e2e-test-other-backends?(,|$$)/ + depends_on: + - "build" + - "build-other" + - "docslt" + plugins: + - docker-compose#v5.1.0: + run: pg-mysql-backend-test-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 25 + retry: *auto-retry + + - label: "end-to-end test (mysql backend)" + key: e2e-test-mysql-backend + command: "ci/scripts/e2e-test.sh -p ci-release -m ci-3streaming-2serving-3fe-mysql-backend" + if: | + !(build.pull_request.labels includes "ci/main-cron/run-selected") && build.env("CI_STEPS") == null + || build.pull_request.labels includes "ci/run-e2e-test-other-backends" + || build.env("CI_STEPS") =~ /(^|,)e2e-test-other-backends?(,|$$)/ + depends_on: + - "build" + - "build-other" + - "docslt" + plugins: + - docker-compose#v5.1.0: + run: pg-mysql-backend-test-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 25 + retry: *auto-retry + - label: "end-to-end test for opendal (parallel)" key: "e2e-test-opendal-parallel" command: "ci/scripts/e2e-test-parallel-for-opendal.sh -p ci-release" @@ -1112,4 +1152,4 @@ steps: # This should be the LAST part of the main-cron file. - label: "trigger failed test notification" if: build.pull_request.labels includes "ci/main-cron/test-notify" || build.branch == "main" - command: "ci/scripts/notify.py" \ No newline at end of file + command: "ci/scripts/notify.py" diff --git a/ci/workflows/pull-request.yml b/ci/workflows/pull-request.yml index 4b6079b72414..5924f0fe49bf 100644 --- a/ci/workflows/pull-request.yml +++ b/ci/workflows/pull-request.yml @@ -808,9 +808,7 @@ steps: - label: "end-to-end test (mysql backend)" command: "ci/scripts/e2e-test.sh -p ci-dev -m ci-3streaming-2serving-3fe-mysql-backend" - if: "false" - # TODO: enable this when all bugs of mysql backend are addressed. - # if: build.pull_request.labels includes "ci/run-e2e-test-other-backends" || build.env("CI_STEPS") =~ /(^|,)e2e-test-other-backends?(,|$$)/ + if: build.pull_request.labels includes "ci/run-e2e-test-other-backends" || build.env("CI_STEPS") =~ /(^|,)e2e-test-other-backends?(,|$$)/ depends_on: - "build" - "build-other" diff --git a/src/meta/model/migration/README.md b/src/meta/model/migration/README.md index d0253be5c05d..e354d556ed6d 100644 --- a/src/meta/model/migration/README.md +++ b/src/meta/model/migration/README.md @@ -54,3 +54,9 @@ ## Adding a migration - Add a new column to some catalogs. You can checkout the migration [m20240617_070131_index_column_properties.rs](src/m20240617_070131_index_column_properties.rs) as a reference. + +### Special notes on MySQL backend + +MySQL data types typically have stricter length limits compared to those in PostgreSQL or SQLite. 
For example, the `VARCHAR`, `TEXT`, `BLOB`, and `BINARY` data types have a maximum length of 65,535 bytes. + +If you need to store more data, e.g., SQL definition, UDF body, protobuf-encoded internal data, etc., please **avoid** using the built-in constructors like `ColumnDef::text` or `ColumnDef::blob`, but use the extensions defined in [`./src/utils.rs`](./src/utils.rs) instead. diff --git a/src/meta/model/migration/src/m20230908_072257_init.rs b/src/meta/model/migration/src/m20230908_072257_init.rs index d30e9fe1d949..38fba9101156 100644 --- a/src/meta/model/migration/src/m20230908_072257_init.rs +++ b/src/meta/model/migration/src/m20230908_072257_init.rs @@ -538,7 +538,11 @@ impl MigrationTrait for Migration { .json_binary() .not_null(), ) - .col(ColumnDef::new(Source::Definition).text().not_null()) + .col( + ColumnDef::new(Source::Definition) + .rw_long_text(manager) + .not_null(), + ) .col(ColumnDef::new(Source::SourceInfo).rw_binary(manager)) .col( ColumnDef::new(Source::WatermarkDescs) @@ -596,7 +600,11 @@ impl MigrationTrait for Migration { .col(ColumnDef::new(Table::VnodeColIndex).integer()) .col(ColumnDef::new(Table::RowIdIndex).integer()) .col(ColumnDef::new(Table::ValueIndices).json_binary().not_null()) - .col(ColumnDef::new(Table::Definition).text().not_null()) + .col( + ColumnDef::new(Table::Definition) + .rw_long_text(manager) + .not_null(), + ) .col( ColumnDef::new(Table::HandlePkConflictBehavior) .string() @@ -686,7 +694,11 @@ impl MigrationTrait for Migration { .col(ColumnDef::new(Sink::DownstreamPk).json_binary().not_null()) .col(ColumnDef::new(Sink::SinkType).string().not_null()) .col(ColumnDef::new(Sink::Properties).json_binary().not_null()) - .col(ColumnDef::new(Sink::Definition).text().not_null()) + .col( + ColumnDef::new(Sink::Definition) + .rw_long_text(manager) + .not_null(), + ) .col(ColumnDef::new(Sink::ConnectionId).integer()) .col(ColumnDef::new(Sink::DbName).string().not_null()) .col(ColumnDef::new(Sink::SinkFromName).string().not_null()) @@ -724,7 +736,11 @@ impl MigrationTrait for Migration { .col(ColumnDef::new(View::ViewId).integer().primary_key()) .col(ColumnDef::new(View::Name).string().not_null()) .col(ColumnDef::new(View::Properties).json_binary().not_null()) - .col(ColumnDef::new(View::Definition).text().not_null()) + .col( + ColumnDef::new(View::Definition) + .rw_long_text(manager) + .not_null(), + ) .col(ColumnDef::new(View::Columns).rw_binary(manager).not_null()) .foreign_key( &mut ForeignKey::create() @@ -798,8 +814,9 @@ impl MigrationTrait for Migration { .col(ColumnDef::new(Function::Language).string().not_null()) .col(ColumnDef::new(Function::Link).string()) .col(ColumnDef::new(Function::Identifier).string()) - .col(ColumnDef::new(Function::Body).string()) - .col(ColumnDef::new(Function::CompressedBinary).string()) + .col(ColumnDef::new(Function::Body).rw_long_text(manager)) + // XXX: should this be binary type instead? 
+ .col(ColumnDef::new(Function::CompressedBinary).rw_long_text(manager)) .col(ColumnDef::new(Function::Kind).string().not_null()) .col( ColumnDef::new(Function::AlwaysRetryOnNetworkError) diff --git a/src/meta/model/migration/src/m20240304_074901_subscription.rs b/src/meta/model/migration/src/m20240304_074901_subscription.rs index a7c791e828dc..f759d303b25a 100644 --- a/src/meta/model/migration/src/m20240304_074901_subscription.rs +++ b/src/meta/model/migration/src/m20240304_074901_subscription.rs @@ -40,7 +40,11 @@ impl MigrationTrait for Migration { .json_binary() .not_null(), ) - .col(ColumnDef::new(Subscription::Definition).string().not_null()) + .col( + ColumnDef::new(Subscription::Definition) + .rw_long_text(manager) + .not_null(), + ) .col( ColumnDef::new(Subscription::SubscriptionFromName) .string() diff --git a/src/meta/model/migration/src/m20240506_112555_subscription_partial_ckpt.rs b/src/meta/model/migration/src/m20240506_112555_subscription_partial_ckpt.rs index 1e0eec2c0bd2..e82f7aa59d1a 100644 --- a/src/meta/model/migration/src/m20240506_112555_subscription_partial_ckpt.rs +++ b/src/meta/model/migration/src/m20240506_112555_subscription_partial_ckpt.rs @@ -21,7 +21,11 @@ impl MigrationTrait for Migration { .primary_key(), ) .col(ColumnDef::new(Subscription::Name).string().not_null()) - .col(ColumnDef::new(Subscription::Definition).string().not_null()) + .col( + ColumnDef::new(Subscription::Definition) + .rw_long_text(manager) + .not_null(), + ) .col( ColumnDef::new(Subscription::RetentionSeconds) .string() @@ -67,7 +71,11 @@ impl MigrationTrait for Migration { .primary_key(), ) .col(ColumnDef::new(Subscription::Name).string().not_null()) - .col(ColumnDef::new(Subscription::Definition).string().not_null()) + .col( + ColumnDef::new(Subscription::Definition) + .rw_long_text(manager) + .not_null(), + ) .col( ColumnDef::new(Subscription::Columns) .rw_binary(manager) diff --git a/src/meta/model/migration/src/m20241025_062548_singleton_vnode_count.rs b/src/meta/model/migration/src/m20241025_062548_singleton_vnode_count.rs index bd2b12bd0724..be5b1fef90fd 100644 --- a/src/meta/model/migration/src/m20241025_062548_singleton_vnode_count.rs +++ b/src/meta/model/migration/src/m20241025_062548_singleton_vnode_count.rs @@ -6,22 +6,21 @@ pub struct Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + // Hack for unifying the test for different backends. + let json_is_empty_array = |col| { + Expr::col(col) + .cast_as(Alias::new("char(2)")) + .eq(Expr::value("[]")) + }; + // Fill vnode count with 1 for singleton tables. 
manager .exec_stmt( UpdateStatement::new() .table(Table::Table) .values([(Table::VnodeCount, Expr::value(1))]) - .and_where( - Expr::col(Table::DistributionKey) - .cast_as(Alias::new("varchar")) - .eq(Expr::value("[]")), - ) - .and_where( - Expr::col(Table::DistKeyInPk) - .cast_as(Alias::new("varchar")) - .eq(Expr::value("[]")), - ) + .and_where(json_is_empty_array(Table::DistributionKey)) + .and_where(json_is_empty_array(Table::DistKeyInPk)) .and_where(Expr::col(Table::VnodeColIndex).is_null()) .to_owned(), ) diff --git a/src/meta/model/migration/src/utils.rs b/src/meta/model/migration/src/utils.rs index 4f704374befd..7d3eb1fbb3ae 100644 --- a/src/meta/model/migration/src/utils.rs +++ b/src/meta/model/migration/src/utils.rs @@ -14,4 +14,16 @@ impl ColumnDef { DatabaseBackend::Postgres | DatabaseBackend::Sqlite => self.blob(), } } + + /// Set column type as `longtext` for MySQL, and `text` for Postgres and Sqlite. + /// + /// Should be preferred over [`text`](ColumnDef::text) or [`string`](ColumnDef::string) for large text fields, + /// typically user-specified contents like UDF body or SQL definition. Otherwise, MySQL will return an error + /// when the length exceeds 65535 bytes. + pub fn rw_long_text(&mut self, manager: &SchemaManager) -> &mut Self { + match manager.get_database_backend() { + DatabaseBackend::MySql => self.custom(Alias::new("longtext")), + DatabaseBackend::Postgres | DatabaseBackend::Sqlite => self.text(), + } + } } diff --git a/src/meta/src/barrier/mod.rs b/src/meta/src/barrier/mod.rs index 10e6338bc213..6a1cddc0ef74 100644 --- a/src/meta/src/barrier/mod.rs +++ b/src/meta/src/barrier/mod.rs @@ -1140,7 +1140,10 @@ impl GlobalBarrierWorker { .instrument(span) .await; } else { - panic!("failed to execute barrier: {}", err.as_report()); + panic!( + "a streaming error occurred while recovery is disabled, aborting: {:?}", + err.as_report() + ); } } diff --git a/src/meta/src/hummock/manager/gc.rs b/src/meta/src/hummock/manager/gc.rs index 6c8eb32e6a6c..4187ce818b34 100644 --- a/src/meta/src/hummock/manager/gc.rs +++ b/src/meta/src/hummock/manager/gc.rs @@ -385,12 +385,7 @@ impl HummockManager { .exec(db) .await?; hummock_gc_history::Entity::insert_many(models) - .on_conflict( - OnConflict::column(hummock_gc_history::Column::ObjectId) - .do_nothing() - .to_owned(), - ) - .do_nothing() + .on_conflict_do_nothing() .exec(db) .await?; Ok(()) diff --git a/src/meta/src/hummock/manager/time_travel.rs b/src/meta/src/hummock/manager/time_travel.rs index 4e28fa512abb..fab79fe01f9a 100644 --- a/src/meta/src/hummock/manager/time_travel.rs +++ b/src/meta/src/hummock/manager/time_travel.rs @@ -34,7 +34,6 @@ use risingwave_meta_model::{ hummock_time_travel_version, }; use risingwave_pb::hummock::{PbHummockVersion, PbHummockVersionDelta}; -use sea_orm::sea_query::OnConflict; use sea_orm::ActiveValue::Set; use sea_orm::{ ColumnTrait, Condition, DatabaseTransaction, EntityTrait, QueryFilter, QueryOrder, QuerySelect, @@ -383,12 +382,7 @@ impl HummockManager { sstable_info: Set(SstableInfoV2Backend::from(&sst_info.to_protobuf())), }; hummock_sstable_info::Entity::insert(m) - .on_conflict( - OnConflict::column(hummock_sstable_info::Column::SstId) - .do_nothing() - .to_owned(), - ) - .do_nothing() + .on_conflict_do_nothing() .exec(txn) .await?; count += 1; @@ -437,12 +431,7 @@ impl HummockManager { .into()), }; hummock_time_travel_version::Entity::insert(m) - .on_conflict( - OnConflict::column(hummock_time_travel_version::Column::VersionId) - .do_nothing() - .to_owned(), - ) - .do_nothing() + 
.on_conflict_do_nothing() .exec(txn) .await?; } @@ -468,12 +457,7 @@ impl HummockManager { .into()), }; hummock_time_travel_delta::Entity::insert(m) - .on_conflict( - OnConflict::column(hummock_time_travel_delta::Column::VersionId) - .do_nothing() - .to_owned(), - ) - .do_nothing() + .on_conflict_do_nothing() .exec(txn) .await?; } From 7690f674c48a7cb824dcd44fe3940a03d42e54ed Mon Sep 17 00:00:00 2001 From: Noel Kwan <47273164+kwannoel@users.noreply.github.com> Date: Mon, 4 Nov 2024 17:41:54 +0800 Subject: [PATCH 07/15] chore(deps): update sqlx to 0.8.2, sea-orm to 1.1, sea-orm-migration to 1.1 (#19145) --- Cargo.lock | 160 ++++++++++++++++++++------------------- Cargo.toml | 14 ++-- src/connector/Cargo.toml | 2 +- 3 files changed, 89 insertions(+), 87 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ea8e7d927645..fb9830efdcc4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3929,7 +3929,7 @@ dependencies = [ name = "delta_btree_map" version = "2.2.0-alpha" dependencies = [ - "educe 0.6.0", + "educe", "enum-as-inner 0.6.0", ] @@ -4454,18 +4454,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "educe" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4bd92664bf78c4d3dba9b7cdafce6fa15b13ed3ed16175218196942e99168a8" -dependencies = [ - "enum-ordinalize", - "proc-macro2", - "quote", - "syn 2.0.79", -] - [[package]] name = "educe" version = "0.6.0" @@ -5884,9 +5872,9 @@ checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" [[package]] name = "hashlink" -version = "0.8.4" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ "hashbrown 0.14.5", ] @@ -5919,9 +5907,6 @@ name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" -dependencies = [ - "unicode-segmentation", -] [[package]] name = "heck" @@ -7062,9 +7047,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.27.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" dependencies = [ "cc", "pkg-config", @@ -8602,9 +8587,9 @@ dependencies = [ [[package]] name = "ouroboros" -version = "0.17.2" +version = "0.18.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2ba07320d39dfea882faa70554b4bd342a5f273ed59ba7c1c6b4c840492c954" +checksum = "944fa20996a25aded6b4795c6d63f10014a7a83f8be9828a11860b08c5fc4a67" dependencies = [ "aliasable", "ouroboros_macro", @@ -8613,13 +8598,14 @@ dependencies = [ [[package]] name = "ouroboros_macro" -version = "0.17.2" +version = "0.18.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec4c6225c69b4ca778c0aea097321a64c421cf4577b331c61b229267edabb6f8" +checksum = "39b0deead1528fd0e5947a8546a9642a9777c25f6e1e26f34c97b204bbb465bd" dependencies = [ "heck 0.4.1", - "proc-macro-error 1.0.4", + "itertools 0.12.1", "proc-macro2", + "proc-macro2-diagnostics", "quote", "syn 2.0.79", ] @@ -9356,7 +9342,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" dependencies = [ 
"diff", - "yansi", + "yansi 0.5.1", ] [[package]] @@ -9472,6 +9458,19 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "proc-macro2-diagnostics" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", + "version_check", + "yansi 1.0.1", +] + [[package]] name = "procfs" version = "0.14.2" @@ -10660,7 +10659,7 @@ dependencies = [ "criterion", "darwin-libproc", "easy-ext", - "educe 0.6.0", + "educe", "either", "enum-as-inner 0.6.0", "enumflags2", @@ -10752,7 +10751,7 @@ name = "risingwave_common_estimate_size" version = "2.2.0-alpha" dependencies = [ "bytes", - "educe 0.6.0", + "educe", "ethnum", "fixedbitset 0.5.0", "jsonbb", @@ -11242,7 +11241,7 @@ dependencies = [ "const-currying", "downcast-rs", "easy-ext", - "educe 0.6.0", + "educe", "either", "enum-as-inner 0.6.0", "expect-test", @@ -11285,7 +11284,7 @@ dependencies = [ "chrono", "chrono-tz 0.10.0", "criterion", - "educe 0.6.0", + "educe", "expect-test", "fancy-regex", "futures-async-stream", @@ -11352,7 +11351,7 @@ dependencies = [ "downcast-rs", "dyn-clone", "easy-ext", - "educe 0.6.0", + "educe", "either", "enum-as-inner 0.6.0", "expect-test", @@ -11605,7 +11604,7 @@ dependencies = [ "comfy-table", "crepe", "easy-ext", - "educe 0.6.0", + "educe", "either", "enum-as-inner 0.6.0", "expect-test", @@ -11721,7 +11720,7 @@ version = "2.2.0-alpha" dependencies = [ "anyhow", "clap", - "educe 0.6.0", + "educe", "either", "futures", "hex", @@ -12146,7 +12145,7 @@ dependencies = [ "cfg-if", "criterion", "delta_btree_map", - "educe 0.6.0", + "educe", "either", "enum-as-inner 0.6.0", "expect-test", @@ -12794,13 +12793,13 @@ dependencies = [ [[package]] name = "sea-orm" -version = "1.0.1" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea1fee0cf8528dbe6eda29d5798afc522a63b75e44c5b15721e6e64af9c7cc4b" +checksum = "d5680a8b686985116607ef5f5af2b1f9e1cc2c228330e93101816a0baa279afa" dependencies = [ "async-stream", "async-trait", - "bigdecimal 0.3.1", + "bigdecimal 0.4.5", "chrono", "futures", "log", @@ -12822,9 +12821,9 @@ dependencies = [ [[package]] name = "sea-orm-cli" -version = "1.0.1" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0b8869c75cf3fbb1bd860abb025033cd2e514c5f4fa43e792697cb1fe6c882" +checksum = "70a157f42d291ccbd6e913b9d9b12dbe2ccbcf0472efc60c8715dd1254083aec" dependencies = [ "chrono", "clap", @@ -12839,9 +12838,9 @@ dependencies = [ [[package]] name = "sea-orm-macros" -version = "1.0.1" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8737b566799ed0444f278d13c300c4c6f1a91782f60ff5825a591852d5502030" +checksum = "3a239e3bb1b566ad4ec2654d0d193d6ceddfd733487edc9c21a64d214c773910" dependencies = [ "heck 0.4.1", "proc-macro2", @@ -12853,9 +12852,9 @@ dependencies = [ [[package]] name = "sea-orm-migration" -version = "1.0.1" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216643749e26ce27ab6c51d3475f2692981d4a902d34455bcd322f412900df5c" +checksum = "63ba07e9f2479cc671758fcb1edee42ff2e32c34b3e67ab41d0af1e41f73c74e" dependencies = [ "async-trait", "clap", @@ -12870,13 +12869,12 @@ dependencies = [ [[package]] name = "sea-query" -version = "0.31.1" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b4fd043b8117af233e221f73e3ea8dfbc8e8c3c928017c474296db45c649105c" +checksum = "ff504d13b5e4b52fffcf2fb203d0352a5722fa5151696db768933e41e1e591bb" dependencies = [ - "bigdecimal 0.3.1", + "bigdecimal 0.4.5", "chrono", - "educe 0.5.11", "inherent", "ordered-float 3.9.1", "rust_decimal", @@ -12888,11 +12886,11 @@ dependencies = [ [[package]] name = "sea-query-binder" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "754965d4aee6145bec25d0898e5c931e6c22859789ce62fd85a42a15ed5a8ce3" +checksum = "b0019f47430f7995af63deda77e238c17323359af241233ec768aba1faea7608" dependencies = [ - "bigdecimal 0.3.1", + "bigdecimal 0.4.5", "chrono", "rust_decimal", "sea-query", @@ -12918,9 +12916,9 @@ dependencies = [ [[package]] name = "sea-schema" -version = "0.15.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad52149fc81836ea7424c3425d8f6ed8ad448dd16d2e4f6a3907ba46f3f2fd78" +checksum = "aab1592d17860a9a8584d9b549aebcd06f7bdc3ff615f71752486ba0b05b1e6e" dependencies = [ "futures", "sea-query", @@ -13681,7 +13679,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2b6b8f606d3c4cdcaf2031c4320b79d7584e454b79562ba3d675f49701c160e" dependencies = [ "async-trait", - "educe 0.6.0", + "educe", "fs-err", "futures", "glob", @@ -13730,8 +13728,8 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.7.4" -source = "git+https://github.com/risingwavelabs/sqlx.git?rev=ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a#ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a" +version = "0.8.2" +source = "git+https://github.com/madsim-rs/sqlx.git?rev=3efe6d0065963db2a2b7f30dee69f647e28dec81#3efe6d0065963db2a2b7f30dee69f647e28dec81" dependencies = [ "sqlx-core", "sqlx-macros", @@ -13742,24 +13740,24 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.7.4" -source = "git+https://github.com/risingwavelabs/sqlx.git?rev=ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a#ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a" +version = "0.8.2" +source = "git+https://github.com/madsim-rs/sqlx.git?rev=3efe6d0065963db2a2b7f30dee69f647e28dec81#3efe6d0065963db2a2b7f30dee69f647e28dec81" dependencies = [ - "ahash 0.8.11", "atoi", - "bigdecimal 0.3.1", + "bigdecimal 0.4.5", "byteorder", "bytes", "chrono", "crc", "crossbeam-queue", "either", - "event-listener 2.5.3", + "event-listener 5.3.1", "futures-channel", "futures-core", "futures-intrusive", "futures-io", "futures-util", + "hashbrown 0.14.5", "hashlink", "hex", "indexmap 2.6.0", @@ -13786,24 +13784,24 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.7.4" -source = "git+https://github.com/risingwavelabs/sqlx.git?rev=ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a#ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a" +version = "0.8.2" +source = "git+https://github.com/madsim-rs/sqlx.git?rev=3efe6d0065963db2a2b7f30dee69f647e28dec81#3efe6d0065963db2a2b7f30dee69f647e28dec81" dependencies = [ "proc-macro2", "quote", "sqlx-core", "sqlx-macros-core", - "syn 1.0.109", + "syn 2.0.79", ] [[package]] name = "sqlx-macros-core" -version = "0.7.4" -source = "git+https://github.com/risingwavelabs/sqlx.git?rev=ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a#ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a" +version = "0.8.2" +source = "git+https://github.com/madsim-rs/sqlx.git?rev=3efe6d0065963db2a2b7f30dee69f647e28dec81#3efe6d0065963db2a2b7f30dee69f647e28dec81" dependencies = [ "dotenvy", "either", - "heck 0.4.1", + "heck 0.5.0", "hex", "madsim-tokio", "once_cell", @@ -13816,19 +13814,19 @@ 
dependencies = [ "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", - "syn 1.0.109", + "syn 2.0.79", "tempfile", "url", ] [[package]] name = "sqlx-mysql" -version = "0.7.4" -source = "git+https://github.com/risingwavelabs/sqlx.git?rev=ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a#ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a" +version = "0.8.2" +source = "git+https://github.com/madsim-rs/sqlx.git?rev=3efe6d0065963db2a2b7f30dee69f647e28dec81#3efe6d0065963db2a2b7f30dee69f647e28dec81" dependencies = [ "atoi", - "base64 0.21.7", - "bigdecimal 0.3.1", + "base64 0.22.0", + "bigdecimal 0.4.5", "bitflags 2.6.0", "byteorder", "bytes", @@ -13869,12 +13867,12 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.7.4" -source = "git+https://github.com/risingwavelabs/sqlx.git?rev=ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a#ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a" +version = "0.8.2" +source = "git+https://github.com/madsim-rs/sqlx.git?rev=3efe6d0065963db2a2b7f30dee69f647e28dec81#3efe6d0065963db2a2b7f30dee69f647e28dec81" dependencies = [ "atoi", - "base64 0.21.7", - "bigdecimal 0.3.1", + "base64 0.22.0", + "bigdecimal 0.4.5", "bitflags 2.6.0", "byteorder", "chrono", @@ -13912,8 +13910,8 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.7.4" -source = "git+https://github.com/risingwavelabs/sqlx.git?rev=ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a#ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a" +version = "0.8.2" +source = "git+https://github.com/madsim-rs/sqlx.git?rev=3efe6d0065963db2a2b7f30dee69f647e28dec81#3efe6d0065963db2a2b7f30dee69f647e28dec81" dependencies = [ "atoi", "chrono", @@ -13927,11 +13925,11 @@ dependencies = [ "madsim-tokio", "percent-encoding", "serde", + "serde_urlencoded", "sqlx-core", "time", "tracing", "url", - "urlencoding", "uuid", ] @@ -16504,6 +16502,12 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "yansi" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + [[package]] name = "yup-oauth2" version = "8.3.0" diff --git a/Cargo.toml b/Cargo.toml index 17ba5bf07551..0e1c1725baa8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -175,13 +175,13 @@ parking_lot = { version = "0.12", features = [ "arc_lock", "deadlock_detection", ] } -sea-orm = { version = "~1.0", features = [ +sea-orm = { version = "~1.1", features = [ "sqlx-all", "runtime-tokio-native-tls", "with-uuid", ] } -sea-orm-migration = "~1.0" -sqlx = { version = "0.7.4", default-features = false, features = [ +sea-orm-migration = "~1.1" +sqlx = { version = "0.8.2", default-features = false, features = [ "bigdecimal", "chrono", "json", @@ -350,11 +350,9 @@ getrandom = { git = "https://github.com/madsim-rs/getrandom.git", rev = "e79a7ae # tokio-stream = { git = "https://github.com/madsim-rs/tokio.git", rev = "0dd1055" } tokio-retry = { git = "https://github.com/madsim-rs/rust-tokio-retry.git", rev = "95e2fd3" } tokio-postgres = { git = "https://github.com/madsim-rs/rust-postgres.git", rev = "ac00d88" } - -# sqlx version: v0.7.4 -# patch diffs: https://github.com/madsim-rs/sqlx/pull/3 -sqlx = { git = "https://github.com/risingwavelabs/sqlx.git", rev = "ff6d6d2dc0e9e8e47282fd29be006eed7ae3421a" } - +# sqlx version: v0.8.2 +# patch diffs: https://github.com/madsim-rs/sqlx/pull/4 +sqlx = { git = "https://github.com/madsim-rs/sqlx.git", rev = "3efe6d0065963db2a2b7f30dee69f647e28dec81" } 
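The hummock manager changes earlier in this series replace the verbose `OnConflict` builder with sea-orm's `on_conflict_do_nothing` shorthand, which is part of the 1.x line pinned by this bump. As a rough, self-contained sketch of that pattern (the `gc_history_demo` entity and `record_objects` function are made up for illustration, not the repository's real `hummock_gc_history` model):

```rust
use sea_orm::entity::prelude::*;
use sea_orm::{DatabaseConnection, DbErr, Set};

// Hypothetical entity for illustration only.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel)]
#[sea_orm(table_name = "gc_history_demo")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    pub object_id: i64,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}

impl ActiveModelBehavior for ActiveModel {}

pub async fn record_objects(db: &DatabaseConnection, ids: Vec<i64>) -> Result<(), DbErr> {
    let models = ids.into_iter().map(|id| ActiveModel { object_id: Set(id) });
    // Rows that hit a duplicate key are skipped without spelling out an
    // `OnConflict` clause column by column, which is what the removed
    // `.on_conflict(...)` + `.do_nothing()` pairs used to do.
    Entity::insert_many(models)
        .on_conflict_do_nothing()
        .exec(db)
        .await?;
    Ok(())
}
```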
futures-timer = { git = "https://github.com/madsim-rs/futures-timer.git", rev = "05b33b4" } # patch to remove preserve_order from serde_json bson = { git = "https://github.com/risingwavelabs/bson-rust", rev = "e5175ec" } diff --git a/src/connector/Cargo.toml b/src/connector/Cargo.toml index 14257251041f..3fe5499e41d0 100644 --- a/src/connector/Cargo.toml +++ b/src/connector/Cargo.toml @@ -128,7 +128,7 @@ rustls-native-certs = "0.7" rustls-pemfile = "2" rustls-pki-types = "1" rw_futures_util = { workspace = true } -sea-schema = { version = "0.15", default-features = false, features = [ +sea-schema = { version = "0.16", default-features = false, features = [ "discovery", "sqlx-postgres", "sqlx-mysql", From a9f8945658dc2044f8e2f4eef5037ec6b7f42330 Mon Sep 17 00:00:00 2001 From: xxchan Date: Mon, 4 Nov 2024 18:20:30 +0800 Subject: [PATCH 08/15] refactor(meta): simplify stream job table/source id assignment (#19171) Signed-off-by: xxchan Co-authored-by: Bugen Zhao --- .git-blame-ignore-revs | 3 + proto/catalog.proto | 2 + .../src/handler/alter_table_column.rs | 4 +- src/frontend/src/handler/create_sink.rs | 4 +- src/frontend/src/handler/create_table.rs | 18 +++-- src/meta/service/src/ddl_service.rs | 37 +++++---- src/meta/src/manager/streaming_job.rs | 4 - src/meta/src/rpc/ddl_controller.rs | 76 +------------------ src/meta/src/stream/source_manager.rs | 1 + src/meta/src/stream/stream_graph/fragment.rs | 60 ++++++++++----- 10 files changed, 85 insertions(+), 124 deletions(-) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index b8ca322d767a..c8e1f42656e4 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -42,3 +42,6 @@ c583e2c6c054764249acf484438c7bf7197765f4 # chore: replace all ProstXxx with PbXxx (#8621) 6fd8821f2e053957b183d648bea9c95b6703941f + +# chore: cleanup v2 naming for sql metastore (#18941) +9a6a7f9052d5679165ff57cc01417c742c95351c diff --git a/proto/catalog.proto b/proto/catalog.proto index 169347c199eb..5383104e9c0f 100644 --- a/proto/catalog.proto +++ b/proto/catalog.proto @@ -95,6 +95,8 @@ message StreamSourceInfo { } message Source { + // For shared source, this is the same as the job id. + // For non-shared source and table with connector, this is a different oid. 
uint32 id = 1; uint32 schema_id = 2; uint32 database_id = 3; diff --git a/src/frontend/src/handler/alter_table_column.rs b/src/frontend/src/handler/alter_table_column.rs index 1241553aff04..ce90fa94253f 100644 --- a/src/frontend/src/handler/alter_table_column.rs +++ b/src/frontend/src/handler/alter_table_column.rs @@ -35,7 +35,7 @@ use risingwave_sqlparser::ast::{ use risingwave_sqlparser::parser::Parser; use super::create_source::get_json_schema_location; -use super::create_table::{generate_stream_graph_for_table, ColumnIdGenerator}; +use super::create_table::{generate_stream_graph_for_replace_table, ColumnIdGenerator}; use super::util::SourceSchemaCompatExt; use super::{HandlerArgs, RwPgResponse}; use crate::catalog::root_catalog::SchemaPath; @@ -192,7 +192,7 @@ pub async fn get_replace_table_plan( panic!("unexpected statement type: {:?}", definition); }; - let (mut graph, table, source, job_type) = generate_stream_graph_for_table( + let (mut graph, table, source, job_type) = generate_stream_graph_for_replace_table( session, table_name, original_catalog, diff --git a/src/frontend/src/handler/create_sink.rs b/src/frontend/src/handler/create_sink.rs index ea9f9e98f9b7..24a7e96fe15d 100644 --- a/src/frontend/src/handler/create_sink.rs +++ b/src/frontend/src/handler/create_sink.rs @@ -54,7 +54,7 @@ use crate::error::{ErrorCode, Result, RwError}; use crate::expr::{rewrite_now_to_proctime, ExprImpl, InputRef}; use crate::handler::alter_table_column::fetch_table_catalog_for_alter; use crate::handler::create_mv::parse_column_names; -use crate::handler::create_table::{generate_stream_graph_for_table, ColumnIdGenerator}; +use crate::handler::create_table::{generate_stream_graph_for_replace_table, ColumnIdGenerator}; use crate::handler::privilege::resolve_query_privileges; use crate::handler::util::SourceSchemaCompatExt; use crate::handler::HandlerArgs; @@ -672,7 +672,7 @@ pub(crate) async fn reparse_table_for_sink( panic!("unexpected statement type: {:?}", definition); }; - let (graph, table, source, _) = generate_stream_graph_for_table( + let (graph, table, source, _) = generate_stream_graph_for_replace_table( session, table_name, table_catalog, diff --git a/src/frontend/src/handler/create_table.rs b/src/frontend/src/handler/create_table.rs index eab38a44c4ff..1e5dc489c1a0 100644 --- a/src/frontend/src/handler/create_table.rs +++ b/src/frontend/src/handler/create_table.rs @@ -35,6 +35,7 @@ use risingwave_connector::source::cdc::external::{ ExternalTableConfig, ExternalTableImpl, DATABASE_NAME_KEY, SCHEMA_NAME_KEY, TABLE_NAME_KEY, }; use risingwave_connector::{source, WithOptionsSecResolved}; +use risingwave_pb::catalog::source::OptionalAssociatedTableId; use risingwave_pb::catalog::{PbSource, PbTable, Table, WatermarkDesc}; use risingwave_pb::ddl_service::TableJobType; use risingwave_pb::plan_common::column_desc::GeneratedOrDefaultColumn; @@ -1322,7 +1323,7 @@ pub fn check_create_table_with_source( } #[allow(clippy::too_many_arguments)] -pub async fn generate_stream_graph_for_table( +pub async fn generate_stream_graph_for_replace_table( _session: &Arc, table_name: ObjectName, original_catalog: &Arc, @@ -1341,7 +1342,7 @@ pub async fn generate_stream_graph_for_table( ) -> Result<(StreamFragmentGraph, Table, Option, TableJobType)> { use risingwave_pb::catalog::table::OptionalAssociatedSourceId; - let ((plan, source, table), job_type) = match (format_encode, cdc_table_info.as_ref()) { + let ((plan, mut source, table), job_type) = match (format_encode, cdc_table_info.as_ref()) { (Some(format_encode), 
None) => ( gen_create_table_plan_with_source( handler_args, @@ -1441,13 +1442,18 @@ pub async fn generate_stream_graph_for_table( let graph = build_graph(plan)?; // Fill the original table ID. - let table = Table { + let mut table = Table { id: original_catalog.id().table_id(), - optional_associated_source_id: original_catalog - .associated_source_id() - .map(|source_id| OptionalAssociatedSourceId::AssociatedSourceId(source_id.into())), ..table }; + if let Some(source_id) = original_catalog.associated_source_id() { + table.optional_associated_source_id = Some(OptionalAssociatedSourceId::AssociatedSourceId( + source_id.table_id, + )); + source.as_mut().unwrap().id = source_id.table_id; + source.as_mut().unwrap().optional_associated_table_id = + Some(OptionalAssociatedTableId::AssociatedTableId(table.id)) + } Ok((graph, table, source, job_type)) } diff --git a/src/meta/service/src/ddl_service.rs b/src/meta/service/src/ddl_service.rs index 1d0cc36fff3f..e59c4a410014 100644 --- a/src/meta/service/src/ddl_service.rs +++ b/src/meta/service/src/ddl_service.rs @@ -23,9 +23,7 @@ use risingwave_common::types::DataType; use risingwave_common::util::column_index_mapping::ColIndexMapping; use risingwave_connector::sink::catalog::SinkId; use risingwave_meta::manager::{EventLogManagerRef, MetadataManager}; -use risingwave_meta::rpc::ddl_controller::fill_table_stream_graph_info; use risingwave_meta::rpc::metrics::MetaMetrics; -use risingwave_pb::catalog::table::OptionalAssociatedSourceId; use risingwave_pb::catalog::{Comment, CreateType, Secret, Table}; use risingwave_pb::common::worker_node::State; use risingwave_pb::common::WorkerType; @@ -84,27 +82,28 @@ impl DdlServiceImpl { } } - fn extract_replace_table_info(change: ReplaceTablePlan) -> ReplaceTableInfo { - let job_type = change.get_job_type().unwrap_or_default(); - let mut source = change.source; - let mut fragment_graph = change.fragment_graph.unwrap(); - let mut table = change.table.unwrap(); - if let Some(OptionalAssociatedSourceId::AssociatedSourceId(source_id)) = - table.optional_associated_source_id - { - source.as_mut().unwrap().id = source_id; - fill_table_stream_graph_info(&mut source, &mut table, job_type, &mut fragment_graph); - } - let table_col_index_mapping = change - .table_col_index_mapping + fn extract_replace_table_info( + ReplaceTablePlan { + table, + fragment_graph, + table_col_index_mapping, + source, + job_type, + }: ReplaceTablePlan, + ) -> ReplaceTableInfo { + let table = table.unwrap(); + let col_index_mapping = table_col_index_mapping .as_ref() .map(ColIndexMapping::from_protobuf); - let stream_job = StreamingJob::Table(source, table, job_type); ReplaceTableInfo { - streaming_job: stream_job, - fragment_graph, - col_index_mapping: table_col_index_mapping, + streaming_job: StreamingJob::Table( + source, + table, + TableJobType::try_from(job_type).unwrap(), + ), + fragment_graph: fragment_graph.unwrap(), + col_index_mapping, } } } diff --git a/src/meta/src/manager/streaming_job.rs b/src/meta/src/manager/streaming_job.rs index 5257b2c1aa24..9d5b135095fb 100644 --- a/src/meta/src/manager/streaming_job.rs +++ b/src/meta/src/manager/streaming_job.rs @@ -328,8 +328,4 @@ impl StreamingJob { StreamingJob::MaterializedView(_) | StreamingJob::Index(_, _) => Ok(HashSet::new()), } } - - pub fn is_source_job(&self) -> bool { - matches!(self, StreamingJob::Source(_)) - } } diff --git a/src/meta/src/rpc/ddl_controller.rs b/src/meta/src/rpc/ddl_controller.rs index f0cbd1d5a477..fd336a1d0a5b 100644 --- a/src/meta/src/rpc/ddl_controller.rs 
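For the replace-table path shown above, the essential invariant is that the regenerated `Table` keeps the original table id and, when a connector source is attached, the pair keeps pointing at each other through the original source id. A simplified sketch of that invariant, using stand-in structs and a hypothetical `relink_replaced_table` helper rather than the actual protobuf catalog types:

```rust
// Stand-ins for illustration; the real `PbTable` / `PbSource` carry many more
// fields and wrap these ids in `OptionalAssociated*Id` enums.
struct TableCatalog {
    id: u32,
    associated_source_id: Option<u32>,
}

struct SourceCatalog {
    id: u32,
    associated_table_id: Option<u32>,
}

/// Re-link a freshly generated table/source pair to the ids of the catalog
/// objects being replaced.
fn relink_replaced_table(
    table: &mut TableCatalog,
    source: Option<&mut SourceCatalog>,
    original_table_id: u32,
    original_source_id: Option<u32>,
) {
    // The new table always reuses the id of the table it replaces.
    table.id = original_table_id;
    // If the table owns a connector source, both sides keep referencing each
    // other through the original source id.
    if let (Some(source), Some(source_id)) = (source, original_source_id) {
        table.associated_source_id = Some(source_id);
        source.id = source_id;
        source.associated_table_id = Some(table.id);
    }
}
```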
+++ b/src/meta/src/rpc/ddl_controller.rs @@ -27,7 +27,7 @@ use risingwave_common::secret::SecretEncryption; use risingwave_common::system_param::reader::SystemParamsRead; use risingwave_common::util::column_index_mapping::ColIndexMapping; use risingwave_common::util::stream_graph_visitor::{ - visit_fragment, visit_stream_node, visit_stream_node_cont_mut, + visit_stream_node, visit_stream_node_cont_mut, }; use risingwave_common::{bail, hash, must_match}; use risingwave_connector::error::ConnectorError; @@ -40,11 +40,9 @@ use risingwave_meta_model::{ ConnectionId, DatabaseId, FunctionId, IndexId, ObjectId, SchemaId, SecretId, SinkId, SourceId, SubscriptionId, TableId, UserId, ViewId, }; -use risingwave_pb::catalog::source::OptionalAssociatedTableId; -use risingwave_pb::catalog::table::OptionalAssociatedSourceId; use risingwave_pb::catalog::{ - Comment, Connection, CreateType, Database, Function, PbSink, PbSource, PbTable, Schema, Secret, - Sink, Source, Subscription, Table, View, + Comment, Connection, CreateType, Database, Function, PbSink, Schema, Secret, Sink, Source, + Subscription, Table, View, }; use risingwave_pb::ddl_service::alter_owner_request::Object; use risingwave_pb::ddl_service::{ @@ -906,7 +904,7 @@ impl DdlController { pub async fn create_streaming_job( &self, mut streaming_job: StreamingJob, - mut fragment_graph: StreamFragmentGraphProto, + fragment_graph: StreamFragmentGraphProto, affected_table_replace_info: Option, ) -> MetaResult { let ctx = StreamContext::from_protobuf(fragment_graph.get_ctx().unwrap()); @@ -921,24 +919,6 @@ impl DdlController { .await?; let job_id = streaming_job.id(); - match &mut streaming_job { - StreamingJob::Table(src, table, job_type) => { - // If we're creating a table with connector, we should additionally fill its ID first. - fill_table_stream_graph_info(src, table, *job_type, &mut fragment_graph); - } - StreamingJob::Source(src) => { - // set the inner source id of source node. - for fragment in fragment_graph.fragments.values_mut() { - visit_fragment(fragment, |node_body| { - if let NodeBody::Source(source_node) = node_body { - source_node.source_inner.as_mut().unwrap().source_id = src.id; - } - }); - } - } - _ => {} - } - tracing::debug!( id = job_id, definition = streaming_job.definition(), @@ -2003,51 +1983,3 @@ impl DdlController { .await } } - -/// Fill in necessary information for `Table` stream graph. -/// e.g., fill source id for table with connector, fill external table id for CDC table. -pub fn fill_table_stream_graph_info( - source: &mut Option, - table: &mut PbTable, - table_job_type: TableJobType, - fragment_graph: &mut PbStreamFragmentGraph, -) { - let mut source_count = 0; - for fragment in fragment_graph.fragments.values_mut() { - visit_fragment(fragment, |node_body| { - if let NodeBody::Source(source_node) = node_body { - if source_node.source_inner.is_none() { - // skip empty source for dml node - return; - } - - // If we're creating a table with connector, we should additionally fill its ID first. - if let Some(source) = source { - source_node.source_inner.as_mut().unwrap().source_id = source.id; - source_count += 1; - - assert_eq!( - source_count, 1, - "require exactly 1 external stream source when creating table with a connector" - ); - - // Fill in the correct table id for source. - source.optional_associated_table_id = - Some(OptionalAssociatedTableId::AssociatedTableId(table.id)); - // Fill in the correct source id for mview. 
- table.optional_associated_source_id = - Some(OptionalAssociatedSourceId::AssociatedSourceId(source.id)); - } - } - - // fill table id for cdc backfill - if let NodeBody::StreamCdcScan(node) = node_body - && table_job_type == TableJobType::SharedCdcSource - { - if let Some(table_desc) = node.cdc_table_desc.as_mut() { - table_desc.table_id = table.id; - } - } - }); - } -} diff --git a/src/meta/src/stream/source_manager.rs b/src/meta/src/stream/source_manager.rs index b13acb68ac39..ef8c1298264b 100644 --- a/src/meta/src/stream/source_manager.rs +++ b/src/meta/src/stream/source_manager.rs @@ -1002,6 +1002,7 @@ impl SourceManager { /// create and register connector worker for source. pub async fn register_source(&self, source: &Source) -> MetaResult<()> { + tracing::debug!("register_source: {}", source.get_id()); let mut core = self.core.lock().await; if let Entry::Vacant(e) = core.managed_sources.entry(source.get_id() as _) { let handle = create_source_worker_handle(source, self.metrics.clone()) diff --git a/src/meta/src/stream/stream_graph/fragment.rs b/src/meta/src/stream/stream_graph/fragment.rs index 86a2197a9d5b..c2ccd4300ccf 100644 --- a/src/meta/src/stream/stream_graph/fragment.rs +++ b/src/meta/src/stream/stream_graph/fragment.rs @@ -56,7 +56,7 @@ pub(super) struct BuildingFragment { inner: StreamFragment, /// The ID of the job if it contains the streaming job node. - table_id: Option, + job_id: Option, /// The required column IDs of each upstream table. /// Will be converted to indices when building the edge connected to the upstream. @@ -82,12 +82,12 @@ impl BuildingFragment { // Fill the information of the internal tables in the fragment. Self::fill_internal_tables(&mut fragment, job, table_id_gen); - let table_id = Self::fill_job(&mut fragment, job).then(|| job.id()); + let job_id = Self::fill_job(&mut fragment, job).then(|| job.id()); let upstream_table_columns = Self::extract_upstream_table_columns(&mut fragment); Self { inner: fragment, - table_id, + job_id, upstream_table_columns, } } @@ -126,17 +126,17 @@ impl BuildingFragment { /// Fill the information with the job in the fragment. fn fill_job(fragment: &mut StreamFragment, job: &StreamingJob) -> bool { - let table_id = job.id(); + let job_id = job.id(); let fragment_id = fragment.fragment_id; - let mut has_table = false; + let mut has_job = false; stream_graph_visitor::visit_fragment(fragment, |node_body| match node_body { NodeBody::Materialize(materialize_node) => { - materialize_node.table_id = table_id; + materialize_node.table_id = job_id; // Fill the ID of the `Table`. 
                let table = materialize_node.table.as_mut().unwrap();
-                table.id = table_id;
+                table.id = job_id;
                 table.database_id = job.database_id();
                 table.schema_id = job.schema_id();
                 table.fragment_id = fragment_id;
@@ -145,27 +145,49 @@ impl BuildingFragment {
                     table.definition = job.name();
                 }
 
-                has_table = true;
+                has_job = true;
             }
             NodeBody::Sink(sink_node) => {
-                sink_node.sink_desc.as_mut().unwrap().id = table_id;
+                sink_node.sink_desc.as_mut().unwrap().id = job_id;
 
-                has_table = true;
+                has_job = true;
             }
             NodeBody::Dml(dml_node) => {
-                dml_node.table_id = table_id;
+                dml_node.table_id = job_id;
                 dml_node.table_version_id = job.table_version_id().unwrap();
             }
-            NodeBody::Source(_) => {
-                // Notice: Table job has a dumb Source node, we should be careful that `has_table` should not be overwrite to `false`
-                if !has_table {
-                    has_table = job.is_source_job();
+            NodeBody::Source(source_node) => {
+                match job {
+                    // Note: For table without connector, it has a dummy Source node.
+                    // Note: For table with connector, its source node has a source id different from the table id (job id), assigned in create_job_catalog.
+                    StreamingJob::Table(source, _table, _table_job_type) => {
+                        if let Some(source_inner) = source_node.source_inner.as_mut() {
+                            if let Some(source) = source {
+                                debug_assert_ne!(source.id, job_id);
+                                source_inner.source_id = source.id;
+                            }
+                        }
+                    }
+                    StreamingJob::Source(source) => {
+                        has_job = true;
+                        if let Some(source_inner) = source_node.source_inner.as_mut() {
+                            debug_assert_eq!(source.id, job_id);
+                            source_inner.source_id = source.id;
+                        }
+                    }
+                    // For other job types, no need to fill the source id, since it refers to an existing source.
+                    _ => {}
+                }
+            }
+            NodeBody::StreamCdcScan(node) => {
+                if let Some(table_desc) = node.cdc_table_desc.as_mut() {
+                    table_desc.table_id = job_id;
                 }
             }
             _ => {}
         });
 
-        has_table
+        has_job
     }
 
     /// Extract the required columns (in IDs) of each upstream table. 
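The new `Source` arm above encodes the id-assignment convention of this patch: a shared source is itself the streaming job, so its source id equals the job id; a table with a connector owns a separate source object with its own id; all other job types refer to existing sources and fill nothing. A compact restatement of that rule, using made-up stand-in types (`JobKind`, `source_id_to_fill`) rather than the real `StreamingJob` enum:

```rust
// Illustrative stand-ins only; ids are plain u32s in place of the catalog types.
enum JobKind {
    /// Shared source: the job *is* the source.
    SharedSource { source_id: u32 },
    /// Table with an attached connector source: the source is a separate object.
    TableWithConnector { source_id: u32 },
    /// Everything else references existing sources and fills nothing here.
    Other,
}

/// Return the source id that should be written into a source node, if any.
fn source_id_to_fill(job_id: u32, job: &JobKind) -> Option<u32> {
    match job {
        JobKind::SharedSource { source_id } => {
            // Shared source: source id and job id coincide.
            debug_assert_eq!(*source_id, job_id);
            Some(*source_id)
        }
        JobKind::TableWithConnector { source_id } => {
            // Table with connector: the source gets its own, different id.
            debug_assert_ne!(*source_id, job_id);
            Some(*source_id)
        }
        JobKind::Other => None,
    }
}
```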
@@ -499,7 +521,7 @@ impl StreamFragmentGraph { pub fn table_fragment_id(&self) -> FragmentId { self.fragments .values() - .filter(|b| b.table_id.is_some()) + .filter(|b| b.job_id.is_some()) .map(|b| b.fragment_id) .exactly_one() .expect("require exactly 1 materialize/sink/cdc source node when creating the streaming job") @@ -1095,7 +1117,7 @@ impl CompleteStreamFragmentGraph { let internal_tables = building_fragment.extract_internal_tables(); let BuildingFragment { inner, - table_id, + job_id, upstream_table_columns: _, } = building_fragment; @@ -1104,7 +1126,7 @@ impl CompleteStreamFragmentGraph { let materialized_fragment_id = if inner.fragment_type_mask & FragmentTypeFlag::Mview as u32 != 0 { - table_id + job_id } else { None }; From 946b50089673b026720905e5cccece0485192553 Mon Sep 17 00:00:00 2001 From: August Date: Mon, 4 Nov 2024 18:40:53 +0800 Subject: [PATCH 09/15] fix: fix index create privilege check and ensure consistency between the owner and its associated table (#19252) --- src/frontend/src/handler/create_index.rs | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/frontend/src/handler/create_index.rs b/src/frontend/src/handler/create_index.rs index 4b32bedc01ea..1bf7ad16e1c9 100644 --- a/src/frontend/src/handler/create_index.rs +++ b/src/frontend/src/handler/create_index.rs @@ -21,11 +21,9 @@ use either::Either; use fixedbitset::FixedBitSet; use itertools::Itertools; use pgwire::pg_response::{PgResponse, StatementType}; -use risingwave_common::acl::AclMode; use risingwave_common::catalog::{IndexId, TableDesc, TableId}; use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; use risingwave_pb::catalog::{PbIndex, PbIndexColumnProperties, PbStreamJobStatus, PbTable}; -use risingwave_pb::user::grant_privilege::Object; use risingwave_sqlparser::ast; use risingwave_sqlparser::ast::{Ident, ObjectName, OrderByExpr}; @@ -34,7 +32,6 @@ use crate::binder::Binder; use crate::catalog::root_catalog::SchemaPath; use crate::error::{ErrorCode, Result}; use crate::expr::{Expr, ExprImpl, ExprRewriter, InputRef}; -use crate::handler::privilege::ObjectCheckItem; use crate::handler::HandlerArgs; use crate::optimizer::plan_expr_rewriter::ConstEvalRewriter; use crate::optimizer::plan_node::{Explain, LogicalProject, LogicalScan, StreamMaterialize}; @@ -83,11 +80,11 @@ pub(crate) fn gen_create_index_plan( ); } - session.check_privileges(&[ObjectCheckItem::new( - table.owner, - AclMode::Select, - Object::TableId(table.id.table_id), - )])?; + if !session.is_super_user() && session.user_id() != table.owner { + return Err( + ErrorCode::PermissionDenied(format!("must be owner of table {}", table.name)).into(), + ); + } let mut binder = Binder::new_for_stream(session); binder.bind_table(Some(&schema_name), &table_name, None)?; @@ -202,7 +199,7 @@ pub(crate) fn gen_create_index_plan( &index_columns_ordered_expr, &include_columns_expr, // We use the first index column as distributed key by default if users - // haven't specify the distributed by columns. + // haven't specified the distributed by columns. 
if distributed_columns_expr.is_empty() { 1 } else { @@ -221,7 +218,7 @@ pub(crate) fn gen_create_index_plan( index_table_prost.retention_seconds = table.retention_seconds; } - index_table_prost.owner = session.user_id(); + index_table_prost.owner = table.owner; index_table_prost.dependent_relations = vec![table.id.table_id]; let index_columns_len = index_columns_ordered_expr.len() as u32; From bb0d786b39c9eef9dbf1e16814e8d12e46b8a676 Mon Sep 17 00:00:00 2001 From: Noel Kwan <47273164+kwannoel@users.noreply.github.com> Date: Mon, 4 Nov 2024 19:13:05 +0800 Subject: [PATCH 10/15] fix(connector): handle backwards compat parsing for `boolean` datatype (#19251) Co-authored-by: William Wen --- e2e_test/source_legacy/cdc/cdc.share_stream.slt | 6 +++--- src/connector/src/parser/mysql.rs | 8 ++++++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/e2e_test/source_legacy/cdc/cdc.share_stream.slt b/e2e_test/source_legacy/cdc/cdc.share_stream.slt index 53148c66836e..cf1000957b6f 100644 --- a/e2e_test/source_legacy/cdc/cdc.share_stream.slt +++ b/e2e_test/source_legacy/cdc/cdc.share_stream.slt @@ -232,10 +232,10 @@ SELECT order_id,order_date,customer_name,product_id FROM orders_test order by or 10003 2020-07-30 12:00:30 Edward 106 query IIIIITTTTTTTTT -SELECT c_tinyint, c_smallint, c_mediumint, c_integer, c_bigint, c_decimal, c_float, c_double, c_char_255, c_varchar_10000, c_date, c_time, c_datetime, c_timestamp FROM mysql_all_types order by c_bigint; +SELECT c_boolean, c_bit, c_tinyint, c_smallint, c_mediumint, c_integer, c_bigint, c_decimal, c_float, c_double, c_char_255, c_varchar_10000, c_date, c_time, c_datetime, c_timestamp FROM mysql_all_types order by c_bigint; ---- --128 -32767 -8388608 -2147483647 -9223372036854775807 -10 -10000 -10000 a b 1001-01-01 00:00:00 1998-01-01 00:00:00 1970-01-01 00:00:01+00:00 -NULL NULL -8388608 -2147483647 9223372036854775806 -10 -10000 -10000 c d 1001-01-01 NULL 2000-01-01 00:00:00 NULL +t t -128 -32767 -8388608 -2147483647 -9223372036854775807 -10 -10000 -10000 a b 1001-01-01 00:00:00 1998-01-01 00:00:00 1970-01-01 00:00:01+00:00 +f f NULL NULL -8388608 -2147483647 9223372036854775806 -10 -10000 -10000 c d 1001-01-01 NULL 2000-01-01 00:00:00 NULL statement ok create secret pg_pwd with ( diff --git a/src/connector/src/parser/mysql.rs b/src/connector/src/parser/mysql.rs index e9a8eeba70cb..9970ad50003c 100644 --- a/src/connector/src/parser/mysql.rs +++ b/src/connector/src/parser/mysql.rs @@ -75,6 +75,14 @@ pub fn mysql_datum_to_rw_datum( ) -> Result { match rw_data_type { DataType::Boolean => { + // TinyInt(1) is used to represent boolean in MySQL + // This handles backwards compatibility, + // before https://github.com/risingwavelabs/risingwave/pull/19071 + // we permit boolean and tinyint(1) to be equivalent to boolean in RW. 
+ if let Some(Ok(val)) = mysql_row.get_opt::, _>(mysql_datum_index) { + let _ = mysql_row.take::(mysql_datum_index); + return Ok(val.map(ScalarImpl::from)); + } // Bit(1) match mysql_row.take_opt::>, _>(mysql_datum_index) { None => bail!( From b796d0ddb02112ddedf2a4ae755924865fbf0ba5 Mon Sep 17 00:00:00 2001 From: Richard Chien Date: Mon, 4 Nov 2024 22:09:25 +0800 Subject: [PATCH 11/15] refactor(common): simplify chunk iteration with `filter_by_bitmap` (#19254) Signed-off-by: Richard Chien --- src/batch/src/executor/group_top_n.rs | 15 ++- src/batch/src/executor/hash_agg.rs | 9 +- src/batch/src/executor/join/hash_join.rs | 94 +++++-------------- .../src/executor/join/lookup_join_base.rs | 9 +- src/common/src/bitmap.rs | 10 ++ 5 files changed, 48 insertions(+), 89 deletions(-) diff --git a/src/batch/src/executor/group_top_n.rs b/src/batch/src/executor/group_top_n.rs index b1f479135813..7dda468066e9 100644 --- a/src/batch/src/executor/group_top_n.rs +++ b/src/batch/src/executor/group_top_n.rs @@ -20,6 +20,7 @@ use futures_async_stream::try_stream; use hashbrown::HashMap; use itertools::Itertools; use risingwave_common::array::DataChunk; +use risingwave_common::bitmap::FilterByBitmap; use risingwave_common::catalog::Schema; use risingwave_common::hash::{HashKey, HashKeyDispatcher, PrecomputedBuildHasher}; use risingwave_common::memory::{MemoryContext, MonitoredGlobalAlloc}; @@ -198,16 +199,12 @@ impl GroupTopNExecutor { let chunk = Arc::new(chunk?); let keys = K::build_many(self.group_key.as_slice(), &chunk); - for (row_id, ((encoded_row, key), visible)) in - encode_chunk(&chunk, &self.column_orders)? - .into_iter() - .zip_eq_fast(keys.into_iter()) - .zip_eq_fast(chunk.visibility().iter()) - .enumerate() + for (row_id, (encoded_row, key)) in encode_chunk(&chunk, &self.column_orders)? 
+ .into_iter() + .zip_eq_fast(keys.into_iter()) + .enumerate() + .filter_by_bitmap(chunk.visibility()) { - if !visible { - continue; - } let heap = groups.entry(key).or_insert_with(|| { TopNHeap::new( self.limit, diff --git a/src/batch/src/executor/hash_agg.rs b/src/batch/src/executor/hash_agg.rs index bde5d36bd8c6..ea780d7bf83b 100644 --- a/src/batch/src/executor/hash_agg.rs +++ b/src/batch/src/executor/hash_agg.rs @@ -21,7 +21,7 @@ use futures_async_stream::try_stream; use hashbrown::hash_map::Entry; use itertools::Itertools; use risingwave_common::array::{DataChunk, StreamChunk}; -use risingwave_common::bitmap::Bitmap; +use risingwave_common::bitmap::{Bitmap, FilterByBitmap}; use risingwave_common::catalog::{Field, Schema}; use risingwave_common::hash::{HashKey, HashKeyDispatcher, PrecomputedBuildHasher}; use risingwave_common::memory::MemoryContext; @@ -545,14 +545,11 @@ impl HashAggExecutor { let chunk = StreamChunk::from(chunk?); let keys = K::build_many(self.group_key_columns.as_slice(), &chunk); let mut memory_usage_diff = 0; - for (row_id, (key, visible)) in keys + for (row_id, key) in keys .into_iter() - .zip_eq_fast(chunk.visibility().iter()) .enumerate() + .filter_by_bitmap(chunk.visibility()) { - if !visible { - continue; - } let mut new_group = false; let states = match groups.entry(key) { Entry::Occupied(entry) => entry.into_mut(), diff --git a/src/batch/src/executor/join/hash_join.rs b/src/batch/src/executor/join/hash_join.rs index 834320b60d2b..5ec115e0990a 100644 --- a/src/batch/src/executor/join/hash_join.rs +++ b/src/batch/src/executor/join/hash_join.rs @@ -21,7 +21,7 @@ use bytes::Bytes; use futures_async_stream::try_stream; use itertools::Itertools; use risingwave_common::array::{Array, DataChunk, RowRef}; -use risingwave_common::bitmap::{Bitmap, BitmapBuilder}; +use risingwave_common::bitmap::{Bitmap, BitmapBuilder, FilterByBitmap}; use risingwave_common::catalog::Schema; use risingwave_common::hash::{HashKey, HashKeyDispatcher, PrecomputedBuildHasher}; use risingwave_common::memory::{MemoryContext, MonitoredGlobalAlloc}; @@ -514,15 +514,12 @@ impl HashJoinExecutor { for (build_chunk_id, build_chunk) in build_side.iter().enumerate() { let build_keys = K::build_many(&self.build_key_idxs, build_chunk); - for (build_row_id, (build_key, visible)) in build_keys + for (build_row_id, build_key) in build_keys .into_iter() - .zip_eq_fast(build_chunk.visibility().iter()) .enumerate() + .filter_by_bitmap(build_chunk.visibility()) { self.shutdown_rx.check()?; - if !visible { - continue; - } // Only insert key to hash map if it is consistent with the null safe restriction. 
if build_key.null_bitmap().is_subset(&null_matched) { let row_id = RowId::new(build_chunk_id, build_row_id); @@ -765,14 +762,11 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build_many(&probe_key_idxs, &probe_chunk); - for (probe_row_id, (probe_key, visible)) in probe_keys + for (probe_row_id, probe_key) in probe_keys .iter() - .zip_eq_fast(probe_chunk.visibility().iter()) .enumerate() + .filter_by_bitmap(probe_chunk.visibility()) { - if !visible { - continue; - } for build_row_id in next_build_row_with_same_key.row_id_iter(hash_map.get(probe_key).copied()) { @@ -828,14 +822,11 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build_many(&probe_key_idxs, &probe_chunk); - for (probe_row_id, (probe_key, visible)) in probe_keys + for (probe_row_id, probe_key) in probe_keys .iter() - .zip_eq_fast(probe_chunk.visibility().iter()) .enumerate() + .filter_by_bitmap(probe_chunk.visibility()) { - if !visible { - continue; - } if let Some(first_matched_build_row_id) = hash_map.get(probe_key) { for build_row_id in next_build_row_with_same_key.row_id_iter(Some(*first_matched_build_row_id)) @@ -898,14 +889,11 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build_many(&probe_key_idxs, &probe_chunk); - for (probe_row_id, (probe_key, visible)) in probe_keys + for (probe_row_id, probe_key) in probe_keys .iter() - .zip_eq_fast(probe_chunk.visibility().iter()) .enumerate() + .filter_by_bitmap(probe_chunk.visibility()) { - if !visible { - continue; - } if let Some(first_matched_build_row_id) = hash_map.get(probe_key) { non_equi_state .first_output_row_id @@ -979,14 +967,11 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build_many(&probe_key_idxs, &probe_chunk); - for (probe_row_id, (probe_key, visible)) in probe_keys + for (probe_row_id, probe_key) in probe_keys .iter() - .zip_eq_fast(probe_chunk.visibility().iter()) .enumerate() + .filter_by_bitmap(probe_chunk.visibility()) { - if !visible { - continue; - } shutdown_rx.check()?; if !ANTI_JOIN { if hash_map.contains_key(probe_key) { @@ -1043,14 +1028,11 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build_many(&probe_key_idxs, &probe_chunk); - for (probe_row_id, (probe_key, visible)) in probe_keys + for (probe_row_id, probe_key) in probe_keys .iter() - .zip_eq_fast(probe_chunk.visibility().iter()) .enumerate() + .filter_by_bitmap(probe_chunk.visibility()) { - if !visible { - continue; - } non_equi_state.found_matched = false; if let Some(first_matched_build_row_id) = hash_map.get(probe_key) { non_equi_state @@ -1119,14 +1101,11 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build_many(&probe_key_idxs, &probe_chunk); - for (probe_row_id, (probe_key, visible)) in probe_keys + for (probe_row_id, probe_key) in probe_keys .iter() - .zip_eq_fast(probe_chunk.visibility().iter()) .enumerate() + .filter_by_bitmap(probe_chunk.visibility()) { - if !visible { - continue; - } non_equi_state.found_matched = false; if let Some(first_matched_build_row_id) = hash_map.get(probe_key) { non_equi_state @@ -1201,14 +1180,11 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = 
K::build_many(&probe_key_idxs, &probe_chunk); - for (probe_row_id, (probe_key, visible)) in probe_keys + for (probe_row_id, probe_key) in probe_keys .iter() - .zip_eq_fast(probe_chunk.visibility().iter()) .enumerate() + .filter_by_bitmap(probe_chunk.visibility()) { - if !visible { - continue; - } for build_row_id in next_build_row_with_same_key.row_id_iter(hash_map.get(probe_key).copied()) { @@ -1266,14 +1242,11 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build_many(&probe_key_idxs, &probe_chunk); - for (probe_row_id, (probe_key, visible)) in probe_keys + for (probe_row_id, probe_key) in probe_keys .iter() - .zip_eq_fast(probe_chunk.visibility().iter()) .enumerate() + .filter_by_bitmap(probe_chunk.visibility()) { - if !visible { - continue; - } for build_row_id in next_build_row_with_same_key.row_id_iter(hash_map.get(probe_key).copied()) { @@ -1338,13 +1311,7 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build_many(&probe_key_idxs, &probe_chunk); - for (probe_key, visible) in probe_keys - .iter() - .zip_eq_fast(probe_chunk.visibility().iter()) - { - if !visible { - continue; - } + for probe_key in probe_keys.iter().filter_by_bitmap(probe_chunk.visibility()) { for build_row_id in next_build_row_with_same_key.row_id_iter(hash_map.get(probe_key).copied()) { @@ -1392,14 +1359,11 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build_many(&probe_key_idxs, &probe_chunk); - for (probe_row_id, (probe_key, visible)) in probe_keys + for (probe_row_id, probe_key) in probe_keys .iter() - .zip_eq_fast(probe_chunk.visibility().iter()) .enumerate() + .filter_by_bitmap(probe_chunk.visibility()) { - if !visible { - continue; - } for build_row_id in next_build_row_with_same_key.row_id_iter(hash_map.get(probe_key).copied()) { @@ -1465,14 +1429,11 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build_many(&probe_key_idxs, &probe_chunk); - for (probe_row_id, (probe_key, visible)) in probe_keys + for (probe_row_id, probe_key) in probe_keys .iter() - .zip_eq_fast(probe_chunk.visibility().iter()) .enumerate() + .filter_by_bitmap(probe_chunk.visibility()) { - if !visible { - continue; - } if let Some(first_matched_build_row_id) = hash_map.get(probe_key) { for build_row_id in next_build_row_with_same_key.row_id_iter(Some(*first_matched_build_row_id)) @@ -1547,14 +1508,11 @@ impl HashJoinExecutor { for probe_chunk in probe_side.execute() { let probe_chunk = probe_chunk?; let probe_keys = K::build_many(&probe_key_idxs, &probe_chunk); - for (probe_row_id, (probe_key, visible)) in probe_keys + for (probe_row_id, probe_key) in probe_keys .iter() - .zip_eq_fast(probe_chunk.visibility().iter()) .enumerate() + .filter_by_bitmap(probe_chunk.visibility()) { - if !visible { - continue; - } left_non_equi_state.found_matched = false; if let Some(first_matched_build_row_id) = hash_map.get(probe_key) { left_non_equi_state diff --git a/src/batch/src/executor/join/lookup_join_base.rs b/src/batch/src/executor/join/lookup_join_base.rs index 39a39b9a1424..743ae25cf4d6 100644 --- a/src/batch/src/executor/join/lookup_join_base.rs +++ b/src/batch/src/executor/join/lookup_join_base.rs @@ -18,13 +18,13 @@ use futures::StreamExt; use futures_async_stream::try_stream; use itertools::Itertools; use risingwave_common::array::DataChunk; +use 
risingwave_common::bitmap::FilterByBitmap;
 use risingwave_common::catalog::Schema;
 use risingwave_common::hash::{HashKey, NullBitmap, PrecomputedBuildHasher};
 use risingwave_common::memory::MemoryContext;
 use risingwave_common::row::Row;
 use risingwave_common::types::{DataType, ToOwnedDatum};
 use risingwave_common::util::chunk_coalesce::DataChunkBuilder;
-use risingwave_common::util::iter_util::ZipEqFast;
 use risingwave_common::util::sort_util::{cmp_datum_iter, OrderType};
 use risingwave_common_estimate_size::EstimateSize;
 use risingwave_expr::expr::BoxedExpression;
@@ -150,14 +150,11 @@ impl LookupJoinBase {
         for (build_chunk_id, build_chunk) in build_side.iter().enumerate() {
             let build_keys = K::build_many(&hash_join_build_side_key_idxs, build_chunk);
 
-            for (build_row_id, (build_key, visible)) in build_keys
+            for (build_row_id, build_key) in build_keys
                 .into_iter()
-                .zip_eq_fast(build_chunk.visibility().iter())
                 .enumerate()
+                .filter_by_bitmap(build_chunk.visibility())
             {
-                if !visible {
-                    continue;
-                }
                 // Only insert key to hash map if it is consistent with the null safe
                 // restriction.
                 if build_key.null_bitmap().is_subset(&null_matched) {
diff --git a/src/common/src/bitmap.rs b/src/common/src/bitmap.rs
index 22869b23bc1d..995771970ece 100644
--- a/src/common/src/bitmap.rs
+++ b/src/common/src/bitmap.rs
@@ -42,6 +42,7 @@ use std::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, Not, Range, Ran
 use risingwave_common_estimate_size::EstimateSize;
 use risingwave_pb::common::buffer::CompressionType;
 use risingwave_pb::common::PbBuffer;
+use rw_iter_util::ZipEqFast;
 
 #[derive(Default, Debug, Clone, EstimateSize)]
 pub struct BitmapBuilder {
@@ -826,6 +827,15 @@ impl iter::Iterator for BitmapOnesIter<'_> {
     }
 }
 
+pub trait FilterByBitmap: ExactSizeIterator + Sized {
+    fn filter_by_bitmap(self, bitmap: &Bitmap) -> impl Iterator<Item = Self::Item> {
+        self.zip_eq_fast(bitmap.iter())
+            .filter_map(|(item, bit)| bit.then_some(item))
+    }
+}
+
+impl<T> FilterByBitmap for T where T: ExactSizeIterator {}
+
 #[cfg(test)]
 mod tests {
     use super::*;

From 46ad59877c7d5115d5285e3e24ac6590b1478b28 Mon Sep 17 00:00:00 2001
From: zwang28 <70626450+zwang28@users.noreply.github.com>
Date: Tue, 5 Nov 2024 00:49:33 +0800
Subject: [PATCH 12/15] fix(meta): record delta for new compaction group (#19253)

---
 src/meta/src/hummock/manager/commit_epoch.rs | 110 +++++++++----------
 src/meta/src/hummock/manager/time_travel.rs  |  26 +++--
 src/meta/src/hummock/manager/versioning.rs   |   2 -
 3 files changed, 65 insertions(+), 73 deletions(-)

diff --git a/src/meta/src/hummock/manager/commit_epoch.rs b/src/meta/src/hummock/manager/commit_epoch.rs
index 93e5d5be1dfb..5875feabf6db 100644
--- a/src/meta/src/hummock/manager/commit_epoch.rs
+++ b/src/meta/src/hummock/manager/commit_epoch.rs
@@ -44,9 +44,9 @@ use crate::hummock::metrics_utils::{
 };
 use crate::hummock::model::CompactionGroup;
 use crate::hummock::sequence::{next_compaction_group_id, next_sstable_object_id};
+use crate::hummock::time_travel::should_mark_next_time_travel_version_snapshot;
 use crate::hummock::{
-    commit_multi_var, commit_multi_var_with_provided_txn, start_measure_real_process_timer,
-    HummockManager,
+    commit_multi_var_with_provided_txn, start_measure_real_process_timer, HummockManager,
 };
 
 pub enum NewTableFragmentInfo {
@@ -109,13 +109,6 @@ impl HummockManager {
             );
         }
 
-        let previous_time_travel_toggle_check = versioning.time_travel_toggle_check;
-        versioning.time_travel_toggle_check = self.time_travel_enabled().await;
-        if !previous_time_travel_toggle_check &&
versioning.time_travel_toggle_check { - // Take a snapshot for the first commit epoch after enabling time travel. - versioning.mark_next_time_travel_version_snapshot(); - } - let mut version = HummockVersionTransaction::new( &mut versioning.current_version, &mut versioning.hummock_version_deltas, @@ -214,6 +207,10 @@ impl HummockManager { new_table_watermarks, change_log_delta, ); + if should_mark_next_time_travel_version_snapshot(&time_travel_delta) { + // Unable to invoke mark_next_time_travel_version_snapshot because versioning is already mutable borrowed. + versioning.time_travel_snapshot_interval_counter = u64::MAX; + } // Apply stats changes. let mut version_stats = HummockVersionStatsTransaction::new( @@ -247,59 +244,50 @@ impl HummockManager { ); table_metrics.inc_write_throughput(stats_value as u64); } - if versioning.time_travel_toggle_check { - let mut time_travel_version = None; - if versioning.time_travel_snapshot_interval_counter - >= self.env.opts.hummock_time_travel_snapshot_interval - { - versioning.time_travel_snapshot_interval_counter = 0; - time_travel_version = Some(version.latest_version()); - } else { - versioning.time_travel_snapshot_interval_counter = versioning - .time_travel_snapshot_interval_counter - .saturating_add(1); - } - let group_parents = version - .latest_version() - .levels - .values() - .map(|g| (g.group_id, g.parent_group_id)) - .collect(); - let time_travel_tables_to_commit = - table_compaction_group_mapping - .iter() - .filter_map(|(table_id, cg_id)| { - tables_to_commit - .get(table_id) - .map(|committed_epoch| (table_id, cg_id, *committed_epoch)) - }); - let mut txn = self.env.meta_store_ref().conn.begin().await?; - let version_snapshot_sst_ids = self - .write_time_travel_metadata( - &txn, - time_travel_version, - time_travel_delta, - &group_parents, - &versioning.last_time_travel_snapshot_sst_ids, - time_travel_tables_to_commit, - ) - .await?; - commit_multi_var_with_provided_txn!( - txn, - version, - version_stats, - compaction_group_manager_txn - )?; - if let Some(version_snapshot_sst_ids) = version_snapshot_sst_ids { - versioning.last_time_travel_snapshot_sst_ids = version_snapshot_sst_ids; - } + let mut time_travel_version = None; + if versioning.time_travel_snapshot_interval_counter + >= self.env.opts.hummock_time_travel_snapshot_interval + { + versioning.time_travel_snapshot_interval_counter = 0; + time_travel_version = Some(version.latest_version()); } else { - commit_multi_var!( - self.meta_store_ref(), - version, - version_stats, - compaction_group_manager_txn - )?; + versioning.time_travel_snapshot_interval_counter = versioning + .time_travel_snapshot_interval_counter + .saturating_add(1); + } + let group_parents = version + .latest_version() + .levels + .values() + .map(|g| (g.group_id, g.parent_group_id)) + .collect(); + let time_travel_tables_to_commit = + table_compaction_group_mapping + .iter() + .filter_map(|(table_id, cg_id)| { + tables_to_commit + .get(table_id) + .map(|committed_epoch| (table_id, cg_id, *committed_epoch)) + }); + let mut txn = self.env.meta_store_ref().conn.begin().await?; + let version_snapshot_sst_ids = self + .write_time_travel_metadata( + &txn, + time_travel_version, + time_travel_delta, + &group_parents, + &versioning.last_time_travel_snapshot_sst_ids, + time_travel_tables_to_commit, + ) + .await?; + commit_multi_var_with_provided_txn!( + txn, + version, + version_stats, + compaction_group_manager_txn + )?; + if let Some(version_snapshot_sst_ids) = version_snapshot_sst_ids { + 
versioning.last_time_travel_snapshot_sst_ids = version_snapshot_sst_ids; } for compaction_group_id in &modified_compaction_groups { diff --git a/src/meta/src/hummock/manager/time_travel.rs b/src/meta/src/hummock/manager/time_travel.rs index fab79fe01f9a..1ceaad5cfd39 100644 --- a/src/meta/src/hummock/manager/time_travel.rs +++ b/src/meta/src/hummock/manager/time_travel.rs @@ -17,14 +17,13 @@ use std::collections::{HashMap, HashSet, VecDeque}; use anyhow::anyhow; use itertools::Itertools; use risingwave_common::catalog::TableId; -use risingwave_common::system_param::reader::SystemParamsRead; use risingwave_common::util::epoch::Epoch; use risingwave_hummock_sdk::compaction_group::StaticCompactionGroupId; use risingwave_hummock_sdk::sstable_info::SstableInfo; use risingwave_hummock_sdk::time_travel::{ refill_version, IncompleteHummockVersion, IncompleteHummockVersionDelta, }; -use risingwave_hummock_sdk::version::{HummockVersion, HummockVersionDelta}; +use risingwave_hummock_sdk::version::{GroupDeltaCommon, HummockVersion, HummockVersionDelta}; use risingwave_hummock_sdk::{ CompactionGroupId, HummockEpoch, HummockSstableId, HummockSstableObjectId, }; @@ -45,14 +44,6 @@ use crate::hummock::HummockManager; /// Time travel. impl HummockManager { - pub(crate) async fn time_travel_enabled(&self) -> bool { - self.env - .system_params_reader() - .await - .time_travel_retention_ms() - > 0 - } - pub(crate) async fn init_time_travel_state(&self) -> Result<()> { let sql_store = self.env.meta_store_ref(); let mut guard = self.versioning.write().await; @@ -473,6 +464,11 @@ fn replay_archive( let mut last_version = HummockVersion::from_persisted_protobuf(&version); for d in deltas { let d = HummockVersionDelta::from_persisted_protobuf(&d); + debug_assert!( + !should_mark_next_time_travel_version_snapshot(&d), + "unexpected time travel delta {:?}", + d + ); // Need to work around the assertion in `apply_version_delta`. // Because compaction deltas are not included in time travel archive. while last_version.id < d.prev_id { @@ -506,3 +502,13 @@ fn should_ignore_group(root_group_id: CompactionGroupId) -> bool { pub fn require_sql_meta_store_err() -> Error { Error::TimeTravel(anyhow!("require SQL meta store")) } + +/// Time travel delta replay only expect `NewL0SubLevel`. In all other cases, a new version snapshot should be created. +pub fn should_mark_next_time_travel_version_snapshot(delta: &HummockVersionDelta) -> bool { + delta.group_deltas.iter().any(|(_, deltas)| { + deltas + .group_deltas + .iter() + .any(|d| !matches!(d, GroupDeltaCommon::NewL0SubLevel(_))) + }) +} diff --git a/src/meta/src/hummock/manager/versioning.rs b/src/meta/src/hummock/manager/versioning.rs index 7fa93f72d02b..fd516a2ba3e1 100644 --- a/src/meta/src/hummock/manager/versioning.rs +++ b/src/meta/src/hummock/manager/versioning.rs @@ -55,8 +55,6 @@ pub struct Versioning { pub time_travel_snapshot_interval_counter: u64, /// Used to avoid the attempts to rewrite the same SST to meta store pub last_time_travel_snapshot_sst_ids: HashSet, - /// Whether time travel is enabled during last commit epoch. 
- pub time_travel_toggle_check: bool, // Persistent states below pub hummock_version_deltas: BTreeMap, From 9d83354629bde178ba31e5fbea4ebc51c814fecf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 11:42:08 +0800 Subject: [PATCH 13/15] chore(deps): Bump speedate from 0.14.0 to 0.15.0 (#19257) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 11 ++++------- src/common/Cargo.toml | 2 +- src/frontend/Cargo.toml | 2 +- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb9830efdcc4..2e84e822497b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13599,12 +13599,12 @@ dependencies = [ [[package]] name = "speedate" -version = "0.14.0" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c323c4e6fece5a5a1a2a7f726d243144cce9fbcfe3ce4d9f3c6ede726a2bc780" +checksum = "9a5e7adf4e07e7de39a64d77962ca14a09165e592d42d0c9f9acadb679f4f937" dependencies = [ - "strum 0.25.0", - "strum_macros 0.25.3", + "strum 0.26.3", + "strum_macros 0.26.4", ] [[package]] @@ -14015,9 +14015,6 @@ name = "strum" version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" -dependencies = [ - "strum_macros 0.25.3", -] [[package]] name = "strum" diff --git a/src/common/Cargo.toml b/src/common/Cargo.toml index 8b341fb621fe..4b002bd8084c 100644 --- a/src/common/Cargo.toml +++ b/src/common/Cargo.toml @@ -97,7 +97,7 @@ serde_default = "0.2" serde_json = "1" serde_with = "3" smallbitset = "0.7.1" -speedate = "0.14.0" +speedate = "0.15.0" stacker = "0.1" static_assertions = "1" strum = "0.26" diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml index 76ee2aa076e8..502eaeba2f68 100644 --- a/src/frontend/Cargo.toml +++ b/src/frontend/Cargo.toml @@ -81,7 +81,7 @@ serde = { version = "1", features = ["derive"] } serde_json = "1" sha2 = "0.10.7" smallvec = { version = "1.13.1", features = ["serde"] } -speedate = "0.14.0" +speedate = "0.15.0" tempfile = "3" thiserror = "1" thiserror-ext = { workspace = true } From 0f78e6a3c3ae5eba074209a7b2a8b3d6fb4fd63f Mon Sep 17 00:00:00 2001 From: Bohan Zhang Date: Tue, 5 Nov 2024 14:08:26 +0800 Subject: [PATCH 14/15] fix: support Kinesis with include timestamp (#19211) Co-authored-by: tabversion --- .../kinesis-s3-source/create_mv.sql | 7 +++++ .../kinesis-s3-source/create_source.sql | 8 +++++- .../kinesis-s3-source/data_check | 2 +- src/connector/src/parser/mod.rs | 26 +++++++------------ src/connector/src/parser/util.rs | 5 ++-- .../src/source/kafka/source/message.rs | 17 +++++------- .../src/source/kinesis/source/message.rs | 17 ++++++++---- 7 files changed, 47 insertions(+), 35 deletions(-) diff --git a/integration_tests/kinesis-s3-source/create_mv.sql b/integration_tests/kinesis-s3-source/create_mv.sql index 7c1cbe1cdeac..1a5c28803537 100644 --- a/integration_tests/kinesis-s3-source/create_mv.sql +++ b/integration_tests/kinesis-s3-source/create_mv.sql @@ -23,6 +23,13 @@ FROM ai.ad_id ) AS ad_clicks ON ad_impressions.ad_id = ad_clicks.ad_id; +CREATE MATERIALIZED VIEW kinesis_timestamp AS +SELECT + ad_impression._rw_kinesis_timestamp AS timestamp +FROM + ad_impression +WHERE ad_impression._rw_kinesis_timestamp IS NOT NULL AND ad_impression._rw_kinesis_timestamp >= '2024-01-01 00:00:00+00'::TIMESTAMPTZ; + CREATE MATERIALIZED VIEW ad_ctr_5min AS SELECT 
ac.ad_id AS ad_id, diff --git a/integration_tests/kinesis-s3-source/create_source.sql b/integration_tests/kinesis-s3-source/create_source.sql index 902eccdf1950..95f6e2ef933d 100644 --- a/integration_tests/kinesis-s3-source/create_source.sql +++ b/integration_tests/kinesis-s3-source/create_source.sql @@ -2,7 +2,13 @@ CREATE SOURCE ad_impression ( bid_id BIGINT, ad_id BIGINT, impression_timestamp TIMESTAMPTZ -) WITH ( +) +INCLUDE key +INCLUDE partition +INCLUDE offset +INCLUDE timestamp +INCLUDE payload +WITH ( connector = 'kinesis', stream = 'ad-impression', aws.region='us-east-1', diff --git a/integration_tests/kinesis-s3-source/data_check b/integration_tests/kinesis-s3-source/data_check index b95837955a51..60fd175091c0 100644 --- a/integration_tests/kinesis-s3-source/data_check +++ b/integration_tests/kinesis-s3-source/data_check @@ -1 +1 @@ -ad_ctr,ad_ctr_5min \ No newline at end of file +ad_ctr,ad_ctr_5min,kinesis_timestamp \ No newline at end of file diff --git a/src/connector/src/parser/mod.rs b/src/connector/src/parser/mod.rs index d8bd4a9dbcd5..22cce134906c 100644 --- a/src/connector/src/parser/mod.rs +++ b/src/connector/src/parser/mod.rs @@ -215,7 +215,7 @@ impl<'a> MessageMeta<'a> { /// Extract the value for the given column. /// /// Returns `None` if the column is not a meta column. - fn value_for_column(self, desc: &SourceColumnDesc) -> Option> { + fn value_for_column(self, desc: &SourceColumnDesc) -> DatumRef<'a> { let datum: DatumRef<'_> = match desc.column_type { // Row id columns are filled with `NULL` here and will be filled with the real // row id generated by `RowIdGenExecutor` later. @@ -229,11 +229,7 @@ impl<'a> MessageMeta<'a> { KAFKA_TIMESTAMP_COLUMN_NAME, "unexpected kafka meta column name" ); - kafka_meta.timestamp.map(|ts| { - risingwave_common::cast::i64_to_timestamptz(ts) - .unwrap() - .into() - }) + kafka_meta.extract_timestamp() } SourceColumnType::Meta if let SourceMeta::DebeziumCdc(cdc_meta) = self.meta => { assert_eq!( @@ -248,7 +244,7 @@ impl<'a> MessageMeta<'a> { SourceColumnType::Meta | SourceColumnType::Normal => return None, }; - Some(datum) + datum } } @@ -392,8 +388,7 @@ impl SourceStreamChunkRowWriter<'_> { Ok(A::output_for( self.row_meta .as_ref() - .and_then(|row_meta| row_meta.value_for_column(desc)) - .unwrap(), // handled all match cases in internal match, unwrap is safe + .and_then(|row_meta| row_meta.value_for_column(desc)), )) } (&SourceColumnType::Meta, _) @@ -403,12 +398,11 @@ impl SourceStreamChunkRowWriter<'_> { ) => { // SourceColumnType is for CDC source only. 
- return Ok(A::output_for( + Ok(A::output_for( self.row_meta .as_ref() - .and_then(|row_meta| row_meta.value_for_column(desc)) - .unwrap(), // handled all match cases in internal match, unwrap is safe - )); + .and_then(|row_meta| row_meta.value_for_column(desc)), + )) } ( @@ -434,9 +428,9 @@ impl SourceStreamChunkRowWriter<'_> { } } (_, &Some(AdditionalColumnType::Timestamp(_))) => match self.row_meta { - Some(row_meta) => Ok(A::output_for( - extreact_timestamp_from_meta(row_meta.meta).unwrap_or(None), - )), + Some(row_meta) => { + Ok(A::output_for(extreact_timestamp_from_meta(row_meta.meta))) + } None => parse_field(desc), // parse from payload }, (_, &Some(AdditionalColumnType::CollectionName(_))) => { diff --git a/src/connector/src/parser/util.rs b/src/connector/src/parser/util.rs index 590ba927854d..ef748ec0e2eb 100644 --- a/src/connector/src/parser/util.rs +++ b/src/connector/src/parser/util.rs @@ -116,10 +116,11 @@ pub(super) async fn bytes_from_url( } } -pub fn extreact_timestamp_from_meta(meta: &SourceMeta) -> Option> { +pub fn extreact_timestamp_from_meta(meta: &SourceMeta) -> DatumRef<'_> { match meta { SourceMeta::Kafka(kafka_meta) => kafka_meta.extract_timestamp(), - SourceMeta::DebeziumCdc(cdc_meta) => Some(cdc_meta.extract_timestamp()), + SourceMeta::DebeziumCdc(cdc_meta) => cdc_meta.extract_timestamp(), + SourceMeta::Kinesis(kinesis_meta) => kinesis_meta.extract_timestamp(), _ => None, } } diff --git a/src/connector/src/source/kafka/source/message.rs b/src/connector/src/source/kafka/source/message.rs index 247166a15676..38c781deab64 100644 --- a/src/connector/src/source/kafka/source/message.rs +++ b/src/connector/src/source/kafka/source/message.rs @@ -16,7 +16,7 @@ use std::borrow::Cow; use itertools::Itertools; use rdkafka::message::{BorrowedMessage, Headers, OwnedHeaders}; -use rdkafka::Message; +use rdkafka::{Message, Timestamp}; use risingwave_common::types::{ Datum, DatumCow, DatumRef, ListValue, ScalarImpl, ScalarRefImpl, StructValue, }; @@ -29,18 +29,15 @@ use crate::source::SourceMeta; #[derive(Debug, Clone)] pub struct KafkaMeta { - // timestamp(milliseconds) of message append in mq - pub timestamp: Option, + pub timestamp: Timestamp, pub headers: Option, } impl KafkaMeta { - pub fn extract_timestamp(&self) -> Option> { - self.timestamp.map(|ts| { - Some(ScalarRefImpl::Timestamptz( - risingwave_common::cast::i64_to_timestamptz(ts).unwrap(), - )) - }) + pub fn extract_timestamp(&self) -> DatumRef<'_> { + Some( + risingwave_common::types::Timestamptz::from_millis(self.timestamp.to_millis()?)?.into(), + ) } pub fn extract_header_inner<'a>( @@ -101,7 +98,7 @@ impl SourceMessage { offset: message.offset().to_string(), split_id: message.partition().to_string().into(), meta: SourceMeta::Kafka(KafkaMeta { - timestamp: message.timestamp().to_millis(), + timestamp: message.timestamp(), headers: if require_header { message.headers().map(|headers| headers.detach()) } else { diff --git a/src/connector/src/source/kinesis/source/message.rs b/src/connector/src/source/kinesis/source/message.rs index c12679437c1f..599609634048 100644 --- a/src/connector/src/source/kinesis/source/message.rs +++ b/src/connector/src/source/kinesis/source/message.rs @@ -13,15 +13,24 @@ // limitations under the License. 
 use aws_sdk_kinesis::types::Record;
+use aws_smithy_types::DateTime;
 use aws_smithy_types_convert::date_time::DateTimeExt;
+use risingwave_common::types::{DatumRef, ScalarRefImpl};
 
 use crate::source::{SourceMessage, SourceMeta, SplitId};
 
 #[derive(Clone, Debug)]
 pub struct KinesisMeta {
     // from `approximate_arrival_timestamp` of type `Option`
-    #[expect(dead_code)]
-    timestamp: Option<i64>,
+    timestamp: Option<DateTime>,
+}
+
+impl KinesisMeta {
+    pub fn extract_timestamp(&self) -> DatumRef<'_> {
+        Some(ScalarRefImpl::Timestamptz(
+            self.timestamp?.to_chrono_utc().ok()?.into(),
+        ))
+    }
 }
 
 pub fn from_kinesis_record(value: &Record, split_id: SplitId) -> SourceMessage {
@@ -31,9 +40,7 @@ pub fn from_kinesis_record(value: &Record, split_id: SplitId) -> SourceMessage {
         offset: value.sequence_number.clone(),
         split_id,
         meta: SourceMeta::Kinesis(KinesisMeta {
-            timestamp: value
-                .approximate_arrival_timestamp
-                .map(|dt| dt.to_chrono_utc().unwrap().timestamp_millis()),
+            timestamp: value.approximate_arrival_timestamp,
         }),
     }
 }

From a945f52de4633a6f87f62309aa3cdbbebaff8d11 Mon Sep 17 00:00:00 2001
From: Bohan Zhang
Date: Tue, 5 Nov 2024 17:56:08 +0800
Subject: [PATCH 15/15] fix: none-shared source to secret dependence (#19263)

Signed-off-by: tabversion
Co-authored-by: tabversion
---
 ci/scripts/e2e-test-parallel.sh             |  3 +-
 e2e_test/source_inline/kafka/secret_dep.slt | 32 +++++++++++++++++++++
 src/meta/src/controller/catalog.rs          | 26 ++++++++++++++++++-
 3 files changed, 59 insertions(+), 2 deletions(-)
 create mode 100644 e2e_test/source_inline/kafka/secret_dep.slt

diff --git a/ci/scripts/e2e-test-parallel.sh b/ci/scripts/e2e-test-parallel.sh
index c366ef07fc20..effe8e364dd2 100755
--- a/ci/scripts/e2e-test-parallel.sh
+++ b/ci/scripts/e2e-test-parallel.sh
@@ -45,7 +45,8 @@ kill_cluster
 
 echo "--- e2e, ci-3streaming-2serving-3fe, batch"
 RUST_LOG=$RUST_LOG \
 risedev ci-start ci-3streaming-2serving-3fe
-sqllogictest "${host_args[@]}" -d dev './e2e_test/ddl/**/*.slt' --junit "parallel-batch-ddl-${profile}" --label "parallel"
+# Exclude files that contain ALTER SYSTEM commands
+find ./e2e_test/ddl -name "*.slt" -type f -exec grep -L "ALTER SYSTEM" {} \; | xargs -r sqllogictest "${host_args[@]}" -d dev --junit "parallel-batch-ddl-${profile}" --label "parallel"
 sqllogictest "${host_args[@]}" -d dev './e2e_test/visibility_mode/*.slt' -j 16 --junit "parallel-batch-${profile}" --label "parallel"
 kill_cluster
diff --git a/e2e_test/source_inline/kafka/secret_dep.slt b/e2e_test/source_inline/kafka/secret_dep.slt
new file mode 100644
index 000000000000..93426bdf5073
--- /dev/null
+++ b/e2e_test/source_inline/kafka/secret_dep.slt
@@ -0,0 +1,32 @@
+# Note: control substitution on will force us to use "\\n" instead of "\n" in commands
+control substitution on
+
+# for non-shared source
+statement ok
+set streaming_use_shared_source to false;
+
+system ok
+rpk topic create test_secret_ref -p 3
+
+statement ok
+CREATE SECRET sec WITH (backend = 'meta') AS 'test_secret_ref';
+
+statement ok
+CREATE SOURCE s(x varchar)
+WITH(
+    ${RISEDEV_KAFKA_WITH_OPTIONS_COMMON},
+    topic = secret sec,
+    scan.startup.mode = 'earliest',
+) FORMAT PLAIN ENCODE JSON;
+
+statement error
+DROP SECRET sec;
+
+statement ok
+DROP SOURCE s;
+
+statement ok
+DROP SECRET sec;
+
+statement ok
+set streaming_use_shared_source to true;
diff --git a/src/meta/src/controller/catalog.rs b/src/meta/src/controller/catalog.rs
index 1f7324d97493..227807b679d9 100644
--- a/src/meta/src/controller/catalog.rs
+++ b/src/meta/src/controller/catalog.rs
@@ -77,7 +77,10 @@ use
crate::controller::utils::{ resolve_source_register_info_for_jobs, PartialObject, }; use crate::controller::ObjectModel; -use crate::manager::{MetaSrvEnv, NotificationVersion, IGNORED_NOTIFICATION_VERSION}; +use crate::manager::{ + get_referred_secret_ids_from_source, MetaSrvEnv, NotificationVersion, + IGNORED_NOTIFICATION_VERSION, +}; use crate::rpc::ddl_controller::DropMode; use crate::telemetry::MetaTelemetryJobDesc; use crate::{MetaError, MetaResult}; @@ -1232,6 +1235,9 @@ impl CatalogController { ) .await?; + // handle secret ref + let secret_ids = get_referred_secret_ids_from_source(&pb_source)?; + let source_obj = Self::create_object( &txn, ObjectType::Source, @@ -1245,6 +1251,19 @@ impl CatalogController { let source: source::ActiveModel = pb_source.clone().into(); Source::insert(source).exec(&txn).await?; + // add secret dependency + if !secret_ids.is_empty() { + ObjectDependency::insert_many(secret_ids.iter().map(|id| { + object_dependency::ActiveModel { + oid: Set(*id as _), + used_by: Set(source_id as _), + ..Default::default() + } + })) + .exec(&txn) + .await?; + } + txn.commit().await?; let version = self @@ -3506,6 +3525,8 @@ async fn update_internal_tables( #[cfg(test)] mod tests { + use risingwave_pb::catalog::StreamSourceInfo; + use super::*; const TEST_DATABASE_ID: DatabaseId = 1; @@ -3665,6 +3686,9 @@ mod tests { scan.startup.mode = 'earliest' ) FORMAT PLAIN ENCODE JSON"# .to_string(), + info: Some(StreamSourceInfo { + ..Default::default() + }), ..Default::default() }; mgr.create_source(pb_source).await?;