feat(meta): support drop creating materialized views for v2 backend #17503

Merged on Jul 16, 2024

Commits (57 total, all by kwannoel; the diff below shows changes from 48 commits):
6397ec9 update creating catalogs (Jun 27)
2b825e3 decouple finish create table and finish create mv (Jun 27)
0082554 refactor cancel (Jun 27)
fd6ff98 fix (Jun 27)
3714af9 handle cancel path in drop (Jun 27)
d3d4605 handle delete catalog to fe after cancel (Jun 27)
ba91e6c filter out creating tables by default (Jun 27)
cbd78a6 refactor created table getters + only make creating tables visible in… (Jun 27)
b9789a9 report catalog in v2, left internal tables (Jun 27)
2dfb3e5 only commit internal tables (Jun 27)
044525f fix v1 (Jun 28)
7f51256 Revert "only commit internal tables" (Jun 28)
6a47d92 add fixme for clean up stream jobs + return all tables on frontend su… (Jun 28)
293f034 fix clean dirty tables for v1 (Jun 28)
33d94ce notify internal table catalog for v2 (Jun 28)
99427d2 provide create job obj in finish_streaming_job (Jun 28)
884793c Revert "report catalog in v2, left internal tables" (Jun 28)
339f089 Revert "notify internal table catalog for v2" (Jun 28)
c71f181 Revert "provide create job obj in finish_streaming_job" (Jun 28)
8db729c fix test utils (Jul 2)
2753537 fix ut (Jul 2)
09b8f88 rename get_table to get_any_table (Jul 3)
ba911e4 fix log (Jul 3)
45ceed0 fix docs (Jul 3)
a188fb3 rm fixme (Jul 4)
5bd5fc4 rename (Jul 4)
bd4f20a Revert "Revert "report catalog in v2, left internal tables"" (Jun 28)
cdcadf2 Revert "Revert "notify internal table catalog for v2"" (Jun 28)
bcba4aa Revert "Revert "provide create job obj in finish_streaming_job"" (Jun 28)
5bfcd49 refactor notification functionality (Jul 1)
d4445bc report deleted dirty jobs catalog to fe (Jul 1)
d92e515 refactor logging out (Jul 1)
bef1421 notify on create (Jul 1)
b85f25f notify delete for relation group (Jul 1)
c3c52ec change add to update for finish jobs (Jul 1)
3a4ef73 start create ddl (internal tables) (Jul 1)
b27ef7c fix create catalog txn deadlock (Jul 1)
80d7b12 handle fragment update (Jul 1)
b987025 fmt (Jul 1)
2ac2a9e fix (Jul 1)
5fc93bb fix (Jul 1)
ef45d8b fix index test (Jul 2)
cd34f36 fmt (Jul 2)
2df055b add e2e test (Jul 2)
c3906f6 delete only after querying objects to cancel (Jul 2)
8b69519 fix (Jul 2)
4d125d5 permit recovery (Jul 2)
db45d28 use label (Jul 2)
f88a5b4 Merge branch 'main' into kwannoel/mark-creating-v2 (Jul 4)
cb1325c fix (Jul 4)
60f4317 Merge branch 'main' into kwannoel/mark-creating-v2 (Jul 5)
e2b75b3 allow adhoc recovery when disable_recovery set (Jul 6)
fcf50c2 revert refactor of oid to object (Jul 8)
ef85fde make single node compat with risedev psql (Jul 15)
f62d7e6 fix path (Jul 15)
3eb1c6e fix (Jul 16)
99186bf Merge remote-tracking branch 'origin/main' into kwannoel/mark-creatin… (Jul 16)
2 changes: 1 addition & 1 deletion ci/scripts/run-e2e-test.sh
@@ -91,7 +91,7 @@ cluster_stop
echo "--- e2e, $mode, batch"
RUST_LOG="info,risingwave_stream=info,risingwave_batch=info,risingwave_storage=info" \
cluster_start
-sqllogictest -p 4566 -d dev './e2e_test/ddl/**/*.slt' --junit "batch-ddl-${profile}"
+sqllogictest -p 4566 -d dev './e2e_test/ddl/**/*.slt' --junit "batch-ddl-${profile}" --label "can-use-recover"
if [[ "$mode" != "single-node" ]]; then
sqllogictest -p 4566 -d dev './e2e_test/background_ddl/basic.slt' --junit "batch-ddl-${profile}"
fi
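Aside: the --label "can-use-recover" flag added above pairs with sqllogictest's conditional records. A record guarded by onlyif <label> runs only when the runner was started with that label (skipif <label> inverts this), which is how the recovery-dependent statements in the new test below are gated. A minimal sketch:

onlyif can-use-recover
statement ok
recover;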
76 changes: 76 additions & 0 deletions e2e_test/ddl/drop/drop_creating_mv.slt
@@ -0,0 +1,76 @@
statement ok
create table t(v1 int);

statement ok
insert into t select * from generate_series(1, 10000);

statement ok
flush;

statement ok
set streaming_rate_limit=1;

############## Test drop foreground mv
onlyif can-use-recover
system ok
psql -h localhost -p 4566 -d dev -U root -c 'create materialized view m1 as select * from t;' &
Comment (Member):
This doesn't work under parallel execution, since the database name won't be dev anymore. 😕

Reply (@kwannoel, author, Jul 15, 2024):
Parallel execution won't run this test, because triggering recovery in one test thread would affect the execution of other test threads.

Comment (Member):
You can use ./risedev psql -c instead.
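For reference, a sketch of that suggestion applied to the statement above (assuming risedev psql resolves the frontend endpoint and database of the running cluster itself, so neither is hardcoded):

onlyif can-use-recover
system ok
./risedev psql -c 'create materialized view m1 as select * from t;' &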


onlyif can-use-recover
sleep 5s

onlyif can-use-recover
statement ok
drop materialized view m1;

############## Test drop background mv BEFORE recovery
statement ok
set background_ddl=true;

onlyif can-use-recover
statement ok
create materialized view m1 as select * from t;

onlyif can-use-recover
sleep 5s

onlyif can-use-recover
statement ok
drop materialized view m1;

############## Test drop background mv AFTER recovery
statement ok
set background_ddl=true;

onlyif can-use-recover
statement ok
create materialized view m1 as select * from t;

onlyif can-use-recover
sleep 5s

onlyif can-use-recover
statement ok
recover;

onlyif can-use-recover
sleep 10s

onlyif can-use-recover
statement ok
drop materialized view m1;

############## Make sure the mv can still be successfully created later.
statement ok
set streaming_rate_limit=default;

statement ok
set background_ddl=false;

statement ok
create materialized view m1 as select * from t;

statement ok
drop materialized view m1;

statement ok
drop table t;
19 changes: 15 additions & 4 deletions src/batch/src/worker_manager/worker_node_manager.rs
@@ -18,6 +18,7 @@ use std::time::Duration;

use rand::seq::SliceRandom;
use risingwave_common::bail;
+use risingwave_common::catalog::OBJECT_ID_PLACEHOLDER;
use risingwave_common::hash::{WorkerSlotId, WorkerSlotMapping};
use risingwave_common::vnode_mapping::vnode_placement::place_vnode;
use risingwave_pb::common::{WorkerNode, WorkerType};
@@ -220,10 +221,20 @@ impl WorkerNodeManager

pub fn remove_streaming_fragment_mapping(&self, fragment_id: &FragmentId) {
let mut guard = self.inner.write().unwrap();
-guard
-.streaming_fragment_vnode_mapping
-.remove(fragment_id)
-.unwrap();
+
+let res = guard.streaming_fragment_vnode_mapping.remove(fragment_id);
+match &res {
+Some(_) => {}
+None if OBJECT_ID_PLACEHOLDER == *fragment_id => {
+// Do nothing for placeholder fragment.
+}
+None => {
+panic!(
+"Streaming vnode mapping not found for fragment_id: {}",
+fragment_id
+)
+}
+};
}

/// Returns fragment's vnode mapping for serving.
2 changes: 1 addition & 1 deletion src/config/ci.toml
@@ -1,5 +1,5 @@
[meta]
-disable_recovery = true
+disable_recovery = false
max_heartbeat_interval_secs = 60

[meta.compaction_config]
14 changes: 8 additions & 6 deletions src/frontend/src/binder/relation/table_or_source.rs
@@ -120,9 +120,9 @@ impl Binder
table_name
);
}
-} else if let Ok((table_catalog, schema_name)) =
-self.catalog
-.get_table_by_name(&self.db_name, schema_path, table_name)
+} else if let Ok((table_catalog, schema_name)) = self
+.catalog
+.get_created_table_by_name(&self.db_name, schema_path, table_name)
{
self.resolve_table_relation(table_catalog.clone(), schema_name, as_of)?
} else if let Ok((source_catalog, _)) =
@@ -163,7 +163,9 @@
if let Ok(schema) =
self.catalog.get_schema_by_name(&self.db_name, schema_name)
{
-if let Some(table_catalog) = schema.get_table_by_name(table_name) {
+if let Some(table_catalog) =
+schema.get_created_table_by_name(table_name)
+{
return self.resolve_table_relation(
table_catalog.clone(),
&schema_name.clone(),
@@ -314,7 +316,7 @@
};
let (table_catalog, schema_name) =
self.catalog
-.get_table_by_name(db_name, schema_path, table_name)?;
+.get_created_table_by_name(db_name, schema_path, table_name)?;
let table_catalog = table_catalog.clone();

let table_id = table_catalog.id();
@@ -352,7 +354,7 @@

let (table, _schema_name) =
self.catalog
-.get_table_by_name(db_name, schema_path, table_name)?;
+.get_created_table_by_name(db_name, schema_path, table_name)?;

match table.table_type() {
TableType::Table => {}
2 changes: 1 addition & 1 deletion src/frontend/src/catalog/database_catalog.rs
@@ -97,7 +97,7 @@ impl DatabaseCatalog
pub fn find_schema_containing_table_id(&self, table_id: &TableId) -> Option<&SchemaCatalog> {
self.schema_by_name
.values()
-.find(|schema| schema.get_table_by_id(table_id).is_some())
+.find(|schema| schema.get_created_table_by_id(table_id).is_some())
}

pub fn get_grant_object_by_oid(&self, oid: u32) -> Option<Object> {
36 changes: 27 additions & 9 deletions src/frontend/src/catalog/root_catalog.rs
@@ -320,7 +320,7 @@ impl Catalog
.iter_schemas_mut()
.find(|schema| {
schema.id() != proto.schema_id
-&& schema.get_table_by_id(&proto.id.into()).is_some()
+&& schema.get_created_table_by_id(&proto.id.into()).is_some()
})
.unwrap()
.drop_table(proto.id.into());
@@ -587,7 +587,7 @@
}

pub fn get_table_name_by_id(&self, table_id: TableId) -> CatalogResult<String> {
-self.get_table_by_id(&table_id)
+self.get_any_table_by_id(&table_id)
.map(|table| table.name.clone())
}

@@ -636,7 +636,8 @@
}

/// Used to get `TableCatalog` for Materialized Views, Tables and Indexes.
-pub fn get_table_by_name<'a>(
+/// Retrieves all tables, created or creating.
+pub fn get_any_table_by_name<'a>(
&self,
db_name: &str,
schema_path: SchemaPath<'a>,
@@ -651,21 +652,38 @@
.ok_or_else(|| CatalogError::NotFound("table", table_name.to_string()))
}

-pub fn get_table_by_id(&self, table_id: &TableId) -> CatalogResult<&Arc<TableCatalog>> {
+/// Used to get `TableCatalog` for Materialized Views, Tables and Indexes.
+/// Retrieves only created tables.
+pub fn get_created_table_by_name<'a>(
+&self,
+db_name: &str,
+schema_path: SchemaPath<'a>,
+table_name: &str,
+) -> CatalogResult<(&Arc<TableCatalog>, &'a str)> {
+schema_path
+.try_find(|schema_name| {
+Ok(self
+.get_schema_by_name(db_name, schema_name)?
+.get_created_table_by_name(table_name))
+})?
+.ok_or_else(|| CatalogError::NotFound("table", table_name.to_string()))
+}
+
+pub fn get_any_table_by_id(&self, table_id: &TableId) -> CatalogResult<&Arc<TableCatalog>> {
self.table_by_id
.get(table_id)
.ok_or_else(|| CatalogError::NotFound("table id", table_id.to_string()))
}

/// This function is similar to `get_table_by_id` expect that a table must be in a given database.
-pub fn get_table_by_id_with_db(
+pub fn get_created_table_by_id_with_db(
&self,
db_name: &str,
table_id: u32,
) -> CatalogResult<&Arc<TableCatalog>> {
let table_id = TableId::from(table_id);
for schema in self.get_database_by_name(db_name)?.iter_schemas() {
-if let Some(table) = schema.get_table_by_id(&table_id) {
+if let Some(table) = schema.get_created_table_by_id(&table_id) {
return Ok(table);
}
}
@@ -702,7 +720,7 @@

if found {
let mut table = self
-.get_table_by_id(table_id)
+.get_any_table_by_id(table_id)
.unwrap()
.to_prost(schema_id, database_id);
table.name = table_name.to_string();
@@ -939,7 +957,7 @@
) -> CatalogResult<()> {
let schema = self.get_schema_by_name(db_name, schema_name)?;

-if let Some(table) = schema.get_table_by_name(relation_name) {
+if let Some(table) = schema.get_created_table_by_name(relation_name) {
if table.is_index() {
Err(CatalogError::Duplicated("index", relation_name.to_string()))
} else if table.is_mview() {
@@ -1069,7 +1087,7 @@
#[allow(clippy::manual_map)]
if let Some(item) = schema.get_system_table_by_name(class_name) {
Ok(Some(item.id().into()))
-} else if let Some(item) = schema.get_table_by_name(class_name) {
+} else if let Some(item) = schema.get_created_table_by_name(class_name) {
Ok(Some(item.id().into()))
} else if let Some(item) = schema.get_index_by_name(class_name) {
Ok(Some(item.id.into()))
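To make the created/any split concrete, a minimal sketch of the intended call sites (my own illustration, not code from this PR; it uses only the getter signatures visible in the diff above):

// SELECT-style name resolution sees only fully created tables, so an MV that
// is still backfilling stays invisible to queries ...
fn lookup_for_select<'a>(catalog: &'a Catalog, db: &str, path: SchemaPath<'a>, name: &str) -> bool {
    catalog.get_created_table_by_name(db, path, name).is_ok()
}

// ... while DROP/CANCEL paths use the unfiltered getter, so a creating MV can
// still be found and dropped, which is the point of this PR.
fn lookup_for_drop<'a>(catalog: &'a Catalog, db: &str, path: SchemaPath<'a>, name: &str) -> bool {
    catalog.get_any_table_by_name(db, path, name).is_ok()
}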
29 changes: 21 additions & 8 deletions src/frontend/src/catalog/schema_catalog.rs
@@ -17,7 +17,7 @@ use std::collections::HashMap;
use std::sync::Arc;

use itertools::Itertools;
-use risingwave_common::catalog::{valid_table_name, FunctionId, IndexId, TableId};
+use risingwave_common::catalog::{valid_table_name, FunctionId, IndexId, StreamJobStatus, TableId};
use risingwave_common::types::DataType;
use risingwave_connector::sink::catalog::SinkCatalog;
pub use risingwave_expr::sig::*;
@@ -128,9 +128,11 @@ impl SchemaCatalog
let name = prost.name.clone();
let id = prost.id.into();
let old_index = self.index_by_id.get(&id).unwrap();
-let index_table = self.get_table_by_id(&prost.index_table_id.into()).unwrap();
+let index_table = self
+.get_created_table_by_id(&prost.index_table_id.into())
+.unwrap();
let primary_table = self
-.get_table_by_id(&prost.primary_table_id.into())
+.get_created_table_by_id(&prost.primary_table_id.into())
.unwrap();
let index: IndexCatalog = IndexCatalog::build_from(prost, index_table, primary_table);
let index_ref = Arc::new(index);
@@ -166,10 +168,9 @@
pub fn create_index(&mut self, prost: &PbIndex) {
let name = prost.name.clone();
let id = prost.id.into();

let index_table = self.get_table_by_id(&prost.index_table_id.into()).unwrap();
let primary_table = self
-.get_table_by_id(&prost.primary_table_id.into())
+.get_created_table_by_id(&prost.primary_table_id.into())
.unwrap();
let index: IndexCatalog = IndexCatalog::build_from(prost, index_table, primary_table);
let index_ref = Arc::new(index);
@@ -556,10 +557,10 @@
}

/// Iterate all materialized views, excluding the indices.
-pub fn iter_mv(&self) -> impl Iterator<Item = &Arc<TableCatalog>> {
+pub fn iter_created_mvs(&self) -> impl Iterator<Item = &Arc<TableCatalog>> {
self.table_by_name
.iter()
-.filter(|(_, v)| v.is_mview() && valid_table_name(&v.name))
+.filter(|(_, v)| v.is_mview() && valid_table_name(&v.name) && v.is_created())
.map(|(_, v)| v)
}

@@ -605,10 +606,22 @@
self.table_by_name.get(table_name)
}

+pub fn get_created_table_by_name(&self, table_name: &str) -> Option<&Arc<TableCatalog>> {
+self.table_by_name
+.get(table_name)
+.filter(|&table| table.stream_job_status == StreamJobStatus::Created)
+}

pub fn get_table_by_id(&self, table_id: &TableId) -> Option<&Arc<TableCatalog>> {
self.table_by_id.get(table_id)
}

+pub fn get_created_table_by_id(&self, table_id: &TableId) -> Option<&Arc<TableCatalog>> {
+self.table_by_id
+.get(table_id)
+.filter(|&table| table.stream_job_status == StreamJobStatus::Created)
+}

pub fn get_view_by_name(&self, view_name: &str) -> Option<&Arc<ViewCatalog>> {
self.view_by_name.get(view_name)
}
@@ -765,7 +778,7 @@

pub fn get_grant_object_by_oid(&self, oid: u32) -> Option<Object> {
#[allow(clippy::manual_map)]
-if self.get_table_by_id(&TableId::new(oid)).is_some()
+if self.get_created_table_by_id(&TableId::new(oid)).is_some()
|| self.get_index_by_id(&IndexId::new(oid)).is_some()
{
Some(Object::TableId(oid))
@@ -44,7 +44,7 @@ fn read_rw_materialized_views(reader: &SysCatalogReaderImpl) -> Result<Vec<RwMat

Ok(schemas
.flat_map(|schema| {
-schema.iter_mv().map(|table| RwMaterializedView {
+schema.iter_created_mvs().map(|table| RwMaterializedView {
id: table.id.table_id as i32,
name: table.name().into(),
schema_id: schema.id() as i32,
@@ -49,7 +49,7 @@ async fn read_relation_info(reader: &SysCatalogReaderImpl) -> Result<Vec<RwRelat
let schema_catalog =
catalog_reader.get_schema_by_name(&reader.auth_context.database, schema)?;

-schema_catalog.iter_mv().for_each(|t| {
+schema_catalog.iter_created_mvs().for_each(|t| {
table_ids.push(t.id.table_id);
});

@@ -78,7 +78,7 @@
for schema in &schemas {
let schema_catalog =
catalog_reader.get_schema_by_name(&reader.auth_context.database, schema)?;
-schema_catalog.iter_mv().for_each(|t| {
+schema_catalog.iter_created_mvs().for_each(|t| {
if let Some(fragments) = table_fragments.get(&t.id.table_id) {
rows.push(RwRelationInfo {
schemaname: schema.clone(),
4 changes: 4 additions & 0 deletions src/frontend/src/catalog/table_catalog.rs
@@ -493,6 +493,10 @@ impl TableCatalog
.collect(),
)
}

+pub fn is_created(&self) -> bool {
+self.stream_job_status == StreamJobStatus::Created
+}
}

impl From<PbTable> for TableCatalog {