From 37d7b96dadee25e6f24f6ac69f379dceeb852da6 Mon Sep 17 00:00:00 2001
From: Bugen Zhao
Date: Mon, 21 Oct 2024 18:04:55 +0800
Subject: [PATCH] tolerate vnode count assertion

Signed-off-by: Bugen Zhao
---
 src/frontend/src/scheduler/plan_fragmenter.rs | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/frontend/src/scheduler/plan_fragmenter.rs b/src/frontend/src/scheduler/plan_fragmenter.rs
index 9bd1851383af6..b008da4efe4b0 100644
--- a/src/frontend/src/scheduler/plan_fragmenter.rs
+++ b/src/frontend/src/scheduler/plan_fragmenter.rs
@@ -1228,8 +1228,16 @@ fn derive_partitions(
     table_desc: &TableDesc,
     vnode_mapping: &WorkerSlotMapping,
 ) -> SchedulerResult<HashMap<WorkerSlotId, TablePartitionInfo>> {
+    let vnode_mapping = if table_desc.vnode_count != vnode_mapping.len() {
+        // The vnode count mismatch occurs only in special cases where a hash-distributed fragment
+        // contains singleton internal tables, e.g., the state table of `Source` executors.
+        // In this case, we reduce the vnode mapping to a single vnode, as only `SINGLETON_VNODE` is used.
+        assert_eq!(table_desc.vnode_count, 1);
+        &WorkerSlotMapping::new_single(vnode_mapping.iter().next().unwrap())
+    } else {
+        vnode_mapping
+    };
     let vnode_count = vnode_mapping.len();
-    assert_eq!(vnode_count, table_desc.vnode_count);
 
     let mut partitions: HashMap<WorkerSlotId, (BitmapBuilder, Vec<_>)> = HashMap::new();
 
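For readers looking at the change outside the patch context, below is a minimal, self-contained sketch of the fallback introduced above: when the table's vnode count disagrees with the fragment's mapping, the mapping is reduced to a single worker slot, and any other mismatch still asserts. The `WorkerSlotMapping` type here is a simplified stand-in (a plain list of worker-slot ids) and `adjust_mapping` is a hypothetical helper; neither reflects the real RisingWave API beyond the calls visible in the diff.

```rust
/// Simplified stand-in for `WorkerSlotMapping`: one worker-slot id per vnode.
/// This is NOT the real RisingWave type; it only mirrors the calls used in the patch.
#[derive(Debug, Clone, PartialEq)]
struct WorkerSlotMapping(Vec<u64>);

impl WorkerSlotMapping {
    fn new_single(slot: u64) -> Self {
        Self(vec![slot])
    }

    fn len(&self) -> usize {
        self.0.len()
    }

    fn iter(&self) -> impl Iterator<Item = u64> + '_ {
        self.0.iter().copied()
    }
}

/// Hypothetical helper mirroring the new `if`/`else` in the patch: tolerate the
/// mismatch only for singleton tables (`table_vnode_count == 1`) by collapsing
/// the mapping to its first worker slot; otherwise the counts must agree.
fn adjust_mapping(table_vnode_count: usize, vnode_mapping: &WorkerSlotMapping) -> WorkerSlotMapping {
    if table_vnode_count != vnode_mapping.len() {
        // Any mismatch other than the singleton case is still considered a bug.
        assert_eq!(table_vnode_count, 1);
        WorkerSlotMapping::new_single(vnode_mapping.iter().next().unwrap())
    } else {
        // Counts already agree; keep the mapping as-is (cloned here for simplicity).
        vnode_mapping.clone()
    }
}

fn main() {
    // A hash-distributed mapping with 4 vnodes, but a singleton table (1 vnode):
    // the mapping collapses to the first worker slot.
    let mapping = WorkerSlotMapping(vec![10, 11, 12, 13]);
    assert_eq!(adjust_mapping(1, &mapping), WorkerSlotMapping::new_single(10));

    // Matching counts: the mapping is used unchanged.
    assert_eq!(adjust_mapping(4, &mapping).len(), 4);
}
```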