diff --git a/src/frontend/planner_test/tests/testdata/output/agg.yaml b/src/frontend/planner_test/tests/testdata/output/agg.yaml index ba5b6ba5e7908..003d196d85efa 100644 --- a/src/frontend/planner_test/tests/testdata/output/agg.yaml +++ b/src/frontend/planner_test/tests/testdata/output/agg.yaml @@ -54,7 +54,7 @@ └─StreamProject { exprs: [t.v1, (min(t.v2) + (max(t.v3) * count(t.v1))) as $expr1] } └─StreamHashAgg { group_key: [t.v1], aggs: [min(t.v2), max(t.v3), count(t.v1), count] } └─StreamExchange { dist: HashShard(t.v1) } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t(v1 int, v2 int, v3 int); select min(v1) + max(v2) * count(v3) as agg from t; @@ -76,7 +76,7 @@ └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr1], aggs: [min(t.v1), max(t.v2), count(t.v3), count] } └─StreamProject { exprs: [t.v1, t.v2, t.v3, t._row_id, Vnode(t._row_id) as $expr1] } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t(v1 int, v2 int); select v1 from t group by v2; @@ -107,7 +107,7 @@ └─StreamHashAgg { group_key: [t.v3], aggs: [min(t.v1), sum($expr1), count($expr1), count] } └─StreamExchange { dist: HashShard(t.v3) } └─StreamProject { exprs: [t.v3, t.v1, (t.v1 + t.v2) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: test logical_agg with complex group expression sql: | create table t(v1 int, v2 int); @@ -278,7 +278,7 @@ └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [count($expr1), sum($expr1)] } └─StreamProject { exprs: [(t.v1 + t.v2) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t(v1 int, v2 int, v3 int); select v1, sum(v2 + v3) / count(v2 + v3) + max(v1) as agg from t group by v1; @@ -295,7 +295,7 @@ └─StreamHashAgg { group_key: [t.v1], aggs: [sum($expr1), count($expr1), max(t.v1), count] } └─StreamExchange { dist: HashShard(t.v1) } └─StreamProject { exprs: [t.v1, (t.v2 + t.v3) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (v1 real); select v1, count(*) from t group by v1; @@ -561,7 +561,7 @@ └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr1], aggs: 
[min(t.v1), max(t.v3), count(t.v2), count] } └─StreamProject { exprs: [t.v1, t.v2, t.v3, t._row_id, Vnode(t._row_id) as $expr1] } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: dup group key sql: | create table t(v1 int) with (appendonly = false); @@ -579,7 +579,7 @@ └─StreamProject { exprs: [t.v1] } └─StreamHashAgg { group_key: [t.v1], aggs: [count] } └─StreamExchange { dist: HashShard(t.v1) } - └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: dup group key sql: | create table t(v1 int, v2 int, v3 int) with (appendonly = false); @@ -598,7 +598,7 @@ └─StreamProject { exprs: [t.v2, min(t.v1), t.v3, max(t.v1)] } └─StreamHashAgg { group_key: [t.v2, t.v3], aggs: [min(t.v1), max(t.v1), count] } └─StreamExchange { dist: HashShard(t.v2, t.v3) } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: order by agg input sql: | create table t(v1 int); @@ -617,7 +617,7 @@ └─StreamSimpleAgg { aggs: [sum(sum(t.v1)), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [sum(t.v1)] } - └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: order by other columns sql: | create table t(v1 int, v2 varchar); @@ -636,7 +636,7 @@ └─StreamSimpleAgg { aggs: [sum(sum(t.v1)), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [sum(t.v1)] } - └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: order by ASC/DESC and default sql: | create table t(v1 int, v2 varchar, v3 int); @@ -655,7 +655,7 @@ └─StreamSimpleAgg { aggs: [sum(sum(t.v1)), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [sum(t.v1)] } - └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: filter clause sql: | create table t(v1 int); @@ -674,7 +674,7 @@ └─StreamSimpleAgg { aggs: [sum(sum(t.v1) filter((t.v1 > 0:Int32))), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [sum(t.v1) filter((t.v1 > 0:Int32))] } - └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: 
UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: | filter clause extra calculation, should reuse result from project @@ -710,7 +710,7 @@ └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr2], aggs: [max($expr1) filter((t.a < t.b) AND ((t.a + t.b) < 100:Int32) AND ((t.a * t.b) <> ((t.a + t.b) - 1:Int32))), count] } └─StreamProject { exprs: [t.a, t.b, (t.a * t.b) as $expr1, t._row_id, Vnode(t._row_id) as $expr2] } - └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: avg filter clause + group by sql: | create table t(a int, b int); @@ -729,7 +729,7 @@ └─StreamProject { exprs: [(sum(t.a) filter((t.a > t.b))::Decimal / count(t.a) filter((t.a > t.b))::Decimal) as $expr1, t.b] } └─StreamHashAgg { group_key: [t.b], aggs: [sum(t.a) filter((t.a > t.b)), count(t.a) filter((t.a > t.b)), count] } └─StreamExchange { dist: HashShard(t.b) } - └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: count filter clause sql: | create table t(a int, b int); @@ -748,7 +748,7 @@ └─StreamSimpleAgg { aggs: [sum0(count filter((t.a > t.b))), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [count filter((t.a > t.b))] } - └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: filter clause + non-boolean function sql: | create table t(a int, b int); @@ -802,7 +802,7 @@ └─StreamSimpleAgg { aggs: [sum(sum(t.v2) filter((t.v2 < 5:Int32))), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [sum(t.v2) filter((t.v2 < 5:Int32))] } - └─StreamTableScan { table: t, columns: [t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: distinct agg on empty args sql: | create table t(x int, y varchar); @@ -841,7 +841,7 @@ └─StreamProject { exprs: [t.a, count(distinct t.b), sum(t.c)] } └─StreamHashAgg { group_key: [t.a], aggs: [count(distinct t.b), sum(t.c), count] } └─StreamExchange { dist: HashShard(t.a) } - └─StreamTableScan { table: t, columns: [t.a, t.b, t.c, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t.b, t.c, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: distinct agg and non-disintct agg with intersected argument sql: | create table t(a int, b int, c int); @@ -864,7 +864,7 @@ └─StreamProject { exprs: [t.a, count(distinct t.b), count(distinct t.c), sum(t.c)] } └─StreamHashAgg { group_key: [t.a], aggs: 
[count(distinct t.b), count(distinct t.c), sum(t.c), count] } └─StreamExchange { dist: HashShard(t.a) } - └─StreamTableScan { table: t, columns: [t.a, t.b, t.c, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t.b, t.c, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: distinct & string_agg sql: | create table t(x int, y varchar); @@ -885,7 +885,7 @@ └─StreamSimpleAgg { aggs: [string_agg(t.y, ',':Varchar), count(distinct t.x), count] } └─StreamExchange { dist: Single } └─StreamProject { exprs: [t.y, ',':Varchar, t.x, t._row_id] } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: distinct & order by on different calls sql: | create table t(x int, y varchar); @@ -906,7 +906,7 @@ └─StreamSimpleAgg { aggs: [string_agg(t.y, ',':Varchar order_by(t.y ASC)), count(distinct t.x), count] } └─StreamExchange { dist: Single } └─StreamProject { exprs: [t.y, ',':Varchar, t.x, t._row_id] } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: distinct & order by on same calls sql: | create table t(x int, y varchar); @@ -927,7 +927,7 @@ └─StreamSimpleAgg { aggs: [string_agg(distinct t.y, ',':Varchar order_by(t.y ASC)), count(distinct t.x), count] } └─StreamExchange { dist: Single } └─StreamProject { exprs: [t.y, ',':Varchar, t.x, t._row_id] } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: distinct & invalid order by sql: | create table t(x int, y varchar); @@ -967,7 +967,7 @@ └─StreamProject { exprs: [t.a, count(distinct t.b) filter((t.b < 100:Int32)), sum(t.c)] } └─StreamHashAgg { group_key: [t.a], aggs: [count(distinct t.b) filter((t.b < 100:Int32)), sum(t.c), count] } └─StreamExchange { dist: HashShard(t.a) } - └─StreamTableScan { table: t, columns: [t.a, t.b, t.c, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t.b, t.c, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: non-distinct agg with filter sql: | create table t(a int, b int, c int); @@ -996,7 +996,7 @@ └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [sum($expr1) filter((t.b < 100:Int32) AND ((t.b * 2:Int32) > 10:Int32))] } └─StreamProject { exprs: [t.b, (Length(t.a) * t.b) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t(v1 int, v2 int); with z(a, b) as 
(select count(distinct v1), count(v2) from t) select a from z; @@ -1018,7 +1018,7 @@ StreamMaterialize { columns: [cnt, i.x(hidden)], stream_key: [i.x], pk_columns: [i.x], pk_conflict: NoCheck } └─StreamProject { exprs: [count, i.x] } └─StreamHashAgg { group_key: [i.x], aggs: [count] } - └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: Backfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } + └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: ArrangementBackfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } - name: distinct aggregates only have one distinct argument doesn't need expand sql: | create table t(x int, y int); @@ -1303,7 +1303,7 @@ └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [sum($expr1), sum(t.v1), count(t.v1)] } └─StreamProject { exprs: [t.v1, (t.v1 * t.v1) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: stddev_samp with other columns sql: | select count(''), stddev_samp(1); @@ -1341,7 +1341,7 @@ └─StreamExchange { dist: HashShard(t.v1, t.v2, t.v3) } └─StreamHashAgg { group_key: [t.v1, t.v2, t.v3, $expr1], aggs: [min(t.v3), sum(t.v1), count] } └─StreamProject { exprs: [t.v1, t.v2, t.v3, t._row_id, Vnode(t._row_id) as $expr1] } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: enable two phase aggregation with simple agg should have two phase agg sql: | SET QUERY_MODE TO DISTRIBUTED; @@ -1360,7 +1360,7 @@ └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr1], aggs: [min(t.v1), sum(t.v2), count] } └─StreamProject { exprs: [t.v1, t.v2, t._row_id, Vnode(t._row_id) as $expr1] } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: disable two phase aggregation with simple agg sql: | SET QUERY_MODE TO DISTRIBUTED; @@ -1376,7 +1376,7 @@ └─StreamProject { exprs: [min(t.v1), sum(t.v2)] } └─StreamSimpleAgg { aggs: [min(t.v1), sum(t.v2), count] } └─StreamExchange { dist: Single } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: force two phase agg with different distributions on inner and outer agg should have exchange sql: | set QUERY_MODE to DISTRIBUTED; @@ -1420,7 +1420,7 @@ └─StreamHashAgg { group_key: [internal_last_seen_value(lineitem.l_commitdate), $expr1], aggs: [max(internal_last_seen_value(lineitem.l_commitdate)), count] } └─StreamProject { exprs: [lineitem.l_orderkey, internal_last_seen_value(lineitem.l_commitdate), Vnode(lineitem.l_orderkey) as $expr1] } └─StreamHashAgg { group_key: [lineitem.l_orderkey], 
aggs: [internal_last_seen_value(lineitem.l_commitdate), count] } - └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_commitdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey], dist: UpstreamHashShard(lineitem.l_orderkey) } + └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_commitdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey], dist: UpstreamHashShard(lineitem.l_orderkey) } - name: two phase agg on hop window input should use two phase agg sql: | SET QUERY_MODE TO DISTRIBUTED; @@ -1465,7 +1465,7 @@ └─StreamProject { exprs: [bid.auction, window_start, bid._row_id, Vnode(bid._row_id) as $expr1] } └─StreamHopWindow { time_col: bid.date_time, slide: 00:00:02, size: 00:00:10, output: [bid.auction, window_start, bid._row_id] } └─StreamFilter { predicate: IsNotNull(bid.date_time) } - └─StreamTableScan { table: bid, columns: [bid.date_time, bid.auction, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.date_time, bid.auction, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } - name: two phase agg with stream SomeShard (via index) but pk satisfies output dist should use shuffle agg sql: | SET QUERY_MODE TO DISTRIBUTED; @@ -1485,7 +1485,7 @@ └─StreamProject { exprs: [count, idx.id] } └─StreamHashAgg { group_key: [idx.id], aggs: [count] } └─StreamExchange { dist: HashShard(idx.id) } - └─StreamTableScan { table: idx, columns: [idx.id], stream_scan_type: Backfill, pk: [idx.id], dist: SomeShard } + └─StreamTableScan { table: idx, columns: [idx.id], stream_scan_type: ArrangementBackfill, pk: [idx.id], dist: SomeShard } - name: two phase agg with stream SomeShard (via index) but pk does not satisfy output dist should use two phase agg sql: | SET QUERY_MODE TO DISTRIBUTED; @@ -1507,7 +1507,7 @@ └─StreamExchange { dist: HashShard(idx.col1) } └─StreamHashAgg { group_key: [idx.col1, $expr1], aggs: [count] } └─StreamProject { exprs: [idx.col1, idx.id, Vnode(idx.id) as $expr1] } - └─StreamTableScan { table: idx, columns: [idx.col1, idx.id], stream_scan_type: Backfill, pk: [idx.id], dist: UpstreamHashShard(idx.id) } + └─StreamTableScan { table: idx, columns: [idx.col1, idx.id], stream_scan_type: ArrangementBackfill, pk: [idx.id], dist: UpstreamHashShard(idx.id) } - name: sort agg on an ascending index sql: | create table t (a int, b int); @@ -1665,7 +1665,7 @@ └─StreamProject { exprs: [first_value(t.x order_by(t.y ASC))] } └─StreamSimpleAgg { aggs: [first_value(t.x order_by(t.y ASC)), count] } └─StreamExchange { dist: Single } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [first_value], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -1676,7 +1676,7 @@ └── StreamExchange Single from 1 Fragment 1 - StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── 
BatchPlanNode @@ -1718,7 +1718,7 @@ └── StreamExchange Single from 1 Fragment 1 - StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 3 ] ├── Upstream └── BatchPlanNode @@ -1732,12 +1732,7 @@ ├── distribution key: [] └── read pk prefix len hint: 0 - Table 2 - ├── columns: [ t_x, count_for_agg_call_0 ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1 ] - ├── distribution key: [] - └── read pk prefix len hint: 1 + Table 2 { columns: [ t_x, count_for_agg_call_0 ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [], read pk prefix len hint: 1 } Table 3 ├── columns: [ vnode, _row_id, t_backfill_finished, t_row_count ] @@ -1761,7 +1756,7 @@ └─StreamProject { exprs: [last_value(t.x order_by(t.y DESC NULLS LAST))] } └─StreamSimpleAgg { aggs: [last_value(t.x order_by(t.y DESC NULLS LAST)), count] } └─StreamExchange { dist: Single } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | CREATE TABLE integers(i INTEGER); SELECT count(DISTINCT i) FROM integers; @@ -1784,7 +1779,7 @@ StreamMaterialize { columns: [count, t.id(hidden)], stream_key: [t.id], pk_columns: [t.id], pk_conflict: NoCheck } └─StreamProject { exprs: [count, t.id] } └─StreamHashAgg { group_key: [t.id], aggs: [count] } - └─StreamTableScan { table: t, columns: [t.id], stream_scan_type: Backfill, pk: [t.id], dist: UpstreamHashShard(t.id) } + └─StreamTableScan { table: t, columns: [t.id], stream_scan_type: ArrangementBackfill, pk: [t.id], dist: UpstreamHashShard(t.id) } - sql: | CREATE TABLE t (a int, b int); SELECT a, sum((sum(b))) OVER (PARTITION BY a ORDER BY a) FROM t GROUP BY a; @@ -1803,7 +1798,7 @@ └─StreamProject { exprs: [t.a, sum(t.b)] } └─StreamHashAgg { group_key: [t.a], aggs: [sum(t.b), count] } └─StreamExchange { dist: HashShard(t.a) } - └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | CREATE TABLE t (a int, b int); SELECT a, row_number() OVER (PARTITION BY a ORDER BY a DESC) FROM t GROUP BY a; @@ -1820,7 +1815,7 @@ └─StreamProject { exprs: [t.a] } └─StreamHashAgg { group_key: [t.a], aggs: [count] } └─StreamExchange { dist: HashShard(t.a) } - └─StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | CREATE TABLE t (a int, b int, c int); SELECT a, b, sum(sum(c)) OVER (PARTITION BY a ORDER BY b) @@ -1843,7 +1838,7 @@ └─StreamProject { exprs: [t.a, t.b, sum(t.c)] } └─StreamHashAgg { group_key: [t.a, t.b], aggs: [sum(t.c), count] } └─StreamExchange { dist: HashShard(t.a, t.b) } - └─StreamTableScan { table: t, columns: [t.a, t.b, t.c, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: 
UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t.b, t.c, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | CREATE TABLE t (a int, b int, c int, d int, e int); SELECT a, b, sum(sum(c)) OVER (PARTITION BY a, avg(d) ORDER BY max(e), b) @@ -1867,4 +1862,4 @@ └─StreamProject { exprs: [t.a, t.b, sum(t.c), max(t.e), (sum(t.d)::Decimal / count(t.d)::Decimal) as $expr1] } └─StreamHashAgg { group_key: [t.a, t.b], aggs: [sum(t.c), sum(t.d), count(t.d), max(t.e), count] } └─StreamExchange { dist: HashShard(t.a, t.b) } - └─StreamTableScan { table: t, columns: [t.a, t.b, t.c, t.d, t.e, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t.b, t.c, t.d, t.e, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/append_only.yaml b/src/frontend/planner_test/tests/testdata/output/append_only.yaml index f6f9da08e24a6..9860ff9e62913 100644 --- a/src/frontend/planner_test/tests/testdata/output/append_only.yaml +++ b/src/frontend/planner_test/tests/testdata/output/append_only.yaml @@ -7,7 +7,7 @@ └─StreamProject { exprs: [t1.v1, max(t1.v2)] } └─StreamHashAgg [append_only] { group_key: [t1.v1], aggs: [max(t1.v2), count] } └─StreamExchange { dist: HashShard(t1.v1) } - └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - sql: | create table t1 (v1 int, v2 int) append only; create table t2 (v1 int, v3 int) append only; @@ -17,9 +17,9 @@ └─StreamExchange { dist: HashShard(t1.v1, t1._row_id, t2._row_id) } └─StreamHashJoin [append_only] { type: Inner, predicate: t1.v1 = t2.v1, output: [t1.v1, t1.v2, t2.v3, t1._row_id, t2._row_id] } ├─StreamExchange { dist: HashShard(t1.v1) } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2.v1) } - └─StreamTableScan { table: t2, columns: [t2.v1, t2.v3, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v1, t2.v3, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - sql: | create table t1 (v1 int, v2 int) append only; select v1 from t1 order by v1 limit 3 offset 3; @@ -27,7 +27,7 @@ StreamMaterialize { columns: [v1, t1._row_id(hidden)], stream_key: [t1._row_id], pk_columns: [v1, t1._row_id], pk_conflict: NoCheck } └─StreamTopN [append_only] { order: [t1.v1 ASC], limit: 3, offset: 3 } └─StreamExchange { dist: Single } - └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - sql: | create table t1 (v1 int, v2 int) append only; select max(v1) 
as max_v1 from t1; @@ -37,4 +37,4 @@ └─StreamSimpleAgg [append_only] { aggs: [max(max(t1.v1)), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [max(t1.v1)] } - └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/basic_query.yaml b/src/frontend/planner_test/tests/testdata/output/basic_query.yaml index 9de236a77664f..327087a4dea2c 100644 --- a/src/frontend/planner_test/tests/testdata/output/basic_query.yaml +++ b/src/frontend/planner_test/tests/testdata/output/basic_query.yaml @@ -18,7 +18,7 @@ └─BatchScan { table: t, columns: [t.v1, t.v2], distribution: SomeShard } stream_plan: |- StreamMaterialize { columns: [v1, v2, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (v1 bigint, v2 double precision); select t2.* from t; @@ -32,7 +32,7 @@ stream_plan: |- StreamMaterialize { columns: [t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } └─StreamFilter { predicate: true } - └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (v1 int); select * from t where v1<1; @@ -43,7 +43,7 @@ stream_plan: |- StreamMaterialize { columns: [v1, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } └─StreamFilter { predicate: (t.v1 < 1:Int32) } - └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: test boolean expression common factor extraction sql: | create table t (v1 Boolean, v2 Boolean, v3 Boolean); @@ -100,7 +100,7 @@ └─BatchScan { table: t, columns: [t.v1], distribution: SomeShard } stream_plan: |- StreamMaterialize { columns: [v1, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } - └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: select 1 batch_plan: 'BatchValues { rows: [[1:Int32]] }' - sql: | @@ -206,14 +206,14 @@ select a, b from mv; stream_plan: |- StreamMaterialize { columns: [a, b, mv.t._row_id(hidden)], stream_key: [mv.t._row_id], pk_columns: [mv.t._row_id], pk_conflict: NoCheck } - └─StreamTableScan { table: mv, columns: [mv.a, mv.b, mv.t._row_id], stream_scan_type: Backfill, pk: [mv.t._row_id], dist: UpstreamHashShard(mv.t._row_id) } 
+ └─StreamTableScan { table: mv, columns: [mv.a, mv.b, mv.t._row_id], stream_scan_type: ArrangementBackfill, pk: [mv.t._row_id], dist: UpstreamHashShard(mv.t._row_id) } - sql: | create table t (v1 int, v2 int); create materialized view mv(a,b) as select v1+1,v2+1 from t; select * from mv; stream_plan: |- StreamMaterialize { columns: [a, b, mv.t._row_id(hidden)], stream_key: [mv.t._row_id], pk_columns: [mv.t._row_id], pk_conflict: NoCheck } - └─StreamTableScan { table: mv, columns: [mv.a, mv.b, mv.t._row_id], stream_scan_type: Backfill, pk: [mv.t._row_id], dist: UpstreamHashShard(mv.t._row_id) } + └─StreamTableScan { table: mv, columns: [mv.a, mv.b, mv.t._row_id], stream_scan_type: ArrangementBackfill, pk: [mv.t._row_id], dist: UpstreamHashShard(mv.t._row_id) } - sql: | create table t (id int primary key, col int); create index idx on t(col); @@ -221,7 +221,7 @@ stream_plan: |- StreamMaterialize { columns: [id], stream_key: [id], pk_columns: [id], pk_conflict: NoCheck } └─StreamExchange { dist: HashShard(idx.id) } - └─StreamTableScan { table: idx, columns: [idx.id], stream_scan_type: Backfill, pk: [idx.id], dist: SomeShard } + └─StreamTableScan { table: idx, columns: [idx.id], stream_scan_type: ArrangementBackfill, pk: [idx.id], dist: SomeShard } - sql: | select * from generate_series(1, 10000000, 1) where Now() is null; batch_plan: 'BatchValues { rows: [] }' @@ -240,7 +240,7 @@ └─StreamExchange { dist: HashShard(t.v, t._row_id, t._row_id) } └─StreamHashJoin { type: Inner, predicate: t.v = t.v, output: [t.v, t._row_id, t._row_id] } ├─StreamExchange { dist: HashShard(t.v) } - │ └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: HashShard(t.v) } └─StreamFilter { predicate: false:Boolean } - └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/bushy_join.yaml b/src/frontend/planner_test/tests/testdata/output/bushy_join.yaml index 47ff0c01621a8..6bd212dce06b8 100644 --- a/src/frontend/planner_test/tests/testdata/output/bushy_join.yaml +++ b/src/frontend/planner_test/tests/testdata/output/bushy_join.yaml @@ -19,30 +19,30 @@ │ │ └─StreamHashJoin { type: Inner, predicate: t.id = t.id, output: [t.id, t.id, t._row_id, t._row_id] } │ │ ├─StreamExchange { dist: HashShard(t.id) } │ │ │ └─StreamFilter { predicate: (t.id = t.id) } - │ │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } │ │ └─StreamExchange { dist: HashShard(t.id) } │ │ └─StreamFilter { predicate: (t.id = t.id) } - │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } │ └─StreamHashJoin { type: 
Inner, predicate: t.id = t.id, output: [t.id, t.id, t._row_id, t._row_id] } │ ├─StreamExchange { dist: HashShard(t.id) } │ │ └─StreamFilter { predicate: (t.id = t.id) } - │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } │ └─StreamExchange { dist: HashShard(t.id) } │ └─StreamFilter { predicate: (t.id = t.id) } - │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamHashJoin { type: Inner, predicate: t.id = t.id AND t.id = t.id AND t.id = t.id AND t.id = t.id, output: [t.id, t.id, t.id, t.id, t._row_id, t._row_id, t._row_id, t._row_id] } ├─StreamExchange { dist: HashShard(t.id) } │ └─StreamHashJoin { type: Inner, predicate: t.id = t.id, output: [t.id, t.id, t._row_id, t._row_id] } │ ├─StreamExchange { dist: HashShard(t.id) } │ │ └─StreamFilter { predicate: (t.id = t.id) } - │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } │ └─StreamExchange { dist: HashShard(t.id) } │ └─StreamFilter { predicate: (t.id = t.id) } - │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamHashJoin { type: Inner, predicate: t.id = t.id, output: [t.id, t.id, t._row_id, t._row_id] } ├─StreamExchange { dist: HashShard(t.id) } │ └─StreamFilter { predicate: (t.id = t.id) } - │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: HashShard(t.id) } └─StreamFilter { predicate: (t.id = t.id) } - └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.id, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/ch_benchmark.yaml b/src/frontend/planner_test/tests/testdata/output/ch_benchmark.yaml index 61a26891cd660..14f7aa081817a 100644 --- a/src/frontend/planner_test/tests/testdata/output/ch_benchmark.yaml +++ b/src/frontend/planner_test/tests/testdata/output/ch_benchmark.yaml @@ -31,7 +31,7 @@ └─StreamExchange { dist: HashShard(order_line.ol_number) } └─StreamProject { exprs: [order_line.ol_number, order_line.ol_quantity, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id] } └─StreamFilter { predicate: (order_line.ol_delivery_d > '2007-01-02 00:00:00':Timestamp) } - └─StreamTableScan { table: order_line, columns: 
[order_line.ol_number, order_line.ol_quantity, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_delivery_d], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └─StreamTableScan { table: order_line, columns: [order_line.ol_number, order_line.ol_quantity, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_delivery_d], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [ol_number, sum_qty, sum_amount, avg_qty, avg_amount, count_order], stream_key: [ol_number], pk_columns: [ol_number], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } @@ -42,7 +42,7 @@ Fragment 1 StreamProject { exprs: [order_line.ol_number, order_line.ol_quantity, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id] } └── StreamFilter { predicate: (order_line.ol_delivery_d > '2007-01-02 00:00:00':Timestamp) } - └── StreamTableScan { table: order_line, columns: [order_line.ol_number, order_line.ol_quantity, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_delivery_d], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └── StreamTableScan { table: order_line, columns: [order_line.ol_number, order_line.ol_quantity, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_delivery_d], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -148,36 +148,36 @@ │ │ │ ├─StreamExchange { dist: HashShard(region.r_regionkey) } │ │ │ │ └─StreamProject { exprs: [region.r_regionkey] } │ │ │ │ └─StreamFilter { predicate: Like(region.r_name, 'EUROP%':Varchar) } - │ │ │ │ └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } + │ │ │ │ └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } │ │ │ └─StreamExchange { dist: HashShard(nation.n_regionkey) } │ │ │ └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [supplier.s_suppkey, nation.n_regionkey, supplier.s_nationkey, nation.n_nationkey] } │ │ │ ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ │ │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ │ │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: 
[supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } │ │ │ └─StreamExchange { dist: HashShard(nation.n_nationkey) } - │ │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } │ │ └─StreamExchange { dist: HashShard($expr1) } │ │ └─StreamProject { exprs: [stock.s_i_id, stock.s_quantity, ((stock.s_w_id * stock.s_i_id) % 10000:Int32)::Int64 as $expr1, stock.s_w_id] } │ │ └─StreamFilter { predicate: (stock.s_i_id = stock.s_i_id) } - │ │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_quantity], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } + │ │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_quantity], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } │ └─StreamHashJoin { type: Inner, predicate: item.i_id = stock.s_i_id, output: all } │ ├─StreamExchange { dist: HashShard(item.i_id) } │ │ └─StreamProject { exprs: [item.i_id, item.i_name] } │ │ └─StreamFilter { predicate: Like(item.i_data, '%b':Varchar) } - │ │ └─StreamTableScan { table: item, columns: [item.i_id, item.i_name, item.i_data], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } + │ │ └─StreamTableScan { table: item, columns: [item.i_id, item.i_name, item.i_data], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } │ └─StreamExchange { dist: HashShard(stock.s_i_id) } - │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_quantity], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } + │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_quantity], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } └─StreamExchange { dist: HashShard(supplier.s_suppkey) } └─StreamHashJoin { type: Inner, predicate: region.r_regionkey = nation.n_regionkey, output: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, supplier.s_comment, nation.n_name, region.r_regionkey, supplier.s_nationkey] } ├─StreamExchange { dist: HashShard(region.r_regionkey) } │ └─StreamProject { exprs: [region.r_regionkey] } │ └─StreamFilter { predicate: Like(region.r_name, 'EUROP%':Varchar) } - │ └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } + │ └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } └─StreamExchange { dist: HashShard(nation.n_regionkey) } └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, supplier.s_comment, nation.n_name, nation.n_regionkey, supplier.s_nationkey, nation.n_nationkey] } ├─StreamExchange { dist: 
HashShard(supplier.s_nationkey) } - │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey, supplier.s_phone, supplier.s_comment], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey, supplier.s_phone, supplier.s_comment], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } └─StreamExchange { dist: HashShard(nation.n_nationkey) } - └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [s_suppkey, s_name, n_name, i_id, i_name, s_address, s_phone, s_comment, stock.s_i_id(hidden), stock.s_w_id(hidden), min(stock.s_quantity)(hidden), $expr2(hidden), region.r_regionkey(hidden), supplier.s_nationkey(hidden)], stream_key: [stock.s_i_id, stock.s_w_id, min(stock.s_quantity), region.r_regionkey, supplier.s_nationkey, $expr2], pk_columns: [n_name, s_name, i_id, stock.s_i_id, stock.s_w_id, min(stock.s_quantity), region.r_regionkey, supplier.s_nationkey, $expr2], pk_conflict: NoCheck } @@ -212,7 +212,7 @@ Fragment 5 StreamProject { exprs: [region.r_regionkey] } └── StreamFilter { predicate: Like(region.r_name, 'EUROP%':Varchar) } - └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 18 ] } + └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 18 ] } ├── Upstream └── BatchPlanNode @@ -222,31 +222,31 @@ └── StreamExchange Hash([0]) from 8 Fragment 7 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 23 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 23 ] } ├── Upstream └── BatchPlanNode Fragment 8 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 24 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 24 ] } ├── Upstream └── BatchPlanNode Fragment 9 StreamProject { exprs: [stock.s_i_id, stock.s_quantity, ((stock.s_w_id * stock.s_i_id) % 10000:Int32)::Int64 as $expr1, stock.s_w_id] } └── StreamFilter { predicate: (stock.s_i_id = stock.s_i_id) } - └── StreamTableScan { table: stock, columns: [stock.s_i_id, 
stock.s_w_id, stock.s_quantity], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 25 ] } + └── StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_quantity], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 25 ] } ├── Upstream └── BatchPlanNode Fragment 10 StreamProject { exprs: [item.i_id, item.i_name] } └── StreamFilter { predicate: Like(item.i_data, '%b':Varchar) } - └── StreamTableScan { table: item, columns: [item.i_id, item.i_name, item.i_data], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } { tables: [ StreamScan: 30 ] } + └── StreamTableScan { table: item, columns: [item.i_id, item.i_name, item.i_data], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } { tables: [ StreamScan: 30 ] } ├── Upstream └── BatchPlanNode Fragment 11 - StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_quantity], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 31 ] } + StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_quantity], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 31 ] } ├── Upstream └── BatchPlanNode @@ -258,7 +258,7 @@ Fragment 13 StreamProject { exprs: [region.r_regionkey] } └── StreamFilter { predicate: Like(region.r_name, 'EUROP%':Varchar) } - └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 36 ] } + └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 36 ] } ├── Upstream └── BatchPlanNode @@ -268,12 +268,12 @@ └── StreamExchange Hash([0]) from 16 Fragment 15 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey, supplier.s_phone, supplier.s_comment], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 41 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey, supplier.s_phone, supplier.s_comment], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 41 ] } ├── Upstream └── BatchPlanNode Fragment 16 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 42 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 42 ] } ├── Upstream └── BatchPlanNode @@ -422,17 +422,17 @@ │ ├─StreamExchange { dist: HashShard(customer.c_d_id, customer.c_w_id) } │ │ └─StreamProject { exprs: [customer.c_id, 
customer.c_d_id, customer.c_w_id] } │ │ └─StreamFilter { predicate: (customer.c_w_id = customer.c_w_id) AND (customer.c_d_id = customer.c_d_id) AND Like(customer.c_state, 'a%':Varchar) } - │ │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } + │ │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } │ └─StreamExchange { dist: HashShard(new_order.no_d_id, new_order.no_w_id) } │ └─StreamFilter { predicate: (new_order.no_w_id = new_order.no_w_id) AND (new_order.no_d_id = new_order.no_d_id) AND (new_order.no_o_id = new_order.no_o_id) } - │ └─StreamTableScan { table: new_order, columns: [new_order.no_o_id, new_order.no_d_id, new_order.no_w_id], stream_scan_type: Backfill, pk: [new_order.no_w_id, new_order.no_d_id, new_order.no_o_id], dist: UpstreamHashShard(new_order.no_w_id, new_order.no_d_id, new_order.no_o_id) } + │ └─StreamTableScan { table: new_order, columns: [new_order.no_o_id, new_order.no_d_id, new_order.no_w_id], stream_scan_type: ArrangementBackfill, pk: [new_order.no_w_id, new_order.no_d_id, new_order.no_o_id], dist: UpstreamHashShard(new_order.no_w_id, new_order.no_d_id, new_order.no_o_id) } └─StreamHashJoin { type: Inner, predicate: orders.o_w_id = order_line.ol_w_id AND orders.o_d_id = order_line.ol_d_id AND orders.o_id = order_line.ol_o_id, output: all } ├─StreamExchange { dist: HashShard(orders.o_id, orders.o_d_id, orders.o_w_id) } │ └─StreamFilter { predicate: (orders.o_d_id = orders.o_d_id) AND (orders.o_w_id = orders.o_w_id) AND (orders.o_entry_d > '2007-01-02 00:00:00':Timestamp) } - │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } + │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } └─StreamExchange { dist: HashShard(order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id) } └─StreamFilter { predicate: (order_line.ol_d_id = order_line.ol_d_id) AND (order_line.ol_w_id = order_line.ol_w_id) } - └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } stream_dist_plan: |+ Fragment 0 
StreamMaterialize { columns: [ol_o_id, ol_w_id, ol_d_id, revenue, o_entry_d], stream_key: [o_entry_d, ol_o_id, ol_d_id, ol_w_id], pk_columns: [revenue, o_entry_d, ol_o_id, ol_d_id, ol_w_id], pk_conflict: NoCheck } @@ -457,25 +457,25 @@ Fragment 3 StreamProject { exprs: [customer.c_id, customer.c_d_id, customer.c_w_id] } └── StreamFilter { predicate: (customer.c_w_id = customer.c_w_id) AND (customer.c_d_id = customer.c_d_id) AND Like(customer.c_state, 'a%':Varchar) } - └── StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } { tables: [ StreamScan: 9 ] } + └── StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } { tables: [ StreamScan: 9 ] } ├── Upstream └── BatchPlanNode Fragment 4 StreamFilter { predicate: (new_order.no_w_id = new_order.no_w_id) AND (new_order.no_d_id = new_order.no_d_id) AND (new_order.no_o_id = new_order.no_o_id) } - └── StreamTableScan { table: new_order, columns: [new_order.no_o_id, new_order.no_d_id, new_order.no_w_id], stream_scan_type: Backfill, pk: [new_order.no_w_id, new_order.no_d_id, new_order.no_o_id], dist: UpstreamHashShard(new_order.no_w_id, new_order.no_d_id, new_order.no_o_id) } { tables: [ StreamScan: 10 ] } + └── StreamTableScan { table: new_order, columns: [new_order.no_o_id, new_order.no_d_id, new_order.no_w_id], stream_scan_type: ArrangementBackfill, pk: [new_order.no_w_id, new_order.no_d_id, new_order.no_o_id], dist: UpstreamHashShard(new_order.no_w_id, new_order.no_d_id, new_order.no_o_id) } { tables: [ StreamScan: 10 ] } ├── Upstream └── BatchPlanNode Fragment 5 StreamFilter { predicate: (orders.o_d_id = orders.o_d_id) AND (orders.o_w_id = orders.o_w_id) AND (orders.o_entry_d > '2007-01-02 00:00:00':Timestamp) } - └── StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 15 ] } + └── StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 15 ] } ├── Upstream └── BatchPlanNode Fragment 6 StreamFilter { predicate: (order_line.ol_d_id = order_line.ol_d_id) AND (order_line.ol_w_id = order_line.ol_w_id) } - └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } { tables: [ StreamScan: 16 ] } + └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, 
order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } { tables: [ StreamScan: 16 ] } ├── Upstream └── BatchPlanNode @@ -554,10 +554,10 @@ └─StreamHashJoin { type: LeftSemi, predicate: orders.o_id = order_line.ol_o_id AND orders.o_w_id = order_line.ol_w_id AND orders.o_d_id = order_line.ol_d_id AND (order_line.ol_delivery_d >= orders.o_entry_d), output: [orders.o_ol_cnt, orders.o_w_id, orders.o_d_id, orders.o_id] } ├─StreamExchange { dist: HashShard(orders.o_id, orders.o_w_id, orders.o_d_id) } │ └─StreamFilter { predicate: (orders.o_entry_d >= '2007-01-02 00:00:00':Timestamp) AND (orders.o_entry_d < '2032-01-02 00:00:00':Timestamp) } - │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d, orders.o_ol_cnt], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } + │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d, orders.o_ol_cnt], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } └─StreamExchange { dist: HashShard(order_line.ol_o_id, order_line.ol_w_id, order_line.ol_d_id) } └─StreamProject { exprs: [order_line.ol_o_id, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_delivery_d, order_line.ol_number] } - └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [o_ol_cnt, order_count], stream_key: [o_ol_cnt], pk_columns: [o_ol_cnt], pk_conflict: NoCheck } @@ -573,14 +573,14 @@ Fragment 2 StreamFilter { predicate: (orders.o_entry_d >= '2007-01-02 00:00:00':Timestamp) AND (orders.o_entry_d < '2032-01-02 00:00:00':Timestamp) } - └── StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d, orders.o_ol_cnt], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } + └── StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d, orders.o_ol_cnt], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode Fragment 3 StreamProject { exprs: [order_line.ol_o_id, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_delivery_d, order_line.ol_number] } - └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, 
order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } ├── tables: [ StreamScan: 6 ] ├── Upstream └── BatchPlanNode @@ -675,30 +675,30 @@ │ ├─StreamExchange { dist: HashShard(orders.o_id, customer.c_d_id, customer.c_w_id, orders.o_w_id) } │ │ └─StreamHashJoin { type: Inner, predicate: customer.c_id = orders.o_c_id AND customer.c_w_id = orders.o_w_id AND customer.c_d_id = orders.o_d_id, output: [customer.c_d_id, customer.c_w_id, customer.c_state, orders.o_id, orders.o_d_id, orders.o_w_id, customer.c_id] } │ │ ├─StreamExchange { dist: HashShard(customer.c_d_id, customer.c_w_id, customer.c_id) } - │ │ │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } + │ │ │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } │ │ └─StreamExchange { dist: HashShard(orders.o_d_id, orders.o_w_id, orders.o_c_id) } │ │ └─StreamProject { exprs: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id] } │ │ └─StreamFilter { predicate: (orders.o_entry_d >= '2007-01-02 00:00:00':Timestamp) } - │ │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } + │ │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } │ └─StreamExchange { dist: HashShard(order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, stock.s_w_id) } │ └─StreamHashJoin { type: Inner, predicate: order_line.ol_w_id = stock.s_w_id AND order_line.ol_i_id = stock.s_i_id, output: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_amount, stock.s_i_id, stock.s_w_id, order_line.ol_number, order_line.ol_i_id] } │ ├─StreamExchange { dist: HashShard(order_line.ol_w_id, order_line.ol_i_id) } │ │ └─StreamFilter { predicate: (order_line.ol_d_id = order_line.ol_d_id) } - │ │ └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, 
order_line.ol_number) } + │ │ └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } │ └─StreamExchange { dist: HashShard(stock.s_w_id, stock.s_i_id) } - │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } + │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } └─StreamExchange { dist: HashShard(supplier.s_suppkey, supplier.s_nationkey) } └─StreamHashJoin { type: Inner, predicate: region.r_regionkey = nation.n_regionkey, output: [supplier.s_suppkey, supplier.s_nationkey, nation.n_name, region.r_regionkey] } ├─StreamExchange { dist: HashShard(region.r_regionkey) } │ └─StreamProject { exprs: [region.r_regionkey] } │ └─StreamFilter { predicate: (region.r_name = 'EUROPE':Varchar) } - │ └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } + │ └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } └─StreamExchange { dist: HashShard(nation.n_regionkey) } └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [supplier.s_suppkey, supplier.s_nationkey, nation.n_name, nation.n_regionkey, nation.n_nationkey] } ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } └─StreamExchange { dist: HashShard(nation.n_nationkey) } - └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [n_name, revenue], stream_key: [n_name], pk_columns: [n_name], pk_conflict: NoCheck } @@ -727,14 +727,14 @@ └── StreamExchange Hash([1, 2, 3]) from 5 Fragment 4 - StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } { tables: [ StreamScan: 13 ] } + StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: 
ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } { tables: [ StreamScan: 13 ] } ├── Upstream └── BatchPlanNode Fragment 5 StreamProject { exprs: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id] } └── StreamFilter { predicate: (orders.o_entry_d >= '2007-01-02 00:00:00':Timestamp) } - └── StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 14 ] } + └── StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 14 ] } ├── Upstream └── BatchPlanNode @@ -745,12 +745,12 @@ Fragment 7 StreamFilter { predicate: (order_line.ol_d_id = order_line.ol_d_id) } - └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } { tables: [ StreamScan: 19 ] } + └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } { tables: [ StreamScan: 19 ] } ├── Upstream └── BatchPlanNode Fragment 8 - StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 20 ] } + StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 20 ] } ├── Upstream └── BatchPlanNode @@ -762,7 +762,7 @@ Fragment 10 StreamProject { exprs: [region.r_regionkey] } └── StreamFilter { predicate: (region.r_name = 'EUROPE':Varchar) } - └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 25 ] } + └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 25 ] } ├── Upstream └── BatchPlanNode @@ -772,12 +772,12 @@ └── StreamExchange Hash([0]) from 13 Fragment 12 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 30 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], 
stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 30 ] } ├── Upstream └── BatchPlanNode Fragment 13 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 31 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 31 ] } ├── Upstream └── BatchPlanNode @@ -875,7 +875,7 @@ └─StreamStatelessSimpleAgg { aggs: [sum(order_line.ol_amount)] } └─StreamProject { exprs: [order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number] } └─StreamFilter { predicate: (order_line.ol_delivery_d >= '1997-01-01 00:00:00':Timestamp) AND (order_line.ol_delivery_d < '2030-01-01 00:00:00':Timestamp) AND (order_line.ol_quantity >= 1:Int32) AND (order_line.ol_quantity <= 100000:Int32) } - └─StreamTableScan { table: order_line, columns: [order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_delivery_d, order_line.ol_quantity], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └─StreamTableScan { table: order_line, columns: [order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_delivery_d, order_line.ol_quantity], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [revenue], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -889,7 +889,7 @@ StreamStatelessSimpleAgg { aggs: [sum(order_line.ol_amount)] } └── StreamProject { exprs: [order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number] } └── StreamFilter { predicate: (order_line.ol_delivery_d >= '1997-01-01 00:00:00':Timestamp) AND (order_line.ol_delivery_d < '2030-01-01 00:00:00':Timestamp) AND (order_line.ol_quantity >= 1:Int32) AND (order_line.ol_quantity <= 100000:Int32) } - └── StreamTableScan { table: order_line, columns: [order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_delivery_d, order_line.ol_quantity], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └── StreamTableScan { table: order_line, columns: [order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_delivery_d, order_line.ol_quantity], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ 
-986,25 +986,25 @@ │ │ ├─StreamExchange { dist: HashShard(order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_w_id) } │ │ │ └─StreamHashJoin { type: Inner, predicate: stock.s_w_id = order_line.ol_supply_w_id AND stock.s_i_id = order_line.ol_i_id, output: [stock.s_i_id, stock.s_w_id, order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_amount, order_line.ol_number] } │ │ │ ├─StreamExchange { dist: HashShard(stock.s_i_id, stock.s_w_id) } - │ │ │ │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } + │ │ │ │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } │ │ │ └─StreamExchange { dist: HashShard(order_line.ol_i_id, order_line.ol_supply_w_id) } │ │ │ └─StreamProject { exprs: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_supply_w_id, order_line.ol_amount, order_line.ol_number] } │ │ │ └─StreamFilter { predicate: (order_line.ol_w_id = order_line.ol_w_id) AND (order_line.ol_d_id = order_line.ol_d_id) AND (order_line.ol_delivery_d >= '2007-01-02 00:00:00':Timestamp) AND (order_line.ol_delivery_d <= '2032-01-02 00:00:00':Timestamp) } - │ │ │ └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_supply_w_id, order_line.ol_amount, order_line.ol_number, order_line.ol_delivery_d], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + │ │ │ └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_supply_w_id, order_line.ol_amount, order_line.ol_number, order_line.ol_delivery_d], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } │ │ └─StreamExchange { dist: HashShard(orders.o_id, orders.o_d_id, orders.o_w_id, customer.c_d_id, customer.c_w_id) } │ │ └─StreamHashJoin { type: Inner, predicate: orders.o_c_id = customer.c_id AND orders.o_w_id = customer.c_w_id AND orders.o_d_id = customer.c_d_id, output: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d, customer.c_d_id, customer.c_w_id, customer.c_state, orders.o_c_id, customer.c_id] } │ │ ├─StreamExchange { dist: HashShard(orders.o_d_id, orders.o_w_id, orders.o_c_id) } - │ │ │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } + │ │ │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } │ │ └─StreamExchange { dist: HashShard(customer.c_d_id, customer.c_w_id, customer.c_id) } - │ │ 
└─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } + │ │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } │ └─StreamExchange { dist: HashShard(nation.n_nationkey) } - │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } └─StreamExchange { dist: HashShard(supplier.s_suppkey) } └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [supplier.s_suppkey, supplier.s_nationkey, nation.n_name, nation.n_nationkey] } ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } └─StreamExchange { dist: HashShard(nation.n_nationkey) } - └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [supp_nation, cust_nation, l_year, revenue, supplier.s_nationkey(hidden)], stream_key: [supplier.s_nationkey, cust_nation, l_year], pk_columns: [supplier.s_nationkey, cust_nation, l_year], pk_conflict: NoCheck } @@ -1040,14 +1040,14 @@ └── StreamExchange Hash([3, 4]) from 6 Fragment 5 - StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 17 ] } + StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 17 ] } ├── Upstream └── BatchPlanNode Fragment 6 StreamProject { exprs: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_supply_w_id, order_line.ol_amount, order_line.ol_number] } └── StreamFilter { predicate: (order_line.ol_w_id = order_line.ol_w_id) AND (order_line.ol_d_id = order_line.ol_d_id) AND (order_line.ol_delivery_d >= '2007-01-02 00:00:00':Timestamp) AND (order_line.ol_delivery_d <= '2032-01-02 00:00:00':Timestamp) } - └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_supply_w_id, 
order_line.ol_amount, order_line.ol_number, order_line.ol_delivery_d], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_supply_w_id, order_line.ol_amount, order_line.ol_number, order_line.ol_delivery_d], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } ├── tables: [ StreamScan: 18 ] ├── Upstream └── BatchPlanNode @@ -1058,17 +1058,17 @@ └── StreamExchange Hash([1, 2, 0]) from 9 Fragment 8 - StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 23 ] } + StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 23 ] } ├── Upstream └── BatchPlanNode Fragment 9 - StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } { tables: [ StreamScan: 24 ] } + StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } { tables: [ StreamScan: 24 ] } ├── Upstream └── BatchPlanNode Fragment 10 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 25 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 25 ] } ├── Upstream └── BatchPlanNode @@ -1078,12 +1078,12 @@ └── StreamExchange Hash([0]) from 13 Fragment 12 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 30 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 30 ] } ├── Upstream └── BatchPlanNode Fragment 13 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 31 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], 
stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 31 ] } ├── Upstream └── BatchPlanNode @@ -1251,36 +1251,36 @@ │ │ │ └─StreamHashJoin { type: Inner, predicate: orders.o_c_id = customer.c_id AND orders.o_w_id = customer.c_w_id AND orders.o_d_id = customer.c_d_id, output: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d, customer.c_d_id, customer.c_w_id, customer.c_state, orders.o_c_id, customer.c_id] } │ │ │ ├─StreamExchange { dist: HashShard(orders.o_d_id, orders.o_w_id, orders.o_c_id) } │ │ │ │ └─StreamFilter { predicate: (orders.o_entry_d >= '2007-01-02 00:00:00':Timestamp) AND (orders.o_entry_d <= '2032-01-02 00:00:00':Timestamp) } - │ │ │ │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } + │ │ │ │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } │ │ │ └─StreamExchange { dist: HashShard(customer.c_d_id, customer.c_w_id, customer.c_id) } - │ │ │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } + │ │ │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } │ │ └─StreamExchange { dist: HashShard(order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id) } │ │ └─StreamHashJoin { type: Inner, predicate: stock.s_i_id = order_line.ol_i_id AND stock.s_w_id = order_line.ol_supply_w_id AND item.i_id = order_line.ol_i_id, output: [stock.s_i_id, stock.s_w_id, order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_amount, item.i_id, order_line.ol_number] } │ │ ├─StreamExchange { dist: HashShard(item.i_id, stock.s_w_id) } │ │ │ └─StreamHashJoin { type: Inner, predicate: item.i_id = stock.s_i_id, output: all } │ │ │ ├─StreamExchange { dist: HashShard(item.i_id) } │ │ │ │ └─StreamFilter { predicate: (item.i_id < 1000:Int32) } - │ │ │ │ └─StreamTableScan { table: item, columns: [item.i_id], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } + │ │ │ │ └─StreamTableScan { table: item, columns: [item.i_id], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } │ │ │ └─StreamExchange { dist: HashShard(stock.s_i_id) } │ │ │ └─StreamFilter { predicate: (stock.s_i_id < 1000:Int32) } - │ │ │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } + │ │ │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } │ │ └─StreamExchange { dist: HashShard(order_line.ol_i_id, 
order_line.ol_supply_w_id) } │ │ └─StreamFilter { predicate: (order_line.ol_w_id = order_line.ol_w_id) AND (order_line.ol_d_id = order_line.ol_d_id) AND (order_line.ol_i_id = order_line.ol_i_id) AND (order_line.ol_i_id < 1000:Int32) } - │ │ └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_supply_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + │ │ └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_supply_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } │ └─StreamExchange { dist: HashShard(supplier.s_suppkey) } │ └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [supplier.s_suppkey, nation.n_name, supplier.s_nationkey, nation.n_nationkey] } │ ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } │ └─StreamExchange { dist: HashShard(nation.n_nationkey) } - │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } └─StreamExchange { dist: HashShard(nation.n_nationkey) } └─StreamHashJoin { type: Inner, predicate: nation.n_regionkey = region.r_regionkey, output: all } ├─StreamExchange { dist: HashShard(nation.n_regionkey) } - │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } └─StreamExchange { dist: HashShard(region.r_regionkey) } └─StreamProject { exprs: [region.r_regionkey] } └─StreamFilter { predicate: (region.r_name = 'ASIA':Varchar) } - └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } + └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [l_year, mkt_share], stream_key: [l_year], pk_columns: [l_year], pk_conflict: NoCheck } @@ 
-1318,12 +1318,12 @@ Fragment 5 StreamFilter { predicate: (orders.o_entry_d >= '2007-01-02 00:00:00':Timestamp) AND (orders.o_entry_d <= '2032-01-02 00:00:00':Timestamp) } - └── StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 17 ] } + └── StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 17 ] } ├── Upstream └── BatchPlanNode Fragment 6 - StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } { tables: [ StreamScan: 18 ] } + StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } { tables: [ StreamScan: 18 ] } ├── Upstream └── BatchPlanNode @@ -1339,19 +1339,19 @@ Fragment 9 StreamFilter { predicate: (item.i_id < 1000:Int32) } - └── StreamTableScan { table: item, columns: [item.i_id], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } { tables: [ StreamScan: 27 ] } + └── StreamTableScan { table: item, columns: [item.i_id], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } { tables: [ StreamScan: 27 ] } ├── Upstream └── BatchPlanNode Fragment 10 StreamFilter { predicate: (stock.s_i_id < 1000:Int32) } - └── StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 28 ] } + └── StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 28 ] } ├── Upstream └── BatchPlanNode Fragment 11 StreamFilter { predicate: (order_line.ol_w_id = order_line.ol_w_id) AND (order_line.ol_d_id = order_line.ol_d_id) AND (order_line.ol_i_id = order_line.ol_i_id) AND (order_line.ol_i_id < 1000:Int32) } - └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_supply_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } { tables: [ StreamScan: 29 ] } + └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_supply_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: 
UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } { tables: [ StreamScan: 29 ] } ├── Upstream └── BatchPlanNode @@ -1361,12 +1361,12 @@ └── StreamExchange Hash([0]) from 14 Fragment 13 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 34 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 34 ] } ├── Upstream └── BatchPlanNode Fragment 14 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 35 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 35 ] } ├── Upstream └── BatchPlanNode @@ -1376,14 +1376,14 @@ └── StreamExchange Hash([0]) from 17 Fragment 16 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 40 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 40 ] } ├── Upstream └── BatchPlanNode Fragment 17 StreamProject { exprs: [region.r_regionkey] } └── StreamFilter { predicate: (region.r_name = 'ASIA':Varchar) } - └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 41 ] } + └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 41 ] } ├── Upstream └── BatchPlanNode @@ -1550,22 +1550,22 @@ │ │ ├─StreamExchange { dist: HashShard(item.i_id) } │ │ │ └─StreamProject { exprs: [item.i_id] } │ │ │ └─StreamFilter { predicate: Like(item.i_data, '%BB':Varchar) } - │ │ │ └─StreamTableScan { table: item, columns: [item.i_id, item.i_data], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } + │ │ │ └─StreamTableScan { table: item, columns: [item.i_id, item.i_data], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } │ │ └─StreamExchange { dist: HashShard(stock.s_i_id) } - │ │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } + │ │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } │ └─StreamExchange { dist: HashShard(order_line.ol_i_id, order_line.ol_supply_w_id) } │ └─StreamHashJoin { type: Inner, predicate: orders.o_w_id = order_line.ol_w_id AND orders.o_d_id = order_line.ol_d_id AND orders.o_id = 
order_line.ol_o_id, output: [orders.o_entry_d, order_line.ol_i_id, order_line.ol_supply_w_id, order_line.ol_amount, orders.o_w_id, orders.o_d_id, orders.o_id, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number] } │ ├─StreamExchange { dist: HashShard(orders.o_id, orders.o_d_id, orders.o_w_id) } - │ │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } + │ │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } │ └─StreamExchange { dist: HashShard(order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id) } │ └─StreamFilter { predicate: (order_line.ol_i_id = order_line.ol_i_id) } - │ └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_supply_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + │ └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_supply_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } └─StreamExchange { dist: HashShard(supplier.s_suppkey) } └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: [supplier.s_suppkey, nation.n_name, supplier.s_nationkey, nation.n_nationkey] } ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } └─StreamExchange { dist: HashShard(nation.n_nationkey) } - └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [n_name, l_year, sum_profit], stream_key: [n_name, l_year], pk_columns: [n_name, l_year], pk_conflict: NoCheck } @@ -1596,12 +1596,12 @@ Fragment 4 StreamProject { exprs: [item.i_id] } └── StreamFilter { predicate: Like(item.i_data, '%BB':Varchar) } - └── StreamTableScan { table: item, columns: [item.i_id, item.i_data], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } { tables: [ StreamScan: 13 ] } + └── StreamTableScan { table: item, 
columns: [item.i_id, item.i_data], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } { tables: [ StreamScan: 13 ] } ├── Upstream └── BatchPlanNode Fragment 5 - StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 14 ] } + StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 14 ] } ├── Upstream └── BatchPlanNode @@ -1612,13 +1612,13 @@ └── StreamExchange Hash([0, 1, 2]) from 8 Fragment 7 - StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 19 ] } + StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 19 ] } ├── Upstream └── BatchPlanNode Fragment 8 StreamFilter { predicate: (order_line.ol_i_id = order_line.ol_i_id) } - └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_supply_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_supply_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } ├── tables: [ StreamScan: 20 ] ├── Upstream └── BatchPlanNode @@ -1629,12 +1629,12 @@ └── StreamExchange Hash([0]) from 11 Fragment 10 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 25 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 25 ] } ├── Upstream └── BatchPlanNode Fragment 11 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 26 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 26 ] } ├── Upstream └── BatchPlanNode @@ -1752,16 +1752,16 @@ │ └─StreamHashJoin { type: Inner, predicate: order_line.ol_w_id = orders.o_w_id AND order_line.ol_d_id = 
orders.o_d_id AND order_line.ol_o_id = orders.o_id AND order_line.ol_d_id = customer.c_d_id AND order_line.ol_w_id = customer.c_w_id, output: all } │ ├─StreamExchange { dist: HashShard(order_line.ol_d_id, order_line.ol_w_id, order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id) } │ │ └─StreamFilter { predicate: (order_line.ol_w_id = order_line.ol_w_id) AND (order_line.ol_d_id = order_line.ol_d_id) } - │ │ └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_amount, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + │ │ └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_amount, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } │ └─StreamExchange { dist: HashShard(customer.c_d_id, customer.c_w_id, orders.o_id, orders.o_d_id, orders.o_w_id) } │ └─StreamHashJoin { type: Inner, predicate: customer.c_id = orders.o_c_id AND customer.c_w_id = orders.o_w_id AND customer.c_d_id = orders.o_d_id, output: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_last, customer.c_city, customer.c_state, customer.c_phone, orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d] } │ ├─StreamExchange { dist: HashShard(customer.c_d_id, customer.c_w_id, customer.c_id) } - │ │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_last, customer.c_city, customer.c_state, customer.c_phone], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } + │ │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_last, customer.c_city, customer.c_state, customer.c_phone], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } │ └─StreamExchange { dist: HashShard(orders.o_d_id, orders.o_w_id, orders.o_c_id) } │ └─StreamFilter { predicate: (orders.o_entry_d >= '2007-01-02 00:00:00':Timestamp) } - │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } + │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } └─StreamExchange { dist: HashShard(nation.n_nationkey) } - └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: 
[nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [c_id, c_last, revenue, c_city, c_phone, n_name], stream_key: [c_id, c_last, c_city, c_phone, n_name], pk_columns: [revenue, c_id, c_last, c_city, c_phone, n_name], pk_conflict: NoCheck } @@ -1786,7 +1786,7 @@ Fragment 3 StreamFilter { predicate: (order_line.ol_w_id = order_line.ol_w_id) AND (order_line.ol_d_id = order_line.ol_d_id) } - └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_amount, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_amount, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } ├── tables: [ StreamScan: 9 ] ├── Upstream └── BatchPlanNode @@ -1798,18 +1798,18 @@ └── StreamExchange Hash([1, 2, 3]) from 6 Fragment 5 - StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_last, customer.c_city, customer.c_state, customer.c_phone], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } { tables: [ StreamScan: 14 ] } + StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_last, customer.c_city, customer.c_state, customer.c_phone], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } { tables: [ StreamScan: 14 ] } ├── Upstream └── BatchPlanNode Fragment 6 StreamFilter { predicate: (orders.o_entry_d >= '2007-01-02 00:00:00':Timestamp) } - └── StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 15 ] } + └── StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 15 ] } ├── Upstream └── BatchPlanNode Fragment 7 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 16 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 16 ] } ├── Upstream └── BatchPlanNode @@ -1931,14 +1931,14 @@ │ ├─StreamExchange { dist: HashShard(supplier.s_suppkey) } │ │ └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey 
= nation.n_nationkey, output: all } │ │ ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } │ │ └─StreamExchange { dist: HashShard(nation.n_nationkey) } │ │ └─StreamProject { exprs: [nation.n_nationkey] } │ │ └─StreamFilter { predicate: (nation.n_name = 'CHINA':Varchar) } - │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } │ └─StreamExchange { dist: HashShard($expr1) } │ └─StreamProject { exprs: [stock.s_i_id, stock.s_order_cnt, ((stock.s_w_id * stock.s_i_id) % 10000:Int32)::Int64 as $expr1, stock.s_w_id] } - │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_order_cnt], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } + │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_order_cnt], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } └─StreamExchange { dist: Broadcast } └─StreamProject { exprs: [(sum(sum(stock.s_order_cnt))::Decimal * 0.005:Decimal) as $expr3] } └─StreamSimpleAgg { aggs: [sum(sum(stock.s_order_cnt)), count] } @@ -1949,14 +1949,14 @@ ├─StreamExchange { dist: HashShard(supplier.s_suppkey) } │ └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: all } │ ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } │ └─StreamExchange { dist: HashShard(nation.n_nationkey) } │ └─StreamProject { exprs: [nation.n_nationkey] } │ └─StreamFilter { predicate: (nation.n_name = 'CHINA':Varchar) } - │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } └─StreamExchange { dist: HashShard($expr1) } └─StreamProject { exprs: [stock.s_i_id, stock.s_order_cnt, ((stock.s_w_id * stock.s_i_id) % 10000:Int32)::Int64 as $expr1, stock.s_w_id] } - └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_order_cnt], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } + └─StreamTableScan { table: stock, columns: [stock.s_i_id, 
stock.s_w_id, stock.s_order_cnt], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [s_i_id, ordercount], stream_key: [s_i_id], pk_columns: [ordercount, s_i_id], pk_conflict: NoCheck } @@ -1987,7 +1987,7 @@ └── StreamExchange Hash([0]) from 5 Fragment 4 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } ├── tables: [ StreamScan: 11 ] ├── Upstream └── BatchPlanNode @@ -1995,14 +1995,14 @@ Fragment 5 StreamProject { exprs: [nation.n_nationkey] } └── StreamFilter { predicate: (nation.n_name = 'CHINA':Varchar) } - └── StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + └── StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } ├── tables: [ StreamScan: 12 ] ├── Upstream └── BatchPlanNode Fragment 6 StreamProject { exprs: [stock.s_i_id, stock.s_order_cnt, ((stock.s_w_id * stock.s_i_id) % 10000:Int32)::Int64 as $expr1, stock.s_w_id] } - └── StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_order_cnt], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } + └── StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_order_cnt], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } ├── tables: [ StreamScan: 13 ] ├── Upstream └── BatchPlanNode @@ -2038,29 +2038,11 @@ Table 10 { columns: [ nation_n_nationkey, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 11 - ├── columns: [ vnode, s_suppkey, supplier_backfill_finished, supplier_row_count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2, 3 ] - ├── distribution key: [ 0 ] - ├── read pk prefix len hint: 1 - └── vnode column idx: 0 + Table 11 { columns: [ vnode, s_suppkey, supplier_backfill_finished, supplier_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 12 - ├── columns: [ vnode, n_nationkey, nation_backfill_finished, nation_row_count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2, 3 ] - ├── distribution key: [ 0 ] - ├── read pk prefix len hint: 1 - └── vnode column idx: 0 + Table 12 { columns: [ vnode, n_nationkey, nation_backfill_finished, nation_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 13 - ├── columns: [ vnode, s_w_id, s_i_id, stock_backfill_finished, stock_row_count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2, 3, 4 ] - ├── distribution key: [ 0 ] - ├── read pk prefix len hint: 1 - └── vnode column idx: 0 + Table 13 { columns: [ vnode, s_w_id, s_i_id, stock_backfill_finished, stock_row_count ], primary key: [ $0 ASC ], 
value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 14 { columns: [ sum(sum(stock_s_order_cnt)), count ], primary key: [], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 0 } @@ -2105,10 +2087,10 @@ └─StreamFilter { predicate: (orders.o_entry_d <= order_line.ol_delivery_d) } └─StreamHashJoin { type: Inner, predicate: orders.o_w_id = order_line.ol_w_id AND orders.o_d_id = order_line.ol_d_id AND orders.o_id = order_line.ol_o_id, output: all } ├─StreamExchange { dist: HashShard(orders.o_id, orders.o_d_id, orders.o_w_id) } - │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d, orders.o_carrier_id, orders.o_ol_cnt], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } + │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d, orders.o_carrier_id, orders.o_ol_cnt], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } └─StreamExchange { dist: HashShard(order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id) } └─StreamFilter { predicate: (order_line.ol_delivery_d < '2030-01-01 00:00:00':Timestamp) } - └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [o_ol_cnt, high_line_count, low_line_count], stream_key: [o_ol_cnt], pk_columns: [o_ol_cnt], pk_conflict: NoCheck } @@ -2125,14 +2107,14 @@ └── StreamExchange Hash([0, 1, 2]) from 3 Fragment 2 - StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d, orders.o_carrier_id, orders.o_ol_cnt], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } + StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d, orders.o_carrier_id, orders.o_ol_cnt], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode Fragment 3 StreamFilter { predicate: (order_line.ol_delivery_d < '2030-01-01 00:00:00':Timestamp) } - └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, 
order_line.ol_number) } + └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } ├── tables: [ StreamScan: 6 ] ├── Upstream └── BatchPlanNode @@ -2196,11 +2178,11 @@ └─StreamExchange { dist: HashShard(customer.c_id) } └─StreamHashJoin { type: LeftOuter, predicate: customer.c_w_id = orders.o_w_id AND customer.c_d_id = orders.o_d_id AND customer.c_id = orders.o_c_id, output: [customer.c_id, orders.o_id, customer.c_w_id, customer.c_d_id, orders.o_w_id, orders.o_d_id] } ├─StreamExchange { dist: HashShard(customer.c_d_id, customer.c_w_id, customer.c_id) } - │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } + │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } └─StreamExchange { dist: HashShard(orders.o_d_id, orders.o_w_id, orders.o_c_id) } └─StreamProject { exprs: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id] } └─StreamFilter { predicate: (orders.o_carrier_id > 8:Int32) } - └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_carrier_id], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } + └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_carrier_id], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [c_count, custdist], stream_key: [c_count], pk_columns: [custdist, c_count], pk_conflict: NoCheck } @@ -2220,7 +2202,7 @@ └── StreamExchange Hash([1, 2, 3]) from 4 Fragment 3 - StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } + StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } ├── tables: [ StreamScan: 6 ] ├── Upstream └── BatchPlanNode @@ -2228,7 +2210,7 @@ Fragment 4 StreamProject { exprs: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id] } └── StreamFilter { predicate: (orders.o_carrier_id > 8:Int32) } - └── StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_carrier_id], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } + └── StreamTableScan { table: orders, columns: [orders.o_id, 
orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_carrier_id], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } ├── tables: [ StreamScan: 7 ] ├── Upstream └── BatchPlanNode @@ -2292,9 +2274,9 @@ ├─StreamExchange { dist: HashShard(order_line.ol_i_id) } │ └─StreamProject { exprs: [order_line.ol_i_id, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number] } │ └─StreamFilter { predicate: (order_line.ol_delivery_d >= '2007-01-02 00:00:00':Timestamp) AND (order_line.ol_delivery_d < '2030-01-02 00:00:00':Timestamp) } - │ └─StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_delivery_d], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + │ └─StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_delivery_d], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } └─StreamExchange { dist: HashShard(item.i_id) } - └─StreamTableScan { table: item, columns: [item.i_id, item.i_data], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } + └─StreamTableScan { table: item, columns: [item.i_id, item.i_data], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [promo_revenue], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -2315,13 +2297,13 @@ Fragment 2 StreamProject { exprs: [order_line.ol_i_id, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number] } └── StreamFilter { predicate: (order_line.ol_delivery_d >= '2007-01-02 00:00:00':Timestamp) AND (order_line.ol_delivery_d < '2030-01-02 00:00:00':Timestamp) } - └── StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_delivery_d], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └── StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_delivery_d], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode Fragment 3 - StreamTableScan { table: item, columns: [item.i_id, item.i_data], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } { tables: [ StreamScan: 6 ] } + StreamTableScan { table: item, columns: 
[item.i_id, item.i_data], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } { tables: [ StreamScan: 6 ] } ├── Upstream └── BatchPlanNode @@ -2371,17 +2353,17 @@ ├─StreamExchange { dist: HashShard(revenue1.total_revenue) } │ └─StreamHashJoin { type: Inner, predicate: supplier.s_suppkey = $expr1, output: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, revenue1.total_revenue, revenue1.supplier_no] } │ ├─StreamExchange { dist: HashShard(supplier.s_suppkey) } - │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } │ └─StreamExchange { dist: HashShard($expr1) } │ └─StreamProject { exprs: [revenue1.total_revenue, revenue1.supplier_no::Int64 as $expr1, revenue1.supplier_no] } - │ └─StreamTableScan { table: revenue1, columns: [revenue1.supplier_no, revenue1.total_revenue], stream_scan_type: Backfill, pk: [revenue1.supplier_no], dist: UpstreamHashShard(revenue1.supplier_no) } + │ └─StreamTableScan { table: revenue1, columns: [revenue1.supplier_no, revenue1.total_revenue], stream_scan_type: ArrangementBackfill, pk: [revenue1.supplier_no], dist: UpstreamHashShard(revenue1.supplier_no) } └─StreamExchange { dist: HashShard(max(max(revenue1.total_revenue))) } └─StreamProject { exprs: [max(max(revenue1.total_revenue))] } └─StreamSimpleAgg { aggs: [max(max(revenue1.total_revenue)), count] } └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr2], aggs: [max(revenue1.total_revenue), count] } └─StreamProject { exprs: [revenue1.total_revenue, revenue1.supplier_no, Vnode(revenue1.supplier_no) as $expr2] } - └─StreamTableScan { table: revenue1, columns: [revenue1.total_revenue, revenue1.supplier_no], stream_scan_type: Backfill, pk: [revenue1.supplier_no], dist: UpstreamHashShard(revenue1.supplier_no) } + └─StreamTableScan { table: revenue1, columns: [revenue1.total_revenue, revenue1.supplier_no], stream_scan_type: ArrangementBackfill, pk: [revenue1.supplier_no], dist: UpstreamHashShard(revenue1.supplier_no) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [s_suppkey, s_name, s_address, s_phone, total_revenue, revenue1.supplier_no(hidden)], stream_key: [s_suppkey, revenue1.supplier_no, total_revenue], pk_columns: [s_suppkey, revenue1.supplier_no, total_revenue], pk_conflict: NoCheck } @@ -2401,13 +2383,14 @@ └── StreamExchange Hash([1]) from 4 Fragment 3 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 8 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + ├── tables: [ StreamScan: 8 ] ├── Upstream └── BatchPlanNode Fragment 4 StreamProject { exprs: [revenue1.total_revenue, revenue1.supplier_no::Int64 as $expr1, revenue1.supplier_no] } - └── StreamTableScan { table: revenue1, columns: [revenue1.supplier_no, 
revenue1.total_revenue], stream_scan_type: Backfill, pk: [revenue1.supplier_no], dist: UpstreamHashShard(revenue1.supplier_no) } { tables: [ StreamScan: 9 ] } + └── StreamTableScan { table: revenue1, columns: [revenue1.supplier_no, revenue1.total_revenue], stream_scan_type: ArrangementBackfill, pk: [revenue1.supplier_no], dist: UpstreamHashShard(revenue1.supplier_no) } { tables: [ StreamScan: 9 ] } ├── Upstream └── BatchPlanNode @@ -2419,7 +2402,7 @@ Fragment 6 StreamHashAgg { group_key: [$expr2], aggs: [max(revenue1.total_revenue), count] } { tables: [ HashAggState: 13, HashAggCall0: 12 ] } └── StreamProject { exprs: [revenue1.total_revenue, revenue1.supplier_no, Vnode(revenue1.supplier_no) as $expr2] } - └── StreamTableScan { table: revenue1, columns: [revenue1.total_revenue, revenue1.supplier_no], stream_scan_type: Backfill, pk: [revenue1.supplier_no], dist: UpstreamHashShard(revenue1.supplier_no) } { tables: [ StreamScan: 14 ] } + └── StreamTableScan { table: revenue1, columns: [revenue1.total_revenue, revenue1.supplier_no], stream_scan_type: ArrangementBackfill, pk: [revenue1.supplier_no], dist: UpstreamHashShard(revenue1.supplier_no) } { tables: [ StreamScan: 14 ] } ├── Upstream └── BatchPlanNode @@ -2511,14 +2494,14 @@ │ └─StreamProject { exprs: [stock.s_i_id, stock.s_w_id, item.i_name, item.i_price, item.i_data, ((stock.s_w_id * stock.s_i_id) % 10000:Int32)::Int64 as $expr1] } │ └─StreamHashJoin { type: Inner, predicate: stock.s_i_id = item.i_id, output: [stock.s_i_id, stock.s_w_id, item.i_name, item.i_price, item.i_data, item.i_id] } │ ├─StreamExchange { dist: HashShard(stock.s_i_id) } - │ │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } + │ │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } │ └─StreamExchange { dist: HashShard(item.i_id) } │ └─StreamFilter { predicate: Not(Like(item.i_data, 'zz%':Varchar)) } - │ └─StreamTableScan { table: item, columns: [item.i_id, item.i_name, item.i_price, item.i_data], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } + │ └─StreamTableScan { table: item, columns: [item.i_id, item.i_name, item.i_price, item.i_data], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } └─StreamExchange { dist: HashShard(supplier.s_suppkey) } └─StreamProject { exprs: [supplier.s_suppkey] } └─StreamFilter { predicate: Like(supplier.s_comment, '%bad%':Varchar) } - └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_comment], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_comment], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [i_name, brand, i_price, supplier_cnt], stream_key: [i_name, brand, i_price], pk_columns: [supplier_cnt, i_name, brand, i_price], pk_conflict: NoCheck } @@ -2541,14 +2524,14 @@ └── StreamExchange Hash([0]) from 4 Fragment 3 - StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, 
stock.s_i_id) } + StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } ├── tables: [ StreamScan: 10 ] ├── Upstream └── BatchPlanNode Fragment 4 StreamFilter { predicate: Not(Like(item.i_data, 'zz%':Varchar)) } - └── StreamTableScan { table: item, columns: [item.i_id, item.i_name, item.i_price, item.i_data], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } + └── StreamTableScan { table: item, columns: [item.i_id, item.i_name, item.i_price, item.i_data], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } ├── tables: [ StreamScan: 11 ] ├── Upstream └── BatchPlanNode @@ -2556,7 +2539,7 @@ Fragment 5 StreamProject { exprs: [supplier.s_suppkey] } └── StreamFilter { predicate: Like(supplier.s_comment, '%bad%':Varchar) } - └── StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_comment], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + └── StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_comment], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } ├── tables: [ StreamScan: 12 ] ├── Upstream └── BatchPlanNode @@ -2604,13 +2587,7 @@ ├── read pk prefix len hint: 1 └── vnode column idx: 0 - Table 11 - ├── columns: [ vnode, i_id, item_backfill_finished, item_row_count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2, 3 ] - ├── distribution key: [ 0 ] - ├── read pk prefix len hint: 1 - └── vnode column idx: 0 + Table 11 { columns: [ vnode, i_id, item_backfill_finished, item_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 12 ├── columns: [ vnode, s_suppkey, supplier_backfill_finished, supplier_row_count ] @@ -2685,16 +2662,16 @@ └─StreamHashJoin { type: Inner, predicate: order_line.ol_i_id = item.i_id, output: all } ├─StreamExchange { dist: HashShard(order_line.ol_i_id) } │ └─StreamProject { exprs: [order_line.ol_i_id, order_line.ol_amount, order_line.ol_quantity::Decimal as $expr1, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number] } - │ └─StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_quantity, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + │ └─StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_quantity, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } └─StreamProject { exprs: [item.i_id, (sum(order_line.ol_quantity)::Decimal / count(order_line.ol_quantity)::Decimal) as $expr2] } └─StreamHashAgg { group_key: [item.i_id], aggs: [sum(order_line.ol_quantity), count(order_line.ol_quantity), count] } └─StreamHashJoin { type: Inner, predicate: item.i_id = 
order_line.ol_i_id, output: [item.i_id, order_line.ol_quantity, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number] } ├─StreamExchange { dist: HashShard(item.i_id) } │ └─StreamProject { exprs: [item.i_id] } │ └─StreamFilter { predicate: Like(item.i_data, '%b':Varchar) } - │ └─StreamTableScan { table: item, columns: [item.i_id, item.i_data], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } + │ └─StreamTableScan { table: item, columns: [item.i_id, item.i_data], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } └─StreamExchange { dist: HashShard(order_line.ol_i_id) } - └─StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_quantity, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └─StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_quantity, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [avg_yearly], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -2719,7 +2696,7 @@ Fragment 2 StreamProject { exprs: [order_line.ol_i_id, order_line.ol_amount, order_line.ol_quantity::Decimal as $expr1, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number] } - └── StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_quantity, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └── StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_quantity, order_line.ol_amount, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode @@ -2727,12 +2704,12 @@ Fragment 3 StreamProject { exprs: [item.i_id] } └── StreamFilter { predicate: Like(item.i_data, '%b':Varchar) } - └── StreamTableScan { table: item, columns: [item.i_id, item.i_data], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } { tables: [ StreamScan: 11 ] } + └── StreamTableScan { table: item, columns: [item.i_id, item.i_data], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } { tables: [ StreamScan: 11 ] } ├── Upstream └── BatchPlanNode Fragment 4 - StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_quantity, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], stream_scan_type: Backfill, pk: 
[order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_quantity, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } ├── tables: [ StreamScan: 12 ] ├── Upstream └── BatchPlanNode @@ -2816,11 +2793,11 @@ ├─StreamExchange { dist: HashShard(orders.o_id, orders.o_d_id, orders.o_w_id) } │ └─StreamHashJoin { type: Inner, predicate: customer.c_id = orders.o_c_id AND customer.c_w_id = orders.o_w_id AND customer.c_d_id = orders.o_d_id, output: [customer.c_id, customer.c_last, orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d, orders.o_ol_cnt, customer.c_w_id, customer.c_d_id] } │ ├─StreamExchange { dist: HashShard(customer.c_d_id, customer.c_w_id, customer.c_id) } - │ │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_last], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } + │ │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_last], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } │ └─StreamExchange { dist: HashShard(orders.o_d_id, orders.o_w_id, orders.o_c_id) } - │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d, orders.o_ol_cnt], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } + │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d, orders.o_ol_cnt], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } └─StreamExchange { dist: HashShard(order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id) } - └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [c_last, o_id, o_entry_d, o_ol_cnt, sum, sum(order_line.ol_amount)(hidden), orders.o_id(hidden), orders.o_d_id(hidden), orders.o_w_id(hidden)], stream_key: [o_id, c_last, orders.o_id, orders.o_d_id, orders.o_w_id, o_entry_d, 
o_ol_cnt], pk_columns: [sum(order_line.ol_amount), o_entry_d, o_id, c_last, orders.o_id, orders.o_d_id, orders.o_w_id, o_ol_cnt], pk_conflict: NoCheck } @@ -2840,17 +2817,17 @@ └── StreamExchange Hash([1, 2, 3]) from 3 Fragment 2 - StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_last], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } { tables: [ StreamScan: 9 ] } + StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_last], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } { tables: [ StreamScan: 9 ] } ├── Upstream └── BatchPlanNode Fragment 3 - StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d, orders.o_ol_cnt], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 10 ] } + StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_entry_d, orders.o_ol_cnt], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 10 ] } ├── Upstream └── BatchPlanNode Fragment 4 - StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } { tables: [ StreamScan: 11 ] } + StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_amount, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } { tables: [ StreamScan: 11 ] } ├── Upstream └── BatchPlanNode @@ -2918,11 +2895,11 @@ ├─StreamExchange { dist: HashShard(order_line.ol_i_id) } │ └─StreamProject { exprs: [order_line.ol_w_id, order_line.ol_i_id, order_line.ol_amount, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number] } │ └─StreamFilter { predicate: (order_line.ol_quantity >= 1:Int32) AND (order_line.ol_quantity <= 10:Int32) } - │ └─StreamTableScan { table: order_line, columns: [order_line.ol_w_id, order_line.ol_i_id, order_line.ol_amount, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_quantity], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + │ └─StreamTableScan { table: order_line, columns: [order_line.ol_w_id, order_line.ol_i_id, order_line.ol_amount, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_quantity], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: 
UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } └─StreamExchange { dist: HashShard(item.i_id) } └─StreamProject { exprs: [item.i_id, item.i_data] } └─StreamFilter { predicate: (item.i_price >= 1:Decimal) AND (item.i_price <= 400000:Decimal) } - └─StreamTableScan { table: item, columns: [item.i_id, item.i_data, item.i_price], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } + └─StreamTableScan { table: item, columns: [item.i_id, item.i_data, item.i_price], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [revenue], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -2943,7 +2920,7 @@ Fragment 2 StreamProject { exprs: [order_line.ol_w_id, order_line.ol_i_id, order_line.ol_amount, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number] } └── StreamFilter { predicate: (order_line.ol_quantity >= 1:Int32) AND (order_line.ol_quantity <= 10:Int32) } - └── StreamTableScan { table: order_line, columns: [order_line.ol_w_id, order_line.ol_i_id, order_line.ol_amount, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_quantity], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └── StreamTableScan { table: order_line, columns: [order_line.ol_w_id, order_line.ol_i_id, order_line.ol_amount, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_quantity], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode @@ -2951,7 +2928,7 @@ Fragment 3 StreamProject { exprs: [item.i_id, item.i_data] } └── StreamFilter { predicate: (item.i_price >= 1:Decimal) AND (item.i_price <= 400000:Decimal) } - └── StreamTableScan { table: item, columns: [item.i_id, item.i_data, item.i_price], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } { tables: [ StreamScan: 6 ] } + └── StreamTableScan { table: item, columns: [item.i_id, item.i_data, item.i_price], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } { tables: [ StreamScan: 6 ] } ├── Upstream └── BatchPlanNode @@ -3035,11 +3012,11 @@ ├─StreamExchange { dist: HashShard(supplier.s_suppkey) } │ └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: all } │ ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } │ └─StreamExchange { dist: HashShard(nation.n_nationkey) } │ └─StreamProject { exprs: [nation.n_nationkey] } │ └─StreamFilter { predicate: (nation.n_name = 'CHINA':Varchar) } - │ └─StreamTableScan { table: nation, columns: 
[nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } └─StreamExchange { dist: HashShard($expr1) } └─StreamProject { exprs: [((stock.s_i_id * stock.s_w_id) % 10000:Int32)::Int64 as $expr1, stock.s_i_id, stock.s_w_id, stock.s_quantity] } └─StreamFilter { predicate: ((2:Int32 * stock.s_quantity) > sum(order_line.ol_quantity)) } @@ -3048,15 +3025,15 @@ └─StreamHashJoin { type: LeftSemi, predicate: stock.s_i_id = item.i_id, output: all } ├─StreamHashJoin { type: Inner, predicate: stock.s_i_id = order_line.ol_i_id, output: [stock.s_i_id, stock.s_w_id, stock.s_quantity, order_line.ol_quantity, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number] } │ ├─StreamExchange { dist: HashShard(stock.s_i_id) } - │ │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_quantity], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } + │ │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_quantity], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } │ └─StreamExchange { dist: HashShard(order_line.ol_i_id) } │ └─StreamProject { exprs: [order_line.ol_i_id, order_line.ol_quantity, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number] } │ └─StreamFilter { predicate: (order_line.ol_delivery_d > '2010-05-23 12:00:00':Timestamp) } - │ └─StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_quantity, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_delivery_d], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + │ └─StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_quantity, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_delivery_d], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } └─StreamExchange { dist: HashShard(item.i_id) } └─StreamProject { exprs: [item.i_id] } └─StreamFilter { predicate: Like(item.i_data, 'co%':Varchar) } - └─StreamTableScan { table: item, columns: [item.i_id, item.i_data], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } + └─StreamTableScan { table: item, columns: [item.i_id, item.i_data], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [s_name, s_address, supplier.s_suppkey(hidden), supplier.s_nationkey(hidden)], stream_key: [supplier.s_suppkey, supplier.s_nationkey], pk_columns: [s_name, supplier.s_suppkey, supplier.s_nationkey], pk_conflict: NoCheck } @@ -3075,7 +3052,7 @@ └── StreamExchange Hash([0]) from 4 Fragment 3 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, 
supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } ├── tables: [ StreamScan: 8 ] ├── Upstream └── BatchPlanNode @@ -3083,7 +3060,7 @@ Fragment 4 StreamProject { exprs: [nation.n_nationkey] } └── StreamFilter { predicate: (nation.n_name = 'CHINA':Varchar) } - └── StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 9 ] } + └── StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 9 ] } ├── Upstream └── BatchPlanNode @@ -3100,14 +3077,14 @@ └── StreamExchange Hash([0]) from 8 Fragment 6 - StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_quantity], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 19 ] } + StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id, stock.s_quantity], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 19 ] } ├── Upstream └── BatchPlanNode Fragment 7 StreamProject { exprs: [order_line.ol_i_id, order_line.ol_quantity, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number] } └── StreamFilter { predicate: (order_line.ol_delivery_d > '2010-05-23 12:00:00':Timestamp) } - └── StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_quantity, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_delivery_d], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └── StreamTableScan { table: order_line, columns: [order_line.ol_i_id, order_line.ol_quantity, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number, order_line.ol_delivery_d], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } ├── tables: [ StreamScan: 20 ] ├── Upstream └── BatchPlanNode @@ -3115,7 +3092,7 @@ Fragment 8 StreamProject { exprs: [item.i_id] } └── StreamFilter { predicate: Like(item.i_data, 'co%':Varchar) } - └── StreamTableScan { table: item, columns: [item.i_id, item.i_data], stream_scan_type: Backfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } { tables: [ StreamScan: 21 ] } + └── StreamTableScan { table: item, columns: [item.i_id, item.i_data], stream_scan_type: ArrangementBackfill, pk: [item.i_id], dist: UpstreamHashShard(item.i_id) } { tables: [ StreamScan: 21 ] } ├── Upstream └── BatchPlanNode @@ -3234,26 +3211,26 @@ │ │ └─StreamHashJoin { type: Inner, predicate: stock.s_w_id = order_line.ol_w_id AND stock.s_i_id = order_line.ol_i_id AND stock.s_w_id = orders.o_w_id, output: 
[stock.s_i_id, stock.s_w_id, order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_number] } │ │ ├─StreamExchange { dist: HashShard(stock.s_w_id, stock.s_i_id, stock.s_w_id) } │ │ │ └─StreamFilter { predicate: (stock.s_w_id = stock.s_w_id) } - │ │ │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } + │ │ │ └─StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } │ │ └─StreamExchange { dist: HashShard(order_line.ol_w_id, order_line.ol_i_id, orders.o_w_id) } │ │ └─StreamProject { exprs: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_delivery_d, orders.o_w_id, order_line.ol_number, orders.o_d_id, orders.o_id] } │ │ └─StreamFilter { predicate: (order_line.ol_delivery_d > orders.o_entry_d) } │ │ └─StreamHashJoin { type: Inner, predicate: order_line.ol_o_id = orders.o_id AND order_line.ol_w_id = orders.o_w_id AND order_line.ol_d_id = orders.o_d_id, output: all } │ │ ├─StreamExchange { dist: HashShard(order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id) } - │ │ │ └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + │ │ │ └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } │ │ └─StreamExchange { dist: HashShard(orders.o_id, orders.o_d_id, orders.o_w_id) } - │ │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } + │ │ └─StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } │ └─StreamExchange { dist: HashShard(supplier.s_suppkey) } │ └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: all } │ ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } │ └─StreamExchange { dist: HashShard(nation.n_nationkey) } │ └─StreamProject { exprs: 
[nation.n_nationkey] } │ └─StreamFilter { predicate: (nation.n_name = 'CHINA':Varchar) } - │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } └─StreamExchange { dist: HashShard(order_line.ol_o_id, order_line.ol_w_id, order_line.ol_d_id) } └─StreamProject { exprs: [order_line.ol_o_id, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_delivery_d, order_line.ol_number] } - └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └─StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [s_name, numwait], stream_key: [s_name], pk_columns: [numwait, s_name], pk_conflict: NoCheck } @@ -3280,7 +3257,7 @@ Fragment 4 StreamFilter { predicate: (stock.s_w_id = stock.s_w_id) } - └── StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: Backfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 13 ] } + └── StreamTableScan { table: stock, columns: [stock.s_i_id, stock.s_w_id], stream_scan_type: ArrangementBackfill, pk: [stock.s_w_id, stock.s_i_id], dist: UpstreamHashShard(stock.s_w_id, stock.s_i_id) } { tables: [ StreamScan: 13 ] } ├── Upstream └── BatchPlanNode @@ -3292,13 +3269,13 @@ └── StreamExchange Hash([0, 1, 2]) from 7 Fragment 6 - StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_i_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } ├── tables: [ StreamScan: 18 ] ├── Upstream └── BatchPlanNode Fragment 7 - StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, orders.o_w_id, orders.o_entry_d], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 19 ] } + StreamTableScan { table: orders, columns: [orders.o_id, orders.o_d_id, 
orders.o_w_id, orders.o_entry_d], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) } { tables: [ StreamScan: 19 ] } ├── Upstream └── BatchPlanNode @@ -3308,20 +3285,20 @@ └── StreamExchange Hash([0]) from 10 Fragment 9 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 24 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 24 ] } ├── Upstream └── BatchPlanNode Fragment 10 StreamProject { exprs: [nation.n_nationkey] } └── StreamFilter { predicate: (nation.n_name = 'CHINA':Varchar) } - └── StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 25 ] } + └── StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 25 ] } ├── Upstream └── BatchPlanNode Fragment 11 StreamProject { exprs: [order_line.ol_o_id, order_line.ol_w_id, order_line.ol_d_id, order_line.ol_delivery_d, order_line.ol_number] } - └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: Backfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } + └── StreamTableScan { table: order_line, columns: [order_line.ol_o_id, order_line.ol_d_id, order_line.ol_w_id, order_line.ol_delivery_d, order_line.ol_number], stream_scan_type: ArrangementBackfill, pk: [order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number], dist: UpstreamHashShard(order_line.ol_w_id, order_line.ol_d_id, order_line.ol_o_id, order_line.ol_number) } ├── tables: [ StreamScan: 26 ] ├── Upstream └── BatchPlanNode @@ -3456,10 +3433,10 @@ │ ├─StreamExchange { dist: HashShard(customer.c_id, customer.c_w_id, customer.c_d_id) } │ │ └─StreamProject { exprs: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state, customer.c_balance] } │ │ └─StreamFilter { predicate: In(Substr(customer.c_phone, 1:Int32, 1:Int32), '1':Varchar, '2':Varchar, '3':Varchar, '4':Varchar, '5':Varchar, '6':Varchar, '7':Varchar) } - │ │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state, customer.c_balance, customer.c_phone], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } + │ │ └─StreamTableScan { table: customer, columns: [customer.c_id, customer.c_d_id, customer.c_w_id, customer.c_state, customer.c_balance, customer.c_phone], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) } │ └─StreamExchange { dist: HashShard(orders.o_c_id, orders.o_w_id, 
orders.o_d_id) }
│   └─StreamProject { exprs: [orders.o_c_id, orders.o_w_id, orders.o_d_id, orders.o_id] }
- │     └─StreamTableScan { table: orders, columns: [orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_id], stream_scan_type: Backfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) }
+ │     └─StreamTableScan { table: orders, columns: [orders.o_d_id, orders.o_w_id, orders.o_c_id, orders.o_id], stream_scan_type: ArrangementBackfill, pk: [orders.o_w_id, orders.o_d_id, orders.o_id], dist: UpstreamHashShard(orders.o_w_id, orders.o_d_id, orders.o_id) }
└─StreamExchange { dist: Broadcast }
  └─StreamProject { exprs: [(sum(sum(customer.c_balance)) / sum0(count(customer.c_balance))::Decimal) as $expr1] }
    └─StreamSimpleAgg { aggs: [sum(sum(customer.c_balance)), sum0(count(customer.c_balance)), count] }
@@ -3467,4 +3444,4 @@
        └─StreamStatelessSimpleAgg { aggs: [sum(customer.c_balance), count(customer.c_balance)] }
          └─StreamProject { exprs: [customer.c_balance, customer.c_w_id, customer.c_d_id, customer.c_id] }
            └─StreamFilter { predicate: (customer.c_balance > 0.00:Decimal) AND In(Substr(customer.c_phone, 1:Int32, 1:Int32), '1':Varchar, '2':Varchar, '3':Varchar, '4':Varchar, '5':Varchar, '6':Varchar, '7':Varchar) }
-              └─StreamTableScan { table: customer, columns: [customer.c_balance, customer.c_w_id, customer.c_d_id, customer.c_id, customer.c_phone], stream_scan_type: Backfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) }
+              └─StreamTableScan { table: customer, columns: [customer.c_balance, customer.c_w_id, customer.c_d_id, customer.c_id, customer.c_phone], stream_scan_type: ArrangementBackfill, pk: [customer.c_w_id, customer.c_d_id, customer.c_id], dist: UpstreamHashShard(customer.c_w_id, customer.c_d_id, customer.c_id) }
diff --git a/src/frontend/planner_test/tests/testdata/output/column_pruning.yaml b/src/frontend/planner_test/tests/testdata/output/column_pruning.yaml
index 8ca2dc7541083..b22b48304c8b3 100644
--- a/src/frontend/planner_test/tests/testdata/output/column_pruning.yaml
+++ b/src/frontend/planner_test/tests/testdata/output/column_pruning.yaml
@@ -190,4 +190,4 @@
    StreamMaterialize { columns: [a, window_end, t1._row_id(hidden)], stream_key: [t1._row_id, window_end], pk_columns: [t1._row_id, window_end], pk_conflict: NoCheck }
    └─StreamHopWindow { time_col: t1.created_at, slide: 00:15:00, size: 00:30:00, output: [t1.a, window_end, t1._row_id] }
      └─StreamFilter { predicate: IsNotNull(t1.created_at) }
-       └─StreamTableScan { table: t1, columns: [t1.a, t1.created_at, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) }
+       └─StreamTableScan { table: t1, columns: [t1.a, t1.created_at, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) }
diff --git a/src/frontend/planner_test/tests/testdata/output/common_table_expressions.yaml b/src/frontend/planner_test/tests/testdata/output/common_table_expressions.yaml
index fd6a012cf377e..ce0cf0d55baab 100644
--- a/src/frontend/planner_test/tests/testdata/output/common_table_expressions.yaml
+++ b/src/frontend/planner_test/tests/testdata/output/common_table_expressions.yaml
@@ -9,7 +9,7 @@
      └─LogicalScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id] }
  stream_plan: |-
    StreamMaterialize { columns: [v1, t1._row_id(hidden)], stream_key: [t1._row_id], pk_columns: [t1._row_id], pk_conflict: NoCheck }
-   └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) }
+   └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) }
- sql: |
    create table t1 (v1 int, v2 int);
    create table t2 (v3 int, v4 int);
@@ -26,9 +26,9 @@
    └─StreamExchange { dist: HashShard(t2.v3, t2._row_id, t1._row_id) }
      └─StreamHashJoin { type: Inner, predicate: t2.v3 = t1.v1, output: [t2.v3, t2.v4, t1.v1, t2._row_id, t1._row_id] }
        ├─StreamExchange { dist: HashShard(t2.v3) }
-       │ └─StreamTableScan { table: t2, columns: [t2.v3, t2.v4, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) }
+       │ └─StreamTableScan { table: t2, columns: [t2.v3, t2.v4, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) }
        └─StreamExchange { dist: HashShard(t1.v1) }
-         └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) }
+         └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) }
- sql: |
    create table t1 (v1 int, v2 int);
    create table t2 (v3 int, v4 int);
@@ -42,7 +42,7 @@
      └─LogicalScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id] }
  stream_plan: |-
    StreamMaterialize { columns: [v1, t1._row_id(hidden)], stream_key: [t1._row_id], pk_columns: [t1._row_id], pk_conflict: NoCheck }
-   └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) }
+   └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) }
- sql: |
    create table t1 (x int);
    with with_0 as (select * from t1 group by x having EXISTS(select 0.1)) select * from with_0;
@@ -83,6 +83,6 @@
    └─StreamExchange { dist: HashShard(t.c, t._row_id, _row_id) }
      └─StreamHashJoin { type: Inner, predicate: t.c = *VALUES*_0.column_0, output: [t.v, t.c, *VALUES*_0.column_0, *VALUES*_0.column_1, t._row_id, _row_id] }
        ├─StreamExchange { dist: HashShard(t.c) }
-       │ └─StreamTableScan { table: t, columns: [t.v, t.c, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
+       │ └─StreamTableScan { table: t, columns: [t.v, t.c, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) }
        └─StreamExchange { dist: HashShard(*VALUES*_0.column_0) }
          └─StreamValues { rows: [['cn':Varchar, 'China':Varchar, 0:Int64], ['us':Varchar, 'United States':Varchar, 1:Int64]] }
diff --git a/src/frontend/planner_test/tests/testdata/output/cse_expr.yaml b/src/frontend/planner_test/tests/testdata/output/cse_expr.yaml
index 259d5ff0b5dd3..10248337ec32a 100644
--- a/src/frontend/planner_test/tests/testdata/output/cse_expr.yaml
+++ b/src/frontend/planner_test/tests/testdata/output/cse_expr.yaml
@@ -12,7 +12,7 @@
    StreamMaterialize { columns: [x, y, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck }
    └─StreamProject { exprs: [JsonbAccess($expr1, 'c':Varchar) as $expr2, JsonbAccess($expr1, 'b':Varchar) as $expr3, t._row_id] }
      └─StreamProject { exprs: [t.v1, JsonbAccess(t.v1, 'a':Varchar) as $expr1, t._row_id] }
-       └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id],
dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: Common sub expression extract2 sql: | create table t (v1 jsonb); @@ -26,7 +26,7 @@ StreamMaterialize { columns: [x, y, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [$expr1, $expr1, t._row_id] } └─StreamProject { exprs: [t.v1, JsonbAccess(JsonbAccess(t.v1, 'a':Varchar), 'c':Varchar) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: Common sub expression shouldn't extract impure function sql: | create table t(v1 varchar, v2 int, v3 int); @@ -40,7 +40,7 @@ StreamMaterialize { columns: [vnode, vnode2, x, y, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [(Vnode(t.v2) + 1:Int32) as $expr2, (Vnode(t.v2) + 1:Int32) as $expr3, $expr1, $expr1, t._row_id] } └─StreamProject { exprs: [t.v2, (t.v2 + 1:Int32) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: Common sub expression shouldn't extract const sql: | SELECT x, @@ -79,7 +79,7 @@ └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [sum($expr1), sum(t.v), count(t.v)] } └─StreamProject { exprs: [t.v, (t.v * t.v) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: Common sub expression shouldn't extract partial expression of `some`/`all`. 
See 11766 sql: | with t(v, arr) as (select 1, array[2, 3]) select v < all(arr), v < some(arr) from t; diff --git a/src/frontend/planner_test/tests/testdata/output/distinct_on.yaml b/src/frontend/planner_test/tests/testdata/output/distinct_on.yaml index 08e492a95229d..b0a67d1f3f1da 100644 --- a/src/frontend/planner_test/tests/testdata/output/distinct_on.yaml +++ b/src/frontend/planner_test/tests/testdata/output/distinct_on.yaml @@ -15,7 +15,7 @@ └─StreamAppendOnlyDedup { dedup_cols: [t1.k] } └─StreamExchange { dist: HashShard(t1.k) } └─StreamProject { exprs: [(t1.k + t1.v) as $expr1, t1.k, t1._row_id] } - └─StreamTableScan { table: t1, columns: [t1.k, t1.v, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.k, t1.v, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - sql: | create table t2 (k int, v int); select distinct on (k) k + v as sum from t2; @@ -32,7 +32,7 @@ └─StreamGroupTopN { order: [], limit: 1, offset: 0, group_key: [t2.k] } └─StreamExchange { dist: HashShard(t2.k) } └─StreamProject { exprs: [(t2.k + t2.v) as $expr1, t2.k, t2._row_id] } - └─StreamTableScan { table: t2, columns: [t2.k, t2.v, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.k, t2.v, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - sql: | create table t (a int, b int, c int); select distinct on (foo, b) a as foo, b from t; @@ -45,7 +45,7 @@ StreamMaterialize { columns: [foo, b, t._row_id(hidden)], stream_key: [foo, b], pk_columns: [foo, b], pk_conflict: NoCheck } └─StreamGroupTopN { order: [], limit: 1, offset: 0, group_key: [t.a, t.b] } └─StreamExchange { dist: HashShard(t.a, t.b) } - └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (a int, b int, c int); select distinct on (2) a as foo, b from t; @@ -58,7 +58,7 @@ StreamMaterialize { columns: [foo, b, t._row_id(hidden)], stream_key: [b], pk_columns: [b], pk_conflict: NoCheck } └─StreamGroupTopN { order: [], limit: 1, offset: 0, group_key: [t.b] } └─StreamExchange { dist: HashShard(t.b) } - └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t1(a int, b int, c int, k int); create table t2(k int); diff --git a/src/frontend/planner_test/tests/testdata/output/distribution_derive.yaml b/src/frontend/planner_test/tests/testdata/output/distribution_derive.yaml index c276f24195eae..68104face900a 100644 --- a/src/frontend/planner_test/tests/testdata/output/distribution_derive.yaml +++ b/src/frontend/planner_test/tests/testdata/output/distribution_derive.yaml @@ -233,7 +233,7 @@ └─StreamProject { exprs: [max(a.v), a.k1] } └─StreamHashAgg { group_key: [a.k1], aggs: [max(a.v), count] } └─StreamExchange { dist: HashShard(a.k1) } - └─StreamTableScan { table: a, columns: [a.k1, a.v, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], 
dist: UpstreamHashShard(a._row_id) } + └─StreamTableScan { table: a, columns: [a.k1, a.v, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [max_v, a.k1(hidden)], stream_key: [a.k1], pk_columns: [a.k1], pk_conflict: NoCheck } @@ -244,7 +244,7 @@ └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamTableScan { table: a, columns: [a.k1, a.v, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + StreamTableScan { table: a, columns: [a.k1, a.v, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -256,12 +256,7 @@ ├── distribution key: [ 0 ] └── read pk prefix len hint: 1 - Table 1 - ├── columns: [ a_k1, max(a_v), count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 1 { columns: [ a_k1, max(a_v), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 2 ├── columns: [ vnode, _row_id, a_backfill_finished, a_row_count ] @@ -271,12 +266,7 @@ ├── read pk prefix len hint: 1 └── vnode column idx: 0 - Table 4294967294 - ├── columns: [ max_v, a.k1 ] - ├── primary key: [ $1 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 1 ] - └── read pk prefix len hint: 1 + Table 4294967294 { columns: [ max_v, a.k1 ], primary key: [ $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 1 ], read pk prefix len hint: 1 } - id: aggk1_from_Ak1 before: @@ -294,23 +284,18 @@ StreamMaterialize { columns: [max_v, ak1.k1(hidden)], stream_key: [ak1.k1], pk_columns: [ak1.k1], pk_conflict: NoCheck } └─StreamProject { exprs: [max(ak1.v), ak1.k1] } └─StreamHashAgg { group_key: [ak1.k1], aggs: [max(ak1.v), count] } - └─StreamTableScan { table: ak1, columns: [ak1.k1, ak1.v, ak1.a._row_id], stream_scan_type: Backfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } + └─StreamTableScan { table: ak1, columns: [ak1.k1, ak1.v, ak1.a._row_id], stream_scan_type: ArrangementBackfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [max_v, ak1.k1(hidden)], stream_key: [ak1.k1], pk_columns: [ak1.k1], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamProject { exprs: [max(ak1.v), ak1.k1] } └── StreamHashAgg { group_key: [ak1.k1], aggs: [max(ak1.v), count] } { tables: [ HashAggState: 1, HashAggCall0: 0 ] } - └── StreamTableScan { table: ak1, columns: [ak1.k1, ak1.v, ak1.a._row_id], stream_scan_type: Backfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } + └── StreamTableScan { table: ak1, columns: [ak1.k1, ak1.v, ak1.a._row_id], stream_scan_type: ArrangementBackfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode - Table 0 - ├── columns: [ ak1_k1, ak1_v, ak1_a__row_id ] - ├── primary key: [ $0 ASC, $1 DESC, $2 ASC ] - ├── value indices: [ 1, 2 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 0 { columns: [ ak1_k1, ak1_v, ak1_a__row_id ], primary key: [ $0 ASC, $1 DESC, $2 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 1 { columns: [ ak1_k1, max(ak1_v), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ 
-340,13 +325,13 @@ StreamMaterialize { columns: [max_v, ak1k2.k1(hidden)], stream_key: [ak1k2.k1], pk_columns: [ak1k2.k1], pk_conflict: NoCheck } └─StreamProject { exprs: [max(ak1k2.v), ak1k2.k1] } └─StreamHashAgg { group_key: [ak1k2.k1], aggs: [max(ak1k2.v), count] } - └─StreamTableScan { table: ak1k2, columns: [ak1k2.k1, ak1k2.v, ak1k2.k2, ak1k2.a._row_id], stream_scan_type: Backfill, pk: [ak1k2.a._row_id], dist: UpstreamHashShard(ak1k2.k1) } + └─StreamTableScan { table: ak1k2, columns: [ak1k2.k1, ak1k2.v, ak1k2.k2, ak1k2.a._row_id], stream_scan_type: ArrangementBackfill, pk: [ak1k2.a._row_id], dist: UpstreamHashShard(ak1k2.k1) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [max_v, ak1k2.k1(hidden)], stream_key: [ak1k2.k1], pk_columns: [ak1k2.k1], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamProject { exprs: [max(ak1k2.v), ak1k2.k1] } └── StreamHashAgg { group_key: [ak1k2.k1], aggs: [max(ak1k2.v), count] } { tables: [ HashAggState: 1, HashAggCall0: 0 ] } - └── StreamTableScan { table: ak1k2, columns: [ak1k2.k1, ak1k2.v, ak1k2.k2, ak1k2.a._row_id], stream_scan_type: Backfill, pk: [ak1k2.a._row_id], dist: UpstreamHashShard(ak1k2.k1) } + └── StreamTableScan { table: ak1k2, columns: [ak1k2.k1, ak1k2.v, ak1k2.k2, ak1k2.a._row_id], stream_scan_type: ArrangementBackfill, pk: [ak1k2.a._row_id], dist: UpstreamHashShard(ak1k2.k1) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -383,7 +368,7 @@ └─StreamProject { exprs: [max(ak1k2.v), ak1k2.k2] } └─StreamHashAgg { group_key: [ak1k2.k2], aggs: [max(ak1k2.v), count] } └─StreamExchange { dist: HashShard(ak1k2.k2) } - └─StreamTableScan { table: ak1k2, columns: [ak1k2.k2, ak1k2.v, ak1k2.k1, ak1k2.a._row_id], stream_scan_type: Backfill, pk: [ak1k2.a._row_id], dist: UpstreamHashShard(ak1k2.k1) } + └─StreamTableScan { table: ak1k2, columns: [ak1k2.k2, ak1k2.v, ak1k2.k1, ak1k2.a._row_id], stream_scan_type: ArrangementBackfill, pk: [ak1k2.a._row_id], dist: UpstreamHashShard(ak1k2.k1) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [max_v, ak1k2.k2(hidden)], stream_key: [ak1k2.k2], pk_columns: [ak1k2.k2], pk_conflict: NoCheck } @@ -393,17 +378,12 @@ └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamTableScan { table: ak1k2, columns: [ak1k2.k2, ak1k2.v, ak1k2.k1, ak1k2.a._row_id], stream_scan_type: Backfill, pk: [ak1k2.a._row_id], dist: UpstreamHashShard(ak1k2.k1) } + StreamTableScan { table: ak1k2, columns: [ak1k2.k2, ak1k2.v, ak1k2.k1, ak1k2.a._row_id], stream_scan_type: ArrangementBackfill, pk: [ak1k2.a._row_id], dist: UpstreamHashShard(ak1k2.k1) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode - Table 0 - ├── columns: [ ak1k2_k2, ak1k2_v, ak1k2_a__row_id ] - ├── primary key: [ $0 ASC, $1 DESC, $2 ASC ] - ├── value indices: [ 1, 2 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 0 { columns: [ ak1k2_k2, ak1k2_v, ak1k2_a__row_id ], primary key: [ $0 ASC, $1 DESC, $2 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 1 { columns: [ ak1k2_k2, max(ak1k2_v), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -433,14 +413,14 @@ StreamMaterialize { columns: [sum_v, ak1k2.k1(hidden), ak1k2.k2(hidden)], stream_key: [ak1k2.k1, ak1k2.k2], pk_columns: [ak1k2.k1, ak1k2.k2], pk_conflict: NoCheck } └─StreamProject { exprs: [sum(ak1k2.v), ak1k2.k1, ak1k2.k2] } └─StreamHashAgg { group_key: [ak1k2.k1, ak1k2.k2], aggs: [sum(ak1k2.v), count] } - 
└─StreamTableScan { table: ak1k2, columns: [ak1k2.k1, ak1k2.k2, ak1k2.v, ak1k2.a._row_id], stream_scan_type: Backfill, pk: [ak1k2.a._row_id], dist: UpstreamHashShard(ak1k2.k1) } + └─StreamTableScan { table: ak1k2, columns: [ak1k2.k1, ak1k2.k2, ak1k2.v, ak1k2.a._row_id], stream_scan_type: ArrangementBackfill, pk: [ak1k2.a._row_id], dist: UpstreamHashShard(ak1k2.k1) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [sum_v, ak1k2.k1(hidden), ak1k2.k2(hidden)], stream_key: [ak1k2.k1, ak1k2.k2], pk_columns: [ak1k2.k1, ak1k2.k2], pk_conflict: NoCheck } ├── tables: [ Materialize: 4294967294 ] └── StreamProject { exprs: [sum(ak1k2.v), ak1k2.k1, ak1k2.k2] } └── StreamHashAgg { group_key: [ak1k2.k1, ak1k2.k2], aggs: [sum(ak1k2.v), count] } { tables: [ HashAggState: 0 ] } - └── StreamTableScan { table: ak1k2, columns: [ak1k2.k1, ak1k2.k2, ak1k2.v, ak1k2.a._row_id], stream_scan_type: Backfill, pk: [ak1k2.a._row_id], dist: UpstreamHashShard(ak1k2.k1) } + └── StreamTableScan { table: ak1k2, columns: [ak1k2.k1, ak1k2.k2, ak1k2.v, ak1k2.a._row_id], stream_scan_type: ArrangementBackfill, pk: [ak1k2.a._row_id], dist: UpstreamHashShard(ak1k2.k1) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -473,14 +453,14 @@ StreamMaterialize { columns: [sum_v, ak1.k1(hidden), ak1.k2(hidden)], stream_key: [ak1.k1, ak1.k2], pk_columns: [ak1.k1, ak1.k2], pk_conflict: NoCheck } └─StreamProject { exprs: [sum(ak1.v), ak1.k1, ak1.k2] } └─StreamHashAgg { group_key: [ak1.k1, ak1.k2], aggs: [sum(ak1.v), count] } - └─StreamTableScan { table: ak1, columns: [ak1.k1, ak1.k2, ak1.v, ak1.a._row_id], stream_scan_type: Backfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } + └─StreamTableScan { table: ak1, columns: [ak1.k1, ak1.k2, ak1.v, ak1.a._row_id], stream_scan_type: ArrangementBackfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [sum_v, ak1.k1(hidden), ak1.k2(hidden)], stream_key: [ak1.k1, ak1.k2], pk_columns: [ak1.k1, ak1.k2], pk_conflict: NoCheck } ├── tables: [ Materialize: 4294967294 ] └── StreamProject { exprs: [sum(ak1.v), ak1.k1, ak1.k2] } └── StreamHashAgg { group_key: [ak1.k1, ak1.k2], aggs: [sum(ak1.v), count] } { tables: [ HashAggState: 0 ] } - └── StreamTableScan { table: ak1, columns: [ak1.k1, ak1.k2, ak1.v, ak1.a._row_id], stream_scan_type: Backfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } + └── StreamTableScan { table: ak1, columns: [ak1.k1, ak1.k2, ak1.v, ak1.a._row_id], stream_scan_type: ArrangementBackfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -523,7 +503,7 @@ └─StreamHashAgg { group_key: [a.k1], aggs: [max(count), count] } └─StreamHashAgg { group_key: [a.k1], aggs: [count] } └─StreamExchange { dist: HashShard(a.k1) } - └─StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + └─StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [max_num, a.k1(hidden)], stream_key: [a.k1], pk_columns: [a.k1], pk_conflict: NoCheck } @@ -535,17 +515,12 @@ └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + StreamTableScan { table: a, columns: [a.k1, 
a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } ├── tables: [ StreamScan: 3 ] ├── Upstream └── BatchPlanNode - Table 0 - ├── columns: [ a_k1, count ] - ├── primary key: [ $0 ASC, $1 DESC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 0 { columns: [ a_k1, count ], primary key: [ $0 ASC, $1 DESC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 1 ├── columns: [ a_k1, max(count), count ] @@ -612,7 +587,7 @@ └─StreamExchange { dist: HashShard(a.k1) } └─StreamHashAgg { group_key: [a.k1, a.k2], aggs: [count] } └─StreamExchange { dist: HashShard(a.k1, a.k2) } - └─StreamTableScan { table: a, columns: [a.k1, a.k2, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + └─StreamTableScan { table: a, columns: [a.k1, a.k2, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [max_num, a.k1(hidden)], stream_key: [a.k1], pk_columns: [a.k1], pk_conflict: NoCheck } @@ -627,7 +602,7 @@ └── StreamExchange Hash([0, 1]) from 2 Fragment 2 - StreamTableScan { table: a, columns: [a.k1, a.k2, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + StreamTableScan { table: a, columns: [a.k1, a.k2, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } ├── tables: [ StreamScan: 3 ] ├── Upstream └── BatchPlanNode @@ -639,19 +614,9 @@ ├── distribution key: [ 0 ] └── read pk prefix len hint: 1 - Table 1 - ├── columns: [ a_k1, max(count), count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 1 { columns: [ a_k1, max(count), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 2 - ├── columns: [ a_k1, a_k2, count ] - ├── primary key: [ $0 ASC, $1 ASC ] - ├── value indices: [ 2 ] - ├── distribution key: [ 0, 1 ] - └── read pk prefix len hint: 2 + Table 2 { columns: [ a_k1, a_k2, count ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0, 1 ], read pk prefix len hint: 2 } Table 3 ├── columns: [ vnode, _row_id, a_backfill_finished, a_row_count ] @@ -661,12 +626,7 @@ ├── read pk prefix len hint: 1 └── vnode column idx: 0 - Table 4294967294 - ├── columns: [ max_num, a.k1 ] - ├── primary key: [ $1 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 1 ] - └── read pk prefix len hint: 1 + Table 4294967294 { columns: [ max_num, a.k1 ], primary key: [ $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 1 ], read pk prefix len hint: 1 } - id: aggk2_from_aggk1k2 before: @@ -709,7 +669,7 @@ └─StreamExchange { dist: HashShard(a.k2) } └─StreamHashAgg { group_key: [a.k1, a.k2], aggs: [count] } └─StreamExchange { dist: HashShard(a.k1, a.k2) } - └─StreamTableScan { table: a, columns: [a.k1, a.k2, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + └─StreamTableScan { table: a, columns: [a.k1, a.k2, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [max_num, a.k2(hidden)], stream_key: [a.k2], pk_columns: [a.k2], pk_conflict: NoCheck } @@ -724,7 +684,7 @@ └── StreamExchange Hash([0, 1]) from 2 Fragment 2 - 
StreamTableScan { table: a, columns: [a.k1, a.k2, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + StreamTableScan { table: a, columns: [a.k1, a.k2, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } ├── tables: [ StreamScan: 3 ] ├── Upstream └── BatchPlanNode @@ -736,19 +696,9 @@ ├── distribution key: [ 0 ] └── read pk prefix len hint: 1 - Table 1 - ├── columns: [ a_k2, max(count), count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 1 { columns: [ a_k2, max(count), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 2 - ├── columns: [ a_k1, a_k2, count ] - ├── primary key: [ $0 ASC, $1 ASC ] - ├── value indices: [ 2 ] - ├── distribution key: [ 0, 1 ] - └── read pk prefix len hint: 2 + Table 2 { columns: [ a_k1, a_k2, count ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0, 1 ], read pk prefix len hint: 2 } Table 3 ├── columns: [ vnode, _row_id, a_backfill_finished, a_row_count ] @@ -758,12 +708,7 @@ ├── read pk prefix len hint: 1 └── vnode column idx: 0 - Table 4294967294 - ├── columns: [ max_num, a.k2 ] - ├── primary key: [ $1 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 1 ] - └── read pk prefix len hint: 1 + Table 4294967294 { columns: [ max_num, a.k2 ], primary key: [ $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 1 ], read pk prefix len hint: 1 } - id: aggk1k2_from_aggk1k2 before: @@ -791,7 +736,7 @@ └─StreamHashAgg { group_key: [a.k1, a.k2], aggs: [max(count), count] } └─StreamHashAgg { group_key: [a.k1, a.k2], aggs: [count] } └─StreamExchange { dist: HashShard(a.k1, a.k2) } - └─StreamTableScan { table: a, columns: [a.k1, a.k2, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + └─StreamTableScan { table: a, columns: [a.k1, a.k2, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [max_num, a.k1(hidden), a.k2(hidden)], stream_key: [a.k1, a.k2], pk_columns: [a.k1, a.k2], pk_conflict: NoCheck } @@ -802,7 +747,7 @@ └── StreamExchange Hash([0, 1]) from 1 Fragment 1 - StreamTableScan { table: a, columns: [a.k1, a.k2, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + StreamTableScan { table: a, columns: [a.k1, a.k2, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } ├── tables: [ StreamScan: 3 ] ├── Upstream └── BatchPlanNode @@ -821,12 +766,7 @@ ├── distribution key: [ 0, 1 ] └── read pk prefix len hint: 2 - Table 2 - ├── columns: [ a_k1, a_k2, count ] - ├── primary key: [ $0 ASC, $1 ASC ] - ├── value indices: [ 2 ] - ├── distribution key: [ 0, 1 ] - └── read pk prefix len hint: 2 + Table 2 { columns: [ a_k1, a_k2, count ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0, 1 ], read pk prefix len hint: 2 } Table 3 ├── columns: [ vnode, _row_id, a_backfill_finished, a_row_count ] @@ -876,10 +816,10 @@ └─StreamExchange { dist: HashShard(ak1.a._row_id, ak1.k1) } └─StreamHashJoin { type: Inner, predicate: ak1.k1 = a.k1, output: [ak1.v, count, ak1.a._row_id, ak1.k1, a.k1] } ├─StreamExchange { dist: HashShard(ak1.k1) } - │ └─StreamTableScan { table: ak1, columns: [ak1.k1, ak1.v, ak1.a._row_id], 
stream_scan_type: Backfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } + │ └─StreamTableScan { table: ak1, columns: [ak1.k1, ak1.v, ak1.a._row_id], stream_scan_type: ArrangementBackfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } └─StreamHashAgg { group_key: [a.k1], aggs: [count] } └─StreamExchange { dist: HashShard(a.k1) } - └─StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + └─StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [v, bv, ak1.a._row_id(hidden), ak1.k1(hidden), a.k1(hidden)], stream_key: [ak1.a._row_id, ak1.k1], pk_columns: [ak1.a._row_id, ak1.k1], pk_conflict: NoCheck } @@ -894,12 +834,13 @@ └── StreamExchange Hash([0]) from 3 Fragment 2 - StreamTableScan { table: ak1, columns: [ak1.k1, ak1.v, ak1.a._row_id], stream_scan_type: Backfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } { tables: [ StreamScan: 4 ] } + StreamTableScan { table: ak1, columns: [ak1.k1, ak1.v, ak1.a._row_id], stream_scan_type: ArrangementBackfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } + ├── tables: [ StreamScan: 4 ] ├── Upstream └── BatchPlanNode Fragment 3 - StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } { tables: [ StreamScan: 6 ] } + StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } { tables: [ StreamScan: 6 ] } ├── Upstream └── BatchPlanNode @@ -969,9 +910,9 @@ └─StreamHashJoin { type: Inner, predicate: a.k1 = ak1.k1, output: [ak1.v, count, a.k1, ak1.a._row_id] } ├─StreamHashAgg { group_key: [a.k1], aggs: [count] } │ └─StreamExchange { dist: HashShard(a.k1) } - │ └─StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + │ └─StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } └─StreamExchange { dist: HashShard(ak1.k1) } - └─StreamTableScan { table: ak1, columns: [ak1.k1, ak1.v, ak1.a._row_id], stream_scan_type: Backfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } + └─StreamTableScan { table: ak1, columns: [ak1.k1, ak1.v, ak1.a._row_id], stream_scan_type: ArrangementBackfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [v, bv, a.k1(hidden), ak1.a._row_id(hidden)], stream_key: [a.k1, ak1.a._row_id], pk_columns: [a.k1, ak1.a._row_id], pk_conflict: NoCheck } @@ -986,13 +927,13 @@ └── StreamExchange Hash([0]) from 3 Fragment 2 - StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode Fragment 3 - StreamTableScan { table: ak1, columns: [ak1.k1, ak1.v, ak1.a._row_id], stream_scan_type: Backfill, pk: [ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } + StreamTableScan { table: ak1, columns: [ak1.k1, ak1.v, ak1.a._row_id], stream_scan_type: ArrangementBackfill, pk: 
[ak1.a._row_id], dist: UpstreamHashShard(ak1.k1) } ├── tables: [ StreamScan: 6 ] ├── Upstream └── BatchPlanNode @@ -1074,10 +1015,10 @@ └─StreamHashJoin { type: Inner, predicate: a.k1 = b.k1, output: [count, count, a.k1, b.k1] } ├─StreamHashAgg { group_key: [a.k1], aggs: [count] } │ └─StreamExchange { dist: HashShard(a.k1) } - │ └─StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + │ └─StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } └─StreamHashAgg { group_key: [b.k1], aggs: [count] } └─StreamExchange { dist: HashShard(b.k1) } - └─StreamTableScan { table: b, columns: [b.k1, b._row_id], stream_scan_type: Backfill, pk: [b._row_id], dist: UpstreamHashShard(b._row_id) } + └─StreamTableScan { table: b, columns: [b.k1, b._row_id], stream_scan_type: ArrangementBackfill, pk: [b._row_id], dist: UpstreamHashShard(b._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [num, bv, a.k1(hidden), b.k1(hidden)], stream_key: [a.k1], pk_columns: [a.k1], pk_conflict: NoCheck } @@ -1093,44 +1034,24 @@ └── StreamExchange Hash([0]) from 3 Fragment 2 - StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + StreamTableScan { table: a, columns: [a.k1, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode Fragment 3 - StreamTableScan { table: b, columns: [b.k1, b._row_id], stream_scan_type: Backfill, pk: [b._row_id], dist: UpstreamHashShard(b._row_id) } + StreamTableScan { table: b, columns: [b.k1, b._row_id], stream_scan_type: ArrangementBackfill, pk: [b._row_id], dist: UpstreamHashShard(b._row_id) } ├── tables: [ StreamScan: 7 ] ├── Upstream └── BatchPlanNode - Table 0 - ├── columns: [ a_k1, count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 0 { columns: [ a_k1, count ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 1 - ├── columns: [ a_k1, _degree ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 1 { columns: [ a_k1, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 2 - ├── columns: [ b_k1, count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 2 { columns: [ b_k1, count ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 3 - ├── columns: [ b_k1, _degree ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 3 { columns: [ b_k1, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 4 { columns: [ a_k1, count ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -1179,14 +1100,14 @@ StreamMaterialize { columns: [row_id, uid, v, created_at, window_start, window_end, t1._row_id(hidden)], stream_key: [t1._row_id, window_start, window_end], pk_columns: [t1._row_id, window_start, 
window_end], pk_conflict: NoCheck } └─StreamHopWindow { time_col: t1.created_at, slide: 00:15:00, size: 00:30:00, output: [t1.row_id, t1.uid, t1.v, t1.created_at, window_start, window_end, t1._row_id] } └─StreamFilter { predicate: IsNotNull(t1.created_at) } - └─StreamTableScan { table: t1, columns: [t1.row_id, t1.uid, t1.v, t1.created_at, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.row_id, t1.uid, t1.v, t1.created_at, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [row_id, uid, v, created_at, window_start, window_end, t1._row_id(hidden)], stream_key: [t1._row_id, window_start, window_end], pk_columns: [t1._row_id, window_start, window_end], pk_conflict: NoCheck } ├── tables: [ Materialize: 4294967294 ] └── StreamHopWindow { time_col: t1.created_at, slide: 00:15:00, size: 00:30:00, output: [t1.row_id, t1.uid, t1.v, t1.created_at, window_start, window_end, t1._row_id] } └── StreamFilter { predicate: IsNotNull(t1.created_at) } - └── StreamTableScan { table: t1, columns: [t1.row_id, t1.uid, t1.v, t1.created_at, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } { tables: [ StreamScan: 0 ] } + └── StreamTableScan { table: t1, columns: [t1.row_id, t1.uid, t1.v, t1.created_at, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } { tables: [ StreamScan: 0 ] } ├── Upstream └── BatchPlanNode diff --git a/src/frontend/planner_test/tests/testdata/output/dynamic_filter.yaml b/src/frontend/planner_test/tests/testdata/output/dynamic_filter.yaml index 92db91cae829a..54bd99859b00f 100644 --- a/src/frontend/planner_test/tests/testdata/output/dynamic_filter.yaml +++ b/src/frontend/planner_test/tests/testdata/output/dynamic_filter.yaml @@ -16,14 +16,14 @@ stream_plan: |- StreamMaterialize { columns: [v1, t1._row_id(hidden)], stream_key: [t1._row_id], pk_columns: [t1._row_id], pk_conflict: NoCheck } └─StreamDynamicFilter { predicate: (t1.v1 > max(max(t2.v2))), output: [t1.v1, t1._row_id] } - ├─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + ├─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: Broadcast } └─StreamProject { exprs: [max(max(t2.v2))] } └─StreamSimpleAgg { aggs: [max(max(t2.v2)), count] } └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr1], aggs: [max(t2.v2), count] } └─StreamProject { exprs: [t2.v2, t2._row_id, Vnode(t2._row_id) as $expr1] } - └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: | With Top-1 on inner side TODO: currently not possible due to https://github.com/risingwavelabs/risingwave/issues/5764 @@ -54,9 +54,9 @@ stream_plan: |- StreamMaterialize { columns: [v1, t1._row_id(hidden)], stream_key: [t1._row_id], pk_columns: [t1._row_id], pk_conflict: NoCheck } └─StreamDynamicFilter { predicate: (t1.v1 > max_v2.max), output: [t1.v1, t1._row_id] } - ├─StreamTableScan { table: 
t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + ├─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: Broadcast } - └─StreamTableScan { table: max_v2, columns: [max_v2.max], stream_scan_type: Backfill, pk: [], dist: Single } + └─StreamTableScan { table: max_v2, columns: [max_v2.max], stream_scan_type: ArrangementBackfill, pk: [], dist: Single } - name: | Output indices of Dynamic Filter TODO: currently implemented by adding a Project, https://github.com/risingwavelabs/risingwave/issues/3419 @@ -75,14 +75,14 @@ └─StreamProject { exprs: [t1.v1, t1._row_id] } └─StreamDynamicFilter { predicate: ($expr1 > max(max(t2.v2))), output: [t1.v1, $expr1, t1._row_id] } ├─StreamProject { exprs: [t1.v1, (t1.v1 + t1.v1) as $expr1, t1._row_id] } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: Broadcast } └─StreamProject { exprs: [max(max(t2.v2))] } └─StreamSimpleAgg { aggs: [max(max(t2.v2)), count] } └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr2], aggs: [max(t2.v2), count] } └─StreamProject { exprs: [t2.v2, t2._row_id, Vnode(t2._row_id) as $expr2] } - └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: Ensure error on multiple rows on inner side before: - create_tables @@ -127,14 +127,14 @@ └─StreamExchange { dist: HashShard(t1.v1, t1._row_id) } └─StreamHashJoin { type: Inner, predicate: t1.v1 = max(max(t2.v2)), output: [t1.v1, max(max(t2.v2)), t1._row_id] } ├─StreamExchange { dist: HashShard(t1.v1) } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(max(max(t2.v2))) } └─StreamProject { exprs: [max(max(t2.v2))] } └─StreamSimpleAgg { aggs: [max(max(t2.v2)), count] } └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr1], aggs: [max(t2.v2), count] } └─StreamProject { exprs: [t2.v2, t2._row_id, Vnode(t2._row_id) as $expr1] } - └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: Dynamic filter join on unequal types sql: | create table t1 (v1 int); @@ -151,14 +151,14 @@ └─StreamProject { exprs: [t1.v1, t1._row_id] } └─StreamDynamicFilter { predicate: ($expr1 > max(max(t2.v2))), output: [t1.v1, $expr1, t1._row_id] } ├─StreamProject { exprs: [t1.v1, t1.v1::Int64 as $expr1, t1._row_id] } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: 
UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: Broadcast } └─StreamProject { exprs: [max(max(t2.v2))] } └─StreamSimpleAgg { aggs: [max(max(t2.v2)), count] } └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr2], aggs: [max(t2.v2), count] } └─StreamProject { exprs: [t2.v2, t2._row_id, Vnode(t2._row_id) as $expr2] } - └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: Dynamic filter on semi join sql: | create table t1 (v1 int); @@ -167,14 +167,14 @@ stream_plan: |- StreamMaterialize { columns: [v1, t1._row_id(hidden)], stream_key: [t1._row_id], pk_columns: [t1._row_id], pk_conflict: NoCheck } └─StreamDynamicFilter { predicate: (t1.v1 > max(max(t2.v2))), output: [t1.v1, t1._row_id] } - ├─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + ├─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: Broadcast } └─StreamProject { exprs: [max(max(t2.v2))] } └─StreamSimpleAgg { aggs: [max(max(t2.v2)), count] } └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr1], aggs: [max(t2.v2), count] } └─StreamProject { exprs: [t2.v2, t2._row_id, Vnode(t2._row_id) as $expr1] } - └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: Complex expression on RHS of condition will still result in dynamic filter before: - create_tables @@ -189,14 +189,14 @@ stream_plan: |- StreamMaterialize { columns: [v1, t1._row_id(hidden)], stream_key: [t1._row_id], pk_columns: [t1._row_id], pk_conflict: NoCheck } └─StreamDynamicFilter { predicate: (t1.v1 > $expr2), output: [t1.v1, t1._row_id] } - ├─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + ├─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: Broadcast } └─StreamProject { exprs: [(2:Int32 * max(max(t2.v2))) as $expr2] } └─StreamSimpleAgg { aggs: [max(max(t2.v2)), count] } └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr1], aggs: [max(t2.v2), count] } └─StreamProject { exprs: [t2.v2, t2._row_id, Vnode(t2._row_id) as $expr1] } - └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: two predicate on the same subquery before: - create_tables @@ -217,7 +217,7 @@ StreamMaterialize { columns: [v1, t1._row_id(hidden)], stream_key: [t1._row_id], pk_columns: 
[t1._row_id], pk_conflict: NoCheck } └─StreamDynamicFilter { predicate: (t1.v1 < $expr2), output: [t1.v1, t1._row_id] } ├─StreamDynamicFilter { predicate: (t1.v1 > max(max(t2.v2))), output: [t1.v1, t1._row_id] } - │ ├─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ ├─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ └─StreamExchange { dist: Broadcast } │ └─StreamShare { id: 6 } │ └─StreamProject { exprs: [max(max(t2.v2))] } @@ -225,7 +225,7 @@ │ └─StreamExchange { dist: Single } │ └─StreamHashAgg { group_key: [$expr1], aggs: [max(t2.v2), count] } │ └─StreamProject { exprs: [t2.v2, t2._row_id, Vnode(t2._row_id) as $expr1] } - │ └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + │ └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } └─StreamExchange { dist: Broadcast } └─StreamProject { exprs: [(max(max(t2.v2)) + 5:Int32) as $expr2] } └─StreamShare { id: 6 } @@ -234,4 +234,4 @@ └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr1], aggs: [max(t2.v2), count] } └─StreamProject { exprs: [t2.v2, t2._row_id, Vnode(t2._row_id) as $expr1] } - └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/emit_on_window_close.yaml b/src/frontend/planner_test/tests/testdata/output/emit_on_window_close.yaml index 1a81366c62b15..87ac0fca867b0 100644 --- a/src/frontend/planner_test/tests/testdata/output/emit_on_window_close.yaml +++ b/src/frontend/planner_test/tests/testdata/output/emit_on_window_close.yaml @@ -7,7 +7,7 @@ └─StreamProject { exprs: [t.v1, min(t.v2), count(distinct t.v3)] } └─StreamHashAgg { group_key: [t.v1], aggs: [min(t.v2), count(distinct t.v3), count] } └─StreamExchange { dist: HashShard(t.v1) } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } eowc_stream_error: |- Not supported: The query cannot be executed in Emit-On-Window-Close mode. 
HINT: Please make sure there is one and only one watermark column in GROUP BY @@ -93,14 +93,14 @@ └─StreamHashAgg [append_only] { group_key: [$expr1], aggs: [max(t.b), count], output_watermarks: [$expr1] } └─StreamExchange { dist: HashShard($expr1) } └─StreamProject { exprs: [TumbleStart(t.a, '01:00:00':Interval) as $expr1, t.b, t._row_id], output_watermarks: [$expr1] } - └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } eowc_stream_plan: |- StreamMaterialize { columns: [window_start, max], stream_key: [window_start], pk_columns: [window_start], pk_conflict: NoCheck, watermark_columns: [window_start] } └─StreamProject { exprs: [$expr1, max(t.b)], output_watermarks: [$expr1] } └─StreamHashAgg [append_only, eowc] { group_key: [$expr1], aggs: [max(t.b), count], output_watermarks: [$expr1] } └─StreamExchange { dist: HashShard($expr1) } └─StreamProject { exprs: [TumbleStart(t.a, '01:00:00':Interval) as $expr1, t.b, t._row_id], output_watermarks: [$expr1] } - └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } eowc_stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [window_start, max], stream_key: [window_start], pk_columns: [window_start], pk_conflict: NoCheck, watermark_columns: [window_start] } @@ -111,7 +111,7 @@ Fragment 1 StreamProject { exprs: [TumbleStart(t.a, '01:00:00':Interval) as $expr1, t.b, t._row_id], output_watermarks: [$expr1] } - └── StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └── StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -231,9 +231,9 @@ └─StreamHashJoin [interval, append_only] { type: Inner, predicate: s1.id = s2.id AND (s1.ts >= s2.ts) AND ($expr1 <= s2.ts), conditions_to_clean_left_state_table: (s1.ts >= s2.ts), conditions_to_clean_right_state_table: ($expr1 <= s2.ts), output_watermarks: [s1.ts, s2.ts], output: [s1.id, s1.value, s2.id, s2.value, s1.ts, s2.ts, s1._row_id, s2._row_id] } ├─StreamExchange { dist: HashShard(s1.id) } │ └─StreamProject { exprs: [s1.id, s1.value, s1.ts, (s1.ts - '00:01:00':Interval) as $expr1, s1._row_id], output_watermarks: [s1.ts, $expr1] } - │ └─StreamTableScan { table: s1, columns: [s1.id, s1.value, s1.ts, s1._row_id], stream_scan_type: Backfill, pk: [s1._row_id], dist: UpstreamHashShard(s1._row_id) } + │ └─StreamTableScan { table: s1, columns: [s1.id, s1.value, s1.ts, s1._row_id], stream_scan_type: ArrangementBackfill, pk: [s1._row_id], dist: UpstreamHashShard(s1._row_id) } └─StreamExchange { dist: HashShard(s2.id) } - └─StreamTableScan { table: s2, columns: [s2.id, s2.value, s2.ts, s2._row_id], stream_scan_type: Backfill, pk: [s2._row_id], dist: UpstreamHashShard(s2._row_id) } + └─StreamTableScan { table: s2, columns: [s2.id, s2.value, s2.ts, s2._row_id], stream_scan_type: ArrangementBackfill, pk: [s2._row_id], dist: UpstreamHashShard(s2._row_id) } eowc_stream_plan: |- 
StreamMaterialize { columns: [id1, value1, id2, value2, ts1, ts2, s1._row_id(hidden), s2._row_id(hidden), count], stream_key: [s1._row_id, s2._row_id, id1, value2], pk_columns: [s1._row_id, s2._row_id, id1, value2], pk_conflict: NoCheck } └─StreamEowcOverWindow { window_functions: [count() OVER(PARTITION BY s2.value ORDER BY s2.ts ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } @@ -242,6 +242,6 @@ └─StreamHashJoin [interval, append_only] { type: Inner, predicate: s1.id = s2.id AND (s1.ts >= s2.ts) AND ($expr1 <= s2.ts), conditions_to_clean_left_state_table: (s1.ts >= s2.ts), conditions_to_clean_right_state_table: ($expr1 <= s2.ts), output_watermarks: [s1.ts, s2.ts], output: [s1.id, s1.value, s2.id, s2.value, s1.ts, s2.ts, s1._row_id, s2._row_id] } ├─StreamExchange { dist: HashShard(s1.id) } │ └─StreamProject { exprs: [s1.id, s1.value, s1.ts, (s1.ts - '00:01:00':Interval) as $expr1, s1._row_id], output_watermarks: [s1.ts, $expr1] } - │ └─StreamTableScan { table: s1, columns: [s1.id, s1.value, s1.ts, s1._row_id], stream_scan_type: Backfill, pk: [s1._row_id], dist: UpstreamHashShard(s1._row_id) } + │ └─StreamTableScan { table: s1, columns: [s1.id, s1.value, s1.ts, s1._row_id], stream_scan_type: ArrangementBackfill, pk: [s1._row_id], dist: UpstreamHashShard(s1._row_id) } └─StreamExchange { dist: HashShard(s2.id) } - └─StreamTableScan { table: s2, columns: [s2.id, s2.value, s2.ts, s2._row_id], stream_scan_type: Backfill, pk: [s2._row_id], dist: UpstreamHashShard(s2._row_id) } + └─StreamTableScan { table: s2, columns: [s2.id, s2.value, s2.ts, s2._row_id], stream_scan_type: ArrangementBackfill, pk: [s2._row_id], dist: UpstreamHashShard(s2._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/except.yaml b/src/frontend/planner_test/tests/testdata/output/except.yaml index 45f22bfa967ed..227a0547f5dd1 100644 --- a/src/frontend/planner_test/tests/testdata/output/except.yaml +++ b/src/frontend/planner_test/tests/testdata/output/except.yaml @@ -29,9 +29,9 @@ └─StreamHashAgg { group_key: [t1.a, t1.b, t1.c], aggs: [count] } └─StreamHashJoin { type: LeftAnti, predicate: t1.a IS NOT DISTINCT FROM t2.a AND t1.b IS NOT DISTINCT FROM t2.b AND t1.c IS NOT DISTINCT FROM t2.c, output: all } ├─StreamExchange { dist: HashShard(t1.a, t1.b, t1.c) } - │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2.a, t2.b, t2.c) } - └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a, b, c], stream_key: [a, b, c], pk_columns: [a, b, c], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } @@ -43,13 +43,13 @@ └── StreamExchange Hash([0, 1, 2]) from 2 Fragment 1 - StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: 
UpstreamHashShard(t1._row_id) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode Fragment 2 - StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } ├── tables: [ StreamScan: 6 ] ├── Upstream └── BatchPlanNode @@ -124,9 +124,9 @@ └─StreamExchange { dist: HashShard(t1.a) } └─StreamHashJoin { type: LeftAnti, predicate: t1.a IS NOT DISTINCT FROM t2.a AND t1.b IS NOT DISTINCT FROM t2.b AND t1.c IS NOT DISTINCT FROM t2.c, output: all } ├─StreamExchange { dist: HashShard(t1.a, t1.b, t1.c) } - │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: Backfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } + │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: ArrangementBackfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } └─StreamExchange { dist: HashShard(t2.a, t2.b, t2.c) } - └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: Backfill, pk: [t2.a], dist: UpstreamHashShard(t2.a) } + └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: ArrangementBackfill, pk: [t2.a], dist: UpstreamHashShard(t2.a) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a, b, c], stream_key: [a], pk_columns: [a], pk_conflict: NoCheck } @@ -143,13 +143,13 @@ └── StreamExchange Hash([0, 1, 2]) from 3 Fragment 2 - StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: Backfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } + StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: ArrangementBackfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode Fragment 3 - StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: Backfill, pk: [t2.a], dist: UpstreamHashShard(t2.a) } + StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: ArrangementBackfill, pk: [t2.a], dist: UpstreamHashShard(t2.a) } ├── tables: [ StreamScan: 6 ] ├── Upstream └── BatchPlanNode diff --git a/src/frontend/planner_test/tests/testdata/output/expr.yaml b/src/frontend/planner_test/tests/testdata/output/expr.yaml index 12d553f50b315..faeffca7d694a 100644 --- a/src/frontend/planner_test/tests/testdata/output/expr.yaml +++ b/src/frontend/planner_test/tests/testdata/output/expr.yaml @@ -186,7 +186,7 @@ stream_plan: |- StreamMaterialize { columns: [expr, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [Case((t.v1 = 1:Int32), 1:Decimal, (t.v1 = 2:Int32), 2:Decimal, 0.0:Decimal) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: case searched form without else sql: | create table t (v1 int); @@ -231,7 +231,7 @@ stream_plan: |- StreamMaterialize { columns: [expr, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [Case((t.v1 = 1:Int32), null:Int32, t.v1) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v1, t._row_id], 
stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (v1 int); select nullif(v1, 1, 2) from t; @@ -258,7 +258,7 @@ stream_plan: |- StreamMaterialize { columns: [expr, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [Coalesce(t.v1, 1:Int32) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (v1 int); select coalesce(v1, 1.2) from t; @@ -292,7 +292,7 @@ stream_plan: |- StreamMaterialize { columns: [expr, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [ConcatWs(t.v1, '1':Varchar) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (v1 varchar); select concat_ws(v1, 1.2) from t; @@ -326,7 +326,7 @@ stream_plan: |- StreamMaterialize { columns: [expr, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [ConcatWs('':Varchar, t.v1, t.v2::Varchar, t.v3::Varchar, '1':Varchar) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (v1 float); select concat(v1) from t; @@ -488,7 +488,7 @@ stream_plan: |- StreamMaterialize { columns: [v1, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck, watermark_columns: [v1] } └─StreamDynamicFilter { predicate: (t.v1 >= now), output_watermarks: [t.v1], output: [t.v1, t._row_id], cleaned_by_watermark: true } - ├─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + ├─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: Broadcast } └─StreamNow { output: [now] } - name: now expression with proj @@ -502,7 +502,7 @@ stream_plan: |- StreamMaterialize { columns: [v1, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck, watermark_columns: [v1] } └─StreamDynamicFilter { predicate: (t.v1 >= $expr1), output_watermarks: [t.v1], output: [t.v1, t._row_id], cleaned_by_watermark: true } - ├─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + ├─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: Broadcast } 
└─StreamProject { exprs: [SubtractWithTimeZone(now, '00:00:02':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } └─StreamNow { output: [now] } @@ -514,7 +514,7 @@ StreamMaterialize { columns: [v1, v2, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck, watermark_columns: [v2] } └─StreamDynamicFilter { predicate: (t.v2 >= now), output_watermarks: [t.v2], output: [t.v1, t.v2, t._row_id], cleaned_by_watermark: true } ├─StreamDynamicFilter { predicate: (t.v1 >= now), output_watermarks: [t.v1], output: [t.v1, t.v2, t._row_id], cleaned_by_watermark: true } - │ ├─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ ├─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } │ └─StreamExchange { dist: Broadcast } │ └─StreamNow { output: [now] } └─StreamExchange { dist: Broadcast } @@ -535,7 +535,7 @@ ├─StreamProject { exprs: [t.v2, max(t.v1)] } │ └─StreamHashAgg { group_key: [t.v2], aggs: [max(t.v1), count] } │ └─StreamExchange { dist: HashShard(t.v2) } - │ └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: Broadcast } └─StreamNow { output: [now] } - name: forbid now in group by for stream diff --git a/src/frontend/planner_test/tests/testdata/output/grouping_sets.yaml b/src/frontend/planner_test/tests/testdata/output/grouping_sets.yaml index 04527818d6585..92c03ecd45384 100644 --- a/src/frontend/planner_test/tests/testdata/output/grouping_sets.yaml +++ b/src/frontend/planner_test/tests/testdata/output/grouping_sets.yaml @@ -16,7 +16,7 @@ └─StreamHashAgg { group_key: [items_sold.brand_expanded, items_sold.size_expanded, flag], aggs: [sum(items_sold.sales), count] } └─StreamExchange { dist: HashShard(items_sold.brand_expanded, items_sold.size_expanded, flag) } └─StreamExpand { column_subsets: [[items_sold.brand], [items_sold.size], []] } - └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: Backfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } + └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: ArrangementBackfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } - name: with distinct sql: | create table items_sold (brand varchar, size varchar, sales int); @@ -36,7 +36,7 @@ └─StreamHashAgg { group_key: [items_sold.brand_expanded, items_sold.size_expanded, flag], aggs: [sum(distinct items_sold.sales), count] } └─StreamExchange { dist: HashShard(items_sold.brand_expanded, items_sold.size_expanded, flag) } └─StreamExpand { column_subsets: [[items_sold.brand], [items_sold.size], []] } - └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: Backfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } + └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: 
ArrangementBackfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } - name: column pruning sql: | create table items_sold (c1 int, brand varchar, c2 int, size varchar, c3 int, sales int, c4 int,); @@ -56,7 +56,7 @@ └─StreamExchange { dist: HashShard(items_sold.size_expanded, items_sold.brand_expanded, flag) } └─StreamExpand { column_subsets: [[items_sold.size], [items_sold.brand], []] } └─StreamProject { exprs: [items_sold.size, items_sold.brand, items_sold.sales, items_sold._row_id] } - └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: Backfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } + └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: ArrangementBackfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } - name: grouping agg calls sql: | create table items_sold (brand varchar, size varchar, sales int); @@ -76,7 +76,7 @@ └─StreamHashAgg { group_key: [items_sold.brand_expanded, items_sold.size_expanded, flag], aggs: [sum(items_sold.sales), count(distinct items_sold.sales), count] } └─StreamExchange { dist: HashShard(items_sold.brand_expanded, items_sold.size_expanded, flag) } └─StreamExpand { column_subsets: [[items_sold.brand], [items_sold.size], []] } - └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: Backfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } + └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: ArrangementBackfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } - name: too many arguments for grouping error sql: | create table items_sold (brand varchar, size varchar, sales int); @@ -106,7 +106,7 @@ └─StreamHashAgg { group_key: [items_sold.brand_expanded, items_sold.size_expanded, flag], aggs: [sum(items_sold.sales), count] } └─StreamExchange { dist: HashShard(items_sold.brand_expanded, items_sold.size_expanded, flag) } └─StreamExpand { column_subsets: [[], [items_sold.brand], [items_sold.brand, items_sold.size]] } - └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: Backfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } + └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: ArrangementBackfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } - name: rollup2 sql: | create table items_sold (brand varchar, size varchar, sales int); @@ -124,7 +124,7 @@ └─StreamHashAgg { group_key: [items_sold.brand_expanded, items_sold.size_expanded, flag], aggs: [sum(items_sold.sales), count] } └─StreamExchange { dist: HashShard(items_sold.brand_expanded, items_sold.size_expanded, flag) } └─StreamExpand { column_subsets: [[], [items_sold.brand, items_sold.size]] } - └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: Backfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } + └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, 
items_sold.sales, items_sold._row_id], stream_scan_type: ArrangementBackfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } - name: cube1 sql: | create table items_sold (brand varchar, size varchar, sales int); @@ -142,7 +142,7 @@ └─StreamHashAgg { group_key: [items_sold.brand_expanded, items_sold.size_expanded, flag], aggs: [sum(items_sold.sales), count] } └─StreamExchange { dist: HashShard(items_sold.brand_expanded, items_sold.size_expanded, flag) } └─StreamExpand { column_subsets: [[], [items_sold.brand], [items_sold.size], [items_sold.brand, items_sold.size]] } - └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: Backfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } + └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: ArrangementBackfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } - name: cube2 sql: | create table items_sold (brand varchar, size varchar, sales int); @@ -160,7 +160,7 @@ └─StreamHashAgg { group_key: [items_sold.brand_expanded, items_sold.size_expanded, flag], aggs: [sum(items_sold.sales), count] } └─StreamExchange { dist: HashShard(items_sold.brand_expanded, items_sold.size_expanded, flag) } └─StreamExpand { column_subsets: [[], [items_sold.brand], [items_sold.size], [items_sold.size], [items_sold.brand, items_sold.size], [items_sold.brand, items_sold.size], [items_sold.size], [items_sold.brand, items_sold.size]] } - └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: Backfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } + └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: ArrangementBackfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } - name: cube3 sql: | create table items_sold (brand varchar, size varchar, sales int); @@ -178,7 +178,7 @@ └─StreamHashAgg { group_key: [items_sold.brand_expanded, items_sold.size_expanded, flag], aggs: [sum(items_sold.sales), count] } └─StreamExchange { dist: HashShard(items_sold.brand_expanded, items_sold.size_expanded, flag) } └─StreamExpand { column_subsets: [[], [items_sold.brand, items_sold.size], [items_sold.size], [items_sold.brand, items_sold.size]] } - └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: Backfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } + └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold.size, items_sold.sales, items_sold._row_id], stream_scan_type: ArrangementBackfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } - name: only one set in grouping sets sql: | create table items_sold (brand varchar, size varchar, sales int); @@ -196,4 +196,4 @@ └─StreamHashAgg { group_key: [items_sold.brand, 0:Int64], aggs: [sum(null:Int32), count] } └─StreamExchange { dist: HashShard(items_sold.brand, 0:Int64) } └─StreamProject { exprs: [items_sold.brand, null:Int32, 0:Int64, items_sold._row_id] } - └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold._row_id], stream_scan_type: Backfill, pk: [items_sold._row_id], dist: 
UpstreamHashShard(items_sold._row_id) } + └─StreamTableScan { table: items_sold, columns: [items_sold.brand, items_sold._row_id], stream_scan_type: ArrangementBackfill, pk: [items_sold._row_id], dist: UpstreamHashShard(items_sold._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/index_selection.yaml b/src/frontend/planner_test/tests/testdata/output/index_selection.yaml index 7aaef4b74eb03..c275c0ef16204 100644 --- a/src/frontend/planner_test/tests/testdata/output/index_selection.yaml +++ b/src/frontend/planner_test/tests/testdata/output/index_selection.yaml @@ -619,7 +619,7 @@ └─StreamExchange { dist: Single } └─StreamGroupTopN { order: [t1.a ASC], limit: 1, offset: 0, group_key: [$expr1] } └─StreamProject { exprs: [t1.a, Vnode(t1.a) as $expr1] } - └─StreamTableScan { table: t1, columns: [t1.a], stream_scan_type: Backfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } + └─StreamTableScan { table: t1, columns: [t1.a], stream_scan_type: ArrangementBackfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } - sql: | create table t1 (a varchar, b int, c int, d int); create index idx on t1(a); diff --git a/src/frontend/planner_test/tests/testdata/output/intersect.yaml b/src/frontend/planner_test/tests/testdata/output/intersect.yaml index 776f663aa6b94..6b3b0bb882943 100644 --- a/src/frontend/planner_test/tests/testdata/output/intersect.yaml +++ b/src/frontend/planner_test/tests/testdata/output/intersect.yaml @@ -29,9 +29,9 @@ └─StreamHashAgg { group_key: [t1.a, t1.b, t1.c], aggs: [count] } └─StreamHashJoin { type: LeftSemi, predicate: t1.a IS NOT DISTINCT FROM t2.a AND t1.b IS NOT DISTINCT FROM t2.b AND t1.c IS NOT DISTINCT FROM t2.c, output: all } ├─StreamExchange { dist: HashShard(t1.a, t1.b, t1.c) } - │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2.a, t2.b, t2.c) } - └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a, b, c], stream_key: [a, b, c], pk_columns: [a, b, c], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } @@ -43,13 +43,13 @@ └── StreamExchange Hash([0, 1, 2]) from 2 Fragment 1 - StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode Fragment 2 - StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } ├── tables: [ StreamScan: 6 ] ├── Upstream └── BatchPlanNode @@ -124,9 +124,9 @@ └─StreamExchange { dist: HashShard(t1.a) } 
└─StreamHashJoin { type: LeftSemi, predicate: t1.a IS NOT DISTINCT FROM t2.a AND t1.b IS NOT DISTINCT FROM t2.b AND t1.c IS NOT DISTINCT FROM t2.c, output: all } ├─StreamExchange { dist: HashShard(t1.a, t1.b, t1.c) } - │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: Backfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } + │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: ArrangementBackfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } └─StreamExchange { dist: HashShard(t2.a, t2.b, t2.c) } - └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: Backfill, pk: [t2.a], dist: UpstreamHashShard(t2.a) } + └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: ArrangementBackfill, pk: [t2.a], dist: UpstreamHashShard(t2.a) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a, b, c], stream_key: [a], pk_columns: [a], pk_conflict: NoCheck } @@ -143,13 +143,13 @@ └── StreamExchange Hash([0, 1, 2]) from 3 Fragment 2 - StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: Backfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } + StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: ArrangementBackfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode Fragment 3 - StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: Backfill, pk: [t2.a], dist: UpstreamHashShard(t2.a) } + StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: ArrangementBackfill, pk: [t2.a], dist: UpstreamHashShard(t2.a) } ├── tables: [ StreamScan: 6 ] ├── Upstream └── BatchPlanNode diff --git a/src/frontend/planner_test/tests/testdata/output/join.yaml b/src/frontend/planner_test/tests/testdata/output/join.yaml index 66567d8a7c9a3..7b3e82c8843e6 100644 --- a/src/frontend/planner_test/tests/testdata/output/join.yaml +++ b/src/frontend/planner_test/tests/testdata/output/join.yaml @@ -18,11 +18,11 @@ └─StreamHashJoin { type: Inner, predicate: t1.v1 = t3.v5, output: [t1.v1, t1.v2, t2.v3, t2.v4, t3.v5, t3.v6, t1._row_id, t2._row_id, t3._row_id] } ├─StreamHashJoin { type: Inner, predicate: t1.v1 = t2.v3, output: [t1.v1, t1.v2, t2.v3, t2.v4, t1._row_id, t2._row_id] } │ ├─StreamExchange { dist: HashShard(t1.v1) } - │ │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ └─StreamExchange { dist: HashShard(t2.v3) } - │ └─StreamTableScan { table: t2, columns: [t2.v3, t2.v4, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + │ └─StreamTableScan { table: t2, columns: [t2.v3, t2.v4, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } └─StreamExchange { dist: HashShard(t3.v5) } - └─StreamTableScan { table: t3, columns: [t3.v5, t3.v6, t3._row_id], stream_scan_type: Backfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } + └─StreamTableScan { table: t3, columns: [t3.v5, t3.v6, t3._row_id], stream_scan_type: ArrangementBackfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } - name: self join sql: | create table t (v1 int, v2 int); @@ -37,9 +37,9 @@ └─StreamExchange { dist: HashShard(t.v1, t._row_id, t._row_id) 
} └─StreamHashJoin { type: Inner, predicate: t.v1 = t.v1, output: [t.v1, t.v1, t._row_id, t._row_id] } ├─StreamExchange { dist: HashShard(t.v1) } - │ └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: HashShard(t.v1) } - └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t1 (v1 int, v2 int); create table t2 (v1 int, v2 int); @@ -72,11 +72,11 @@ ├─StreamExchange { dist: HashShard(t2.v2) } │ └─StreamHashJoin { type: Inner, predicate: t1.v1 = t2.v1, output: [t1.v1, t1.v2, t2.v1, t2.v2, t1._row_id, t2._row_id] } │ ├─StreamExchange { dist: HashShard(t1.v1) } - │ │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ └─StreamExchange { dist: HashShard(t2.v1) } - │ └─StreamTableScan { table: t2, columns: [t2.v1, t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + │ └─StreamTableScan { table: t2, columns: [t2.v1, t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } └─StreamExchange { dist: HashShard(t3.v2) } - └─StreamTableScan { table: t3, columns: [t3.v1, t3.v2, t3._row_id], stream_scan_type: Backfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } + └─StreamTableScan { table: t3, columns: [t3.v1, t3.v2, t3._row_id], stream_scan_type: ArrangementBackfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } - sql: | create table t1 (v1 int, v2 int); create table t2 (v1 int, v2 int); @@ -99,9 +99,9 @@ └─StreamExchange { dist: HashShard(t1._row_id, t1.v1, t2._row_id) } └─StreamHashJoin { type: Inner, predicate: t1.v1 = t2.v1, output: [t1.v2, t2.v2, t1._row_id, t1.v1, t2._row_id] } ├─StreamExchange { dist: HashShard(t1.v1) } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2.v1) } - └─StreamTableScan { table: t2, columns: [t2.v1, t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v1, t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - sql: | create table t1 (v1 int, v2 int); create table t2 (v1 int, v2 int); @@ -161,9 +161,9 @@ └─StreamExchange { dist: HashShard(i.x, i.t._row_id, i.t._row_id) } └─StreamHashJoin { type: Inner, predicate: i.x = i.x, output: [i.x, i.x, i.t._row_id, i.t._row_id] } ├─StreamExchange { dist: HashShard(i.x) } - │ └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: 
Backfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } + │ └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: ArrangementBackfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } └─StreamExchange { dist: HashShard(i.x) } - └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: Backfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } + └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: ArrangementBackfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } - name: Left & right has same SomeShard distribution. There should still be exchanges below hash join sql: | create table t(x int); @@ -178,9 +178,9 @@ └─StreamExchange { dist: HashShard(i.x, i.t._row_id, t._row_id) } └─StreamHashJoin { type: Inner, predicate: i.x = t.x, output: [i.x, t.x, i.t._row_id, t._row_id] } ├─StreamExchange { dist: HashShard(i.x) } - │ └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: Backfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } + │ └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: ArrangementBackfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } └─StreamExchange { dist: HashShard(t.x) } - └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: Left & right has same HashShard distribution. There should be no exchange below hash join sql: | create table t(x int); @@ -209,15 +209,15 @@ ├─StreamShare { id: 4 } │ └─StreamHashJoin { type: Inner, predicate: i.x = i.x, output: [i.x, i.t._row_id, i.t._row_id] } │ ├─StreamExchange { dist: HashShard(i.x) } - │ │ └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: Backfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } + │ │ └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: ArrangementBackfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } │ └─StreamExchange { dist: HashShard(i.x) } - │ └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: Backfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } + │ └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: ArrangementBackfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } └─StreamShare { id: 4 } └─StreamHashJoin { type: Inner, predicate: i.x = i.x, output: [i.x, i.t._row_id, i.t._row_id] } ├─StreamExchange { dist: HashShard(i.x) } - │ └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: Backfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } + │ └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: ArrangementBackfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } └─StreamExchange { dist: HashShard(i.x) } - └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: Backfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } + └─StreamTableScan { table: i, columns: [i.x, i.t._row_id], stream_scan_type: ArrangementBackfill, pk: [i.t._row_id], dist: UpstreamHashShard(i.x) } - name: Use lookup join sql: | create table t1 (v1 int, v2 int); @@ -538,9 +538,9 @@ └─StreamFilter { predicate: (IsNotNull(a._row_id) OR IsNotNull(b._row_id)) } └─StreamHashJoin { type: FullOuter, predicate: a.x = b.x, output: [a.x, b.x, a._row_id, b._row_id] } ├─StreamExchange { dist: 
HashShard(a.x) } - │ └─StreamTableScan { table: a, columns: [a.x, a._row_id], stream_scan_type: Backfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } + │ └─StreamTableScan { table: a, columns: [a.x, a._row_id], stream_scan_type: ArrangementBackfill, pk: [a._row_id], dist: UpstreamHashShard(a._row_id) } └─StreamExchange { dist: HashShard(b.x) } - └─StreamTableScan { table: b, columns: [b.x, b._row_id], stream_scan_type: Backfill, pk: [b._row_id], dist: UpstreamHashShard(b._row_id) } + └─StreamTableScan { table: b, columns: [b.x, b._row_id], stream_scan_type: ArrangementBackfill, pk: [b._row_id], dist: UpstreamHashShard(b._row_id) } - sql: | CREATE TABLE test (a INTEGER, b INTEGER); CREATE TABLE test2 (a INTEGER, c INTEGER); @@ -638,9 +638,9 @@ └─StreamHashJoin { type: Inner, predicate: $expr1 IS NOT DISTINCT FROM t2.v2, output: [t1.v1, t2.v2, t1._row_id, $expr1, t2._row_id] } ├─StreamExchange { dist: HashShard($expr1) } │ └─StreamProject { exprs: [t1.v1, t1.v1::Int64 as $expr1, t1._row_id] } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2.v2) } - └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: Repeated columns in project should not interfere with join result (https://github.com/risingwavelabs/risingwave/issues/8216) sql: | create table t(x int); @@ -650,9 +650,9 @@ └─StreamProject { exprs: [t.x, t.x, t._row_id, t._row_id] } └─StreamHashJoin { type: Inner, predicate: t.x = t.x, output: [t.x, t._row_id, t._row_id] } ├─StreamExchange { dist: HashShard(t.x) } - │ └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: HashShard(t.x) } - └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: Choose correct distribution key in pk (https://github.com/risingwavelabs/risingwave/issues/7698) sql: | create table t (src int, dst int); @@ -664,11 +664,11 @@ ├─StreamExchange { dist: HashShard(t.src) } │ └─StreamHashJoin { type: Inner, predicate: t.dst = t.src, output: [t.src, t.dst, t.src, t.dst, t._row_id, t._row_id] } │ ├─StreamExchange { dist: HashShard(t.dst) } - │ │ └─StreamTableScan { table: t, columns: [t.src, t.dst, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ │ └─StreamTableScan { table: t, columns: [t.src, t.dst, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } │ └─StreamExchange { dist: HashShard(t.src) } - │ └─StreamTableScan { table: t, columns: [t.src, t.dst, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: 
UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.src, t.dst, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: HashShard(t.dst) } - └─StreamTableScan { table: t, columns: [t.dst, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.dst, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [p1, p2, p3, t._row_id(hidden), t._row_id#1(hidden), t.src(hidden), t._row_id#2(hidden)], stream_key: [t._row_id, t._row_id#1, p2, t._row_id#2, t.src, p1], pk_columns: [t._row_id, t._row_id#1, p2, t._row_id#2, t.src, p1], pk_conflict: NoCheck } @@ -686,17 +686,17 @@ └── StreamExchange Hash([0]) from 4 Fragment 3 - StreamTableScan { table: t, columns: [t.src, t.dst, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } { tables: [ StreamScan: 8 ] } + StreamTableScan { table: t, columns: [t.src, t.dst, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } { tables: [ StreamScan: 8 ] } ├── Upstream └── BatchPlanNode Fragment 4 - StreamTableScan { table: t, columns: [t.src, t.dst, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } { tables: [ StreamScan: 9 ] } + StreamTableScan { table: t, columns: [t.src, t.dst, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } { tables: [ StreamScan: 9 ] } ├── Upstream └── BatchPlanNode Fragment 5 - StreamTableScan { table: t, columns: [t.dst, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } { tables: [ StreamScan: 10 ] } + StreamTableScan { table: t, columns: [t.dst, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } { tables: [ StreamScan: 10 ] } ├── Upstream └── BatchPlanNode diff --git a/src/frontend/planner_test/tests/testdata/output/join_ordering.yaml b/src/frontend/planner_test/tests/testdata/output/join_ordering.yaml index 0473e0f239ab3..59811c69f7b2a 100644 --- a/src/frontend/planner_test/tests/testdata/output/join_ordering.yaml +++ b/src/frontend/planner_test/tests/testdata/output/join_ordering.yaml @@ -41,13 +41,13 @@ │ ├─StreamExchange { dist: HashShard(t1.v2) } │ │ └─StreamHashJoin { type: Inner, predicate: t1.v1 = t2.v3, output: [t1.v1, t1.v2, t2.v3, t2.v4, t1._row_id, t2._row_id] } │ │ ├─StreamExchange { dist: HashShard(t1.v1) } - │ │ │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ │ │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ │ └─StreamExchange { dist: HashShard(t2.v3) } - │ │ └─StreamTableScan { table: t2, columns: [t2.v3, t2.v4, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + │ │ └─StreamTableScan { table: t2, columns: [t2.v3, t2.v4, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } │ └─StreamExchange { dist: HashShard(t3.v6) } - │ └─StreamTableScan { table: t3, columns: [t3.v5, t3.v6, t3._row_id], stream_scan_type: Backfill, pk: 
[t3._row_id], dist: UpstreamHashShard(t3._row_id) } + │ └─StreamTableScan { table: t3, columns: [t3.v5, t3.v6, t3._row_id], stream_scan_type: ArrangementBackfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } └─StreamExchange { dist: HashShard(t4.v7) } - └─StreamTableScan { table: t4, columns: [t4.v7, t4.v8, t4._row_id], stream_scan_type: Backfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } + └─StreamTableScan { table: t4, columns: [t4.v7, t4.v8, t4._row_id], stream_scan_type: ArrangementBackfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } - name: bushy tree join ordering sql: | create table t1 (v1 int, v2 int); @@ -87,15 +87,15 @@ ├─StreamExchange { dist: HashShard(t1.v2) } │ └─StreamHashJoin { type: Inner, predicate: t2.v3 = t1.v1, output: [t2.v3, t2.v4, t1.v1, t1.v2, t2._row_id, t1._row_id] } │ ├─StreamExchange { dist: HashShard(t2.v3) } - │ │ └─StreamTableScan { table: t2, columns: [t2.v3, t2.v4, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + │ │ └─StreamTableScan { table: t2, columns: [t2.v3, t2.v4, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } │ └─StreamExchange { dist: HashShard(t1.v1) } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t3.v6) } └─StreamHashJoin { type: Inner, predicate: t4.v7 = t3.v5, output: [t4.v7, t4.v8, t3.v5, t3.v6, t4._row_id, t3._row_id] } ├─StreamExchange { dist: HashShard(t4.v7) } - │ └─StreamTableScan { table: t4, columns: [t4.v7, t4.v8, t4._row_id], stream_scan_type: Backfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } + │ └─StreamTableScan { table: t4, columns: [t4.v7, t4.v8, t4._row_id], stream_scan_type: ArrangementBackfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } └─StreamExchange { dist: HashShard(t3.v5) } - └─StreamTableScan { table: t3, columns: [t3.v5, t3.v6, t3._row_id], stream_scan_type: Backfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } + └─StreamTableScan { table: t3, columns: [t3.v5, t3.v6, t3._row_id], stream_scan_type: ArrangementBackfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } - name: bushy tree join ordering manually sql: | set rw_enable_join_ordering = false; @@ -135,15 +135,15 @@ ├─StreamExchange { dist: HashShard(t1.v2) } │ └─StreamHashJoin { type: Inner, predicate: t1.v1 = t2.v3, output: [t1.v1, t1.v2, t2.v3, t2.v4, t1._row_id, t2._row_id] } │ ├─StreamExchange { dist: HashShard(t1.v1) } - │ │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ └─StreamExchange { dist: HashShard(t2.v3) } - │ └─StreamTableScan { table: t2, columns: [t2.v3, t2.v4, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + │ └─StreamTableScan { table: t2, columns: [t2.v3, t2.v4, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } └─StreamExchange { dist: HashShard(t3.v6) } 
└─StreamHashJoin { type: Inner, predicate: t3.v5 = t4.v7, output: [t3.v5, t3.v6, t4.v7, t4.v8, t3._row_id, t4._row_id] } ├─StreamExchange { dist: HashShard(t3.v5) } - │ └─StreamTableScan { table: t3, columns: [t3.v5, t3.v6, t3._row_id], stream_scan_type: Backfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } + │ └─StreamTableScan { table: t3, columns: [t3.v5, t3.v6, t3._row_id], stream_scan_type: ArrangementBackfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } └─StreamExchange { dist: HashShard(t4.v7) } - └─StreamTableScan { table: t4, columns: [t4.v7, t4.v8, t4._row_id], stream_scan_type: Backfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } + └─StreamTableScan { table: t4, columns: [t4.v7, t4.v8, t4._row_id], stream_scan_type: ArrangementBackfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } - name: right deep tree join ordering manually sql: | set rw_enable_join_ordering = false; @@ -181,14 +181,14 @@ └─StreamExchange { dist: HashShard(t1.v1, t2.v4, t3.v5, t1._row_id, t2._row_id, t3._row_id, t4._row_id) } └─StreamHashJoin { type: Inner, predicate: t1.v1 = t2.v3, output: [t1.v1, t1.v2, t2.v3, t2.v4, t3.v5, t3.v6, t4.v7, t4.v8, t1._row_id, t2._row_id, t3._row_id, t4._row_id] } ├─StreamExchange { dist: HashShard(t1.v1) } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2.v3) } └─StreamHashJoin { type: Inner, predicate: t2.v4 = t3.v6, output: [t2.v3, t2.v4, t3.v5, t3.v6, t4.v7, t4.v8, t2._row_id, t3._row_id, t4._row_id] } ├─StreamExchange { dist: HashShard(t2.v4) } - │ └─StreamTableScan { table: t2, columns: [t2.v3, t2.v4, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + │ └─StreamTableScan { table: t2, columns: [t2.v3, t2.v4, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } └─StreamExchange { dist: HashShard(t3.v6) } └─StreamHashJoin { type: Inner, predicate: t3.v5 = t4.v7, output: [t3.v5, t3.v6, t4.v7, t4.v8, t3._row_id, t4._row_id] } ├─StreamExchange { dist: HashShard(t3.v5) } - │ └─StreamTableScan { table: t3, columns: [t3.v5, t3.v6, t3._row_id], stream_scan_type: Backfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } + │ └─StreamTableScan { table: t3, columns: [t3.v5, t3.v6, t3._row_id], stream_scan_type: ArrangementBackfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } └─StreamExchange { dist: HashShard(t4.v7) } - └─StreamTableScan { table: t4, columns: [t4.v7, t4.v8, t4._row_id], stream_scan_type: Backfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } + └─StreamTableScan { table: t4, columns: [t4.v7, t4.v8, t4._row_id], stream_scan_type: ArrangementBackfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/lateral_subquery.yaml b/src/frontend/planner_test/tests/testdata/output/lateral_subquery.yaml index a3bd5e26f37fc..0541df05b9751 100644 --- a/src/frontend/planner_test/tests/testdata/output/lateral_subquery.yaml +++ b/src/frontend/planner_test/tests/testdata/output/lateral_subquery.yaml @@ -43,13 +43,13 @@ ├─StreamExchange { dist: HashShard(salesperson.id, max(all_sales.amount)) } │ └─StreamHashJoin { type: Inner, predicate: 
salesperson.id = all_sales.salesperson_id, output: [salesperson.id, salesperson.name, max(all_sales.amount), salesperson._row_id, all_sales.salesperson_id] } │ ├─StreamExchange { dist: HashShard(salesperson.id) } - │ │ └─StreamTableScan { table: salesperson, columns: [salesperson.id, salesperson.name, salesperson._row_id], stream_scan_type: Backfill, pk: [salesperson._row_id], dist: UpstreamHashShard(salesperson._row_id) } + │ │ └─StreamTableScan { table: salesperson, columns: [salesperson.id, salesperson.name, salesperson._row_id], stream_scan_type: ArrangementBackfill, pk: [salesperson._row_id], dist: UpstreamHashShard(salesperson._row_id) } │ └─StreamProject { exprs: [all_sales.salesperson_id, max(all_sales.amount)] } │ └─StreamHashAgg { group_key: [all_sales.salesperson_id], aggs: [max(all_sales.amount), count] } │ └─StreamExchange { dist: HashShard(all_sales.salesperson_id) } - │ └─StreamTableScan { table: all_sales, columns: [all_sales.salesperson_id, all_sales.amount, all_sales._row_id], stream_scan_type: Backfill, pk: [all_sales._row_id], dist: UpstreamHashShard(all_sales._row_id) } + │ └─StreamTableScan { table: all_sales, columns: [all_sales.salesperson_id, all_sales.amount, all_sales._row_id], stream_scan_type: ArrangementBackfill, pk: [all_sales._row_id], dist: UpstreamHashShard(all_sales._row_id) } └─StreamExchange { dist: HashShard(all_sales.salesperson_id, all_sales.amount) } - └─StreamTableScan { table: all_sales, columns: [all_sales.salesperson_id, all_sales.customer_name, all_sales.amount, all_sales._row_id], stream_scan_type: Backfill, pk: [all_sales._row_id], dist: UpstreamHashShard(all_sales._row_id) } + └─StreamTableScan { table: all_sales, columns: [all_sales.salesperson_id, all_sales.customer_name, all_sales.amount, all_sales._row_id], stream_scan_type: ArrangementBackfill, pk: [all_sales._row_id], dist: UpstreamHashShard(all_sales._row_id) } - name: lateral join 2 sql: | create table all_sales (salesperson_id int, customer_name varchar, amount int ); @@ -82,12 +82,12 @@ └─StreamExchange { dist: HashShard(salesperson._row_id, salesperson.id) } └─StreamHashJoin { type: Inner, predicate: salesperson.id IS NOT DISTINCT FROM all_sales.salesperson_id, output: [salesperson.name, all_sales.amount, all_sales.customer_name, salesperson._row_id, salesperson.id, all_sales.salesperson_id] } ├─StreamExchange { dist: HashShard(salesperson.id) } - │ └─StreamTableScan { table: salesperson, columns: [salesperson.id, salesperson.name, salesperson._row_id], stream_scan_type: Backfill, pk: [salesperson._row_id], dist: UpstreamHashShard(salesperson._row_id) } + │ └─StreamTableScan { table: salesperson, columns: [salesperson.id, salesperson.name, salesperson._row_id], stream_scan_type: ArrangementBackfill, pk: [salesperson._row_id], dist: UpstreamHashShard(salesperson._row_id) } └─StreamGroupTopN { order: [all_sales.amount DESC], limit: 1, offset: 0, group_key: [all_sales.salesperson_id] } └─StreamExchange { dist: HashShard(all_sales.salesperson_id) } └─StreamProject { exprs: [all_sales.salesperson_id, all_sales.amount, all_sales.customer_name, all_sales._row_id] } └─StreamFilter { predicate: IsNotNull(all_sales.salesperson_id) } - └─StreamTableScan { table: all_sales, columns: [all_sales.salesperson_id, all_sales.customer_name, all_sales.amount, all_sales._row_id], stream_scan_type: Backfill, pk: [all_sales._row_id], dist: UpstreamHashShard(all_sales._row_id) } + └─StreamTableScan { table: all_sales, columns: [all_sales.salesperson_id, all_sales.customer_name, all_sales.amount, 
all_sales._row_id], stream_scan_type: ArrangementBackfill, pk: [all_sales._row_id], dist: UpstreamHashShard(all_sales._row_id) } - name: lateral join 2 (left join) sql: | create table all_sales (salesperson_id int, customer_name varchar, amount int ); @@ -120,12 +120,12 @@ └─StreamExchange { dist: HashShard(salesperson._row_id, salesperson.id) } └─StreamHashJoin { type: LeftOuter, predicate: salesperson.id IS NOT DISTINCT FROM all_sales.salesperson_id, output: [salesperson.name, all_sales.amount, all_sales.customer_name, salesperson._row_id, salesperson.id, all_sales.salesperson_id] } ├─StreamExchange { dist: HashShard(salesperson.id) } - │ └─StreamTableScan { table: salesperson, columns: [salesperson.id, salesperson.name, salesperson._row_id], stream_scan_type: Backfill, pk: [salesperson._row_id], dist: UpstreamHashShard(salesperson._row_id) } + │ └─StreamTableScan { table: salesperson, columns: [salesperson.id, salesperson.name, salesperson._row_id], stream_scan_type: ArrangementBackfill, pk: [salesperson._row_id], dist: UpstreamHashShard(salesperson._row_id) } └─StreamGroupTopN { order: [all_sales.amount DESC], limit: 1, offset: 0, group_key: [all_sales.salesperson_id] } └─StreamExchange { dist: HashShard(all_sales.salesperson_id) } └─StreamProject { exprs: [all_sales.salesperson_id, all_sales.amount, all_sales.customer_name, all_sales._row_id] } └─StreamFilter { predicate: IsNotNull(all_sales.salesperson_id) } - └─StreamTableScan { table: all_sales, columns: [all_sales.salesperson_id, all_sales.customer_name, all_sales.amount, all_sales._row_id], stream_scan_type: Backfill, pk: [all_sales._row_id], dist: UpstreamHashShard(all_sales._row_id) } + └─StreamTableScan { table: all_sales, columns: [all_sales.salesperson_id, all_sales.customer_name, all_sales.amount, all_sales._row_id], stream_scan_type: ArrangementBackfill, pk: [all_sales._row_id], dist: UpstreamHashShard(all_sales._row_id) } - name: lateral join 2 (right join) should throw an error sql: | create table all_sales (salesperson_id int, customer_name varchar, amount int ); @@ -162,12 +162,12 @@ └─StreamExchange { dist: HashShard(t.arr, t._row_id, projected_row_id) } └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, Unnest($0), t._row_id, t.arr, projected_row_id] } ├─StreamExchange { dist: HashShard(t.arr) } - │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamProjectSet { select_list: [$0, Unnest($0)] } └─StreamProject { exprs: [t.arr] } └─StreamHashAgg { group_key: [t.arr], aggs: [count] } └─StreamExchange { dist: HashShard(t.arr) } - └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: https://github.com/risingwavelabs/risingwave/issues/12298 sql: | create table t1(c varchar, n varchar, id varchar, d varchar); @@ -190,7 +190,7 @@ │ ├─StreamExchange { dist: HashShard(t1.id) } │ │ └─StreamProject { exprs: [t1.n, t1.id, t1._row_id] } │ │ └─StreamFilter { predicate: (t1.c = 'abc':Varchar) } - │ │ └─StreamTableScan { table: t1, columns: [t1.n, t1.id, t1._row_id, t1.c], 
stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ │ └─StreamTableScan { table: t1, columns: [t1.n, t1.id, t1._row_id, t1.c], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ └─StreamExchange { dist: HashShard(Unnest(Case(($1 <> '':Varchar), ArrayAppend(StringToArray(Trim($1, ',':Varchar), ',':Varchar), $3), Array($3)))) } │ └─StreamProjectSet { select_list: [$0, $1, $2, $3, Unnest(Case(($1 <> '':Varchar), ArrayAppend(StringToArray(Trim($1, ',':Varchar), ',':Varchar), $3), Array($3)))] } │ └─StreamProject { exprs: [t2.p, t2.p, t2.d, t2.d] } @@ -198,8 +198,8 @@ │ └─StreamExchange { dist: HashShard(t2.p, t2.p, t2.d, t2.d) } │ └─StreamProject { exprs: [t2.p, t2.p, t2.d, t2.d, t2._row_id] } │ └─StreamFilter { predicate: (t2.c = 'abc':Varchar) } - │ └─StreamTableScan { table: t2, columns: [t2.p, t2.p, t2.d, t2.d, t2._row_id, t2.c], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + │ └─StreamTableScan { table: t2, columns: [t2.p, t2.p, t2.d, t2.d, t2._row_id, t2.c], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } └─StreamExchange { dist: HashShard(t2.p, t2.d) } └─StreamProject { exprs: [t2.p, t2.d, t2._row_id] } └─StreamFilter { predicate: (t2.c = 'abc':Varchar) } - └─StreamTableScan { table: t2, columns: [t2.p, t2.d, t2._row_id, t2.c], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.p, t2.d, t2._row_id, t2.c], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/limit.yaml b/src/frontend/planner_test/tests/testdata/output/limit.yaml index d4535cff253a1..f41010edbad51 100644 --- a/src/frontend/planner_test/tests/testdata/output/limit.yaml +++ b/src/frontend/planner_test/tests/testdata/output/limit.yaml @@ -135,7 +135,7 @@ └─StreamSimpleAgg { aggs: [sum0(count), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [count] } - └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (a int); select count(*) from t order by 1 limit 1; @@ -152,7 +152,7 @@ └─StreamSimpleAgg { aggs: [sum0(count), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [count] } - └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (a int primary key); select * from t limit 1; @@ -168,4 +168,4 @@ └─StreamExchange { dist: Single } └─StreamGroupTopN { order: [t.a ASC], limit: 1, offset: 0, group_key: [$expr1] } └─StreamProject { exprs: [t.a, Vnode(t.a) as $expr1] } - └─StreamTableScan { table: t, columns: [t.a], stream_scan_type: Backfill, pk: [t.a], dist: UpstreamHashShard(t.a) } + └─StreamTableScan { table: t, columns: [t.a], stream_scan_type: ArrangementBackfill, pk: [t.a], dist: UpstreamHashShard(t.a) } diff --git a/src/frontend/planner_test/tests/testdata/output/mv_column_name.yaml 
b/src/frontend/planner_test/tests/testdata/output/mv_column_name.yaml index b68098706f772..bf4f3c31fd258 100644 --- a/src/frontend/planner_test/tests/testdata/output/mv_column_name.yaml +++ b/src/frontend/planner_test/tests/testdata/output/mv_column_name.yaml @@ -16,7 +16,7 @@ stream_plan: |- StreamMaterialize { columns: [is_null, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [IsNull(t.a) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: is_true with alias sql: | create table t (a bool); @@ -24,7 +24,7 @@ stream_plan: |- StreamMaterialize { columns: [a, is_true, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [t.a, IsTrue(t.a) as $expr1, t._row_id] } - └─StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: column name specified more than once sql: | create table t1 (a int); @@ -68,4 +68,4 @@ └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr1], aggs: [count, max(t.a)] } └─StreamProject { exprs: [t.a, t._row_id, Vnode(t._row_id) as $expr1] } - └─StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/mv_on_mv.yaml b/src/frontend/planner_test/tests/testdata/output/mv_on_mv.yaml index 578a5f2c07fdc..589b00bd16a39 100644 --- a/src/frontend/planner_test/tests/testdata/output/mv_on_mv.yaml +++ b/src/frontend/planner_test/tests/testdata/output/mv_on_mv.yaml @@ -15,6 +15,6 @@ └─StreamExchange { dist: HashShard(m1.v1, m1.t1._row_id, m2.t1._row_id) } └─StreamHashJoin { type: Inner, predicate: m1.v1 = m2.v1, output: [m1.v1, m1.v2, m2.v1, m2.v2, m1.t1._row_id, m2.t1._row_id] } ├─StreamExchange { dist: HashShard(m1.v1) } - │ └─StreamTableScan { table: m1, columns: [m1.v1, m1.v2, m1.t1._row_id], stream_scan_type: Backfill, pk: [m1.t1._row_id], dist: UpstreamHashShard(m1.t1._row_id) } + │ └─StreamTableScan { table: m1, columns: [m1.v1, m1.v2, m1.t1._row_id], stream_scan_type: ArrangementBackfill, pk: [m1.t1._row_id], dist: UpstreamHashShard(m1.t1._row_id) } └─StreamExchange { dist: HashShard(m2.v1) } - └─StreamTableScan { table: m2, columns: [m2.v1, m2.v2, m2.t1._row_id], stream_scan_type: Backfill, pk: [m2.t1._row_id], dist: UpstreamHashShard(m2.t1._row_id) } + └─StreamTableScan { table: m2, columns: [m2.v1, m2.v2, m2.t1._row_id], stream_scan_type: ArrangementBackfill, pk: [m2.t1._row_id], dist: UpstreamHashShard(m2.t1._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/nexmark.yaml b/src/frontend/planner_test/tests/testdata/output/nexmark.yaml index 36e6ba4a16a77..49223e85e62f5 100644 --- a/src/frontend/planner_test/tests/testdata/output/nexmark.yaml +++ b/src/frontend/planner_test/tests/testdata/output/nexmark.yaml @@ -51,26 
+51,19 @@ sink_plan: |- StreamSink { type: append-only, columns: [auction, bidder, price, date_time, bid._row_id(hidden)] } └─StreamExchange { dist: Single } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_plan: |- StreamMaterialize { columns: [auction, bidder, price, date_time, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 - StreamMaterialize { columns: [auction, bidder, price, date_time, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } - ├── tables: [ Materialize: 4294967294 ] - └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + StreamMaterialize { columns: [auction, bidder, price, date_time, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } + └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 0 ] ├── Upstream └── BatchPlanNode - Table 0 - ├── columns: [ vnode, _row_id, bid_backfill_finished, bid_row_count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2, 3 ] - ├── distribution key: [ 0 ] - ├── read pk prefix len hint: 1 - └── vnode column idx: 0 + Table 0 { columns: [ vnode, _row_id, bid_backfill_finished, bid_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 4294967294 { columns: [ auction, bidder, price, date_time, bid._row_id ], primary key: [ $4 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [ 4 ], read pk prefix len hint: 1 } @@ -92,28 +85,21 @@ StreamSink { type: append-only, columns: [auction, bidder, price, date_time, bid._row_id(hidden)] } └─StreamExchange { dist: Single } └─StreamProject { exprs: [bid.auction, bid.bidder, (0.908:Decimal * bid.price::Decimal) as $expr1, bid.date_time, bid._row_id] } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_plan: |- StreamMaterialize { columns: [auction, bidder, price, date_time, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } 
└─StreamProject { exprs: [bid.auction, bid.bidder, (0.908:Decimal * bid.price::Decimal) as $expr1, bid.date_time, bid._row_id] } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 - StreamMaterialize { columns: [auction, bidder, price, date_time, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } - ├── tables: [ Materialize: 4294967294 ] + StreamMaterialize { columns: [auction, bidder, price, date_time, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamProject { exprs: [bid.auction, bid.bidder, (0.908:Decimal * bid.price::Decimal) as $expr1, bid.date_time, bid._row_id] } - └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 0 ] ├── Upstream └── BatchPlanNode - Table 0 - ├── columns: [ vnode, _row_id, bid_backfill_finished, bid_row_count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2, 3 ] - ├── distribution key: [ 0 ] - ├── read pk prefix len hint: 1 - └── vnode column idx: 0 + Table 0 { columns: [ vnode, _row_id, bid_backfill_finished, bid_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 4294967294 { columns: [ auction, bidder, price, date_time, bid._row_id ], primary key: [ $4 ASC ], value indices: [ 0, 1, 2, 3, 4 ], distribution key: [ 4 ], read pk prefix len hint: 1 } @@ -129,16 +115,16 @@ StreamSink { type: append-only, columns: [auction, price, bid._row_id(hidden)] } └─StreamExchange { dist: Single } └─StreamFilter { predicate: (((((bid.auction = 1007:Int32) OR (bid.auction = 1020:Int32)) OR (bid.auction = 2001:Int32)) OR (bid.auction = 2019:Int32)) OR (bid.auction = 2087:Int32)) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_plan: |- StreamMaterialize { columns: [auction, price, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } └─StreamFilter { predicate: (((((bid.auction = 1007:Int32) OR (bid.auction = 1020:Int32)) OR (bid.auction = 2001:Int32)) OR (bid.auction = 2019:Int32)) OR (bid.auction = 2087:Int32)) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: 
UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction, price, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamFilter { predicate: (((((bid.auction = 1007:Int32) OR (bid.auction = 1020:Int32)) OR (bid.auction = 2001:Int32)) OR (bid.auction = 2019:Int32)) OR (bid.auction = 2087:Int32)) } - └── StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └── StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 0 ] ├── Upstream └── BatchPlanNode @@ -177,10 +163,10 @@ ├─StreamExchange { dist: HashShard(auction.seller) } │ └─StreamProject { exprs: [auction.id, auction.seller] } │ └─StreamFilter { predicate: (auction.category = 10:Int32) } - │ └─StreamTableScan { table: auction, columns: [auction.id, auction.seller, auction.category], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + │ └─StreamTableScan { table: auction, columns: [auction.id, auction.seller, auction.category], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } └─StreamExchange { dist: HashShard(person.id) } └─StreamFilter { predicate: (((person.state = 'or':Varchar) OR (person.state = 'id':Varchar)) OR (person.state = 'ca':Varchar)) } - └─StreamTableScan { table: person, columns: [person.id, person.name, person.city, person.state], stream_scan_type: Backfill, pk: [person.id], dist: UpstreamHashShard(person.id) } + └─StreamTableScan { table: person, columns: [person.id, person.name, person.city, person.state], stream_scan_type: ArrangementBackfill, pk: [person.id], dist: UpstreamHashShard(person.id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [name, city, state, id, auction.seller(hidden), person.id(hidden)], stream_key: [id, auction.seller], pk_columns: [id, auction.seller], pk_conflict: NoCheck } @@ -196,14 +182,14 @@ Fragment 2 StreamProject { exprs: [auction.id, auction.seller] } └── StreamFilter { predicate: (auction.category = 10:Int32) } - └── StreamTableScan { table: auction, columns: [auction.id, auction.seller, auction.category], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + └── StreamTableScan { table: auction, columns: [auction.id, auction.seller, auction.category], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } ├── tables: [ StreamScan: 4 ] ├── Upstream └── BatchPlanNode Fragment 3 StreamFilter { predicate: (((person.state = 'or':Varchar) OR (person.state = 'id':Varchar)) OR (person.state = 'ca':Varchar)) } - └── StreamTableScan { table: person, columns: [person.id, person.name, person.city, person.state], stream_scan_type: Backfill, pk: [person.id], dist: UpstreamHashShard(person.id) } + └── StreamTableScan { table: person, columns: [person.id, person.name, person.city, person.state], stream_scan_type: ArrangementBackfill, pk: [person.id], dist: UpstreamHashShard(person.id) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode @@ -275,9 +261,9 @@ └─StreamFilter { predicate: (bid.date_time >= auction.date_time) AND (bid.date_time <= auction.expires) } └─StreamHashJoin { type: Inner, predicate: auction.id = bid.auction, output: all 
} ├─StreamExchange { dist: HashShard(auction.id) } - │ └─StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires, auction.category], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + │ └─StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires, auction.category], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } └─StreamExchange { dist: HashShard(bid.auction) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [category, avg], stream_key: [category], pk_columns: [category], pk_conflict: NoCheck } @@ -299,23 +285,18 @@ └── StreamExchange Hash([0]) from 3 Fragment 2 - StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires, auction.category], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires, auction.category], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } ├── tables: [ StreamScan: 7 ] ├── Upstream └── BatchPlanNode Fragment 3 - StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 8 ] ├── Upstream └── BatchPlanNode - Table 0 - ├── columns: [ auction_category, sum(max(bid_price)), count(max(bid_price)), count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2, 3 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 0 { columns: [ auction_category, sum(max(bid_price)), count(max(bid_price)), count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 1 ├── columns: [ auction_id, auction_category, bid_price, bid__row_id ] @@ -326,12 +307,7 @@ Table 2 { columns: [ auction_id, auction_category, max(bid_price), count ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 2 } - Table 3 - ├── columns: [ auction_id, auction_date_time, auction_expires, auction_category ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 0, 1, 2, 3 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 3 { columns: [ auction_id, auction_date_time, auction_expires, auction_category ], primary key: [ $0 ASC ], value indices: [ 0, 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 4 { columns: [ auction_id, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -347,13 +323,7 @@ ├── read pk prefix len hint: 1 └── vnode column idx: 0 - Table 8 - ├── columns: [ vnode, _row_id, bid_backfill_finished, bid_row_count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2, 3 ] - ├── distribution key: [ 0 ] - 
├── read pk prefix len hint: 1 - └── vnode column idx: 0 + Table 8 { columns: [ vnode, _row_id, bid_backfill_finished, bid_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 4294967294 { columns: [ category, avg ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -434,7 +404,7 @@ │ └─StreamExchange { dist: HashShard(bid.auction, window_start) } │ └─StreamHopWindow { time_col: bid.date_time, slide: 00:00:02, size: 00:00:10, output: [bid.auction, window_start, bid._row_id] } │ └─StreamFilter { predicate: IsNotNull(bid.date_time) } - │ └─StreamTableScan { table: bid, columns: [bid.auction, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + │ └─StreamTableScan { table: bid, columns: [bid.auction, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } └─StreamProject { exprs: [window_start, max(count)] } └─StreamHashAgg { group_key: [window_start], aggs: [max(count), count] } └─StreamExchange { dist: HashShard(window_start) } @@ -443,7 +413,7 @@ └─StreamExchange { dist: HashShard(bid.auction, window_start) } └─StreamHopWindow { time_col: bid.date_time, slide: 00:00:02, size: 00:00:10, output: [bid.auction, window_start, bid._row_id] } └─StreamFilter { predicate: IsNotNull(bid.date_time) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction, num, window_start(hidden), window_start#1(hidden)], stream_key: [auction, window_start], pk_columns: [auction, window_start], pk_conflict: NoCheck } @@ -468,7 +438,7 @@ Fragment 3 StreamHopWindow { time_col: bid.date_time, slide: 00:00:02, size: 00:00:10, output: [bid.auction, window_start, bid._row_id] } └── StreamFilter { predicate: IsNotNull(bid.date_time) } - └── StreamTableScan { table: bid, columns: [bid.auction, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └── StreamTableScan { table: bid, columns: [bid.auction, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode @@ -533,9 +503,9 @@ └─StreamFilter { predicate: (bid.date_time >= auction.date_time) AND (bid.date_time <= auction.expires) } └─StreamHashJoin { type: Inner, predicate: auction.id = bid.auction, output: all } ├─StreamExchange { dist: HashShard(auction.id) } - │ └─StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires, auction.seller], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + │ └─StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires, auction.seller], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } └─StreamExchange { dist: HashShard(bid.auction) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: 
[bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [seller, avg, auction.id(hidden)], stream_key: [auction.id, seller], pk_columns: [auction.id, seller], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } @@ -557,12 +527,12 @@ └── StreamExchange Hash([0]) from 4 Fragment 3 - StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires, auction.seller], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } { tables: [ StreamScan: 6 ] } + StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires, auction.seller], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } { tables: [ StreamScan: 6 ] } ├── Upstream └── BatchPlanNode Fragment 4 - StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 7 ] } + StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 7 ] } ├── Upstream └── BatchPlanNode @@ -625,9 +595,9 @@ └─StreamFilter { predicate: (bid.date_time >= auction.date_time) AND (bid.date_time <= auction.expires) } └─StreamHashJoin { type: Inner, predicate: auction.id = bid.auction, output: all } ├─StreamExchange { dist: HashShard(auction.id) } - │ └─StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires, auction.seller], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + │ └─StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires, auction.seller], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } └─StreamExchange { dist: HashShard(bid.auction) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [seller, avg, auction.id(hidden)], stream_key: [auction.id, seller], pk_columns: [auction.id, seller], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } @@ -649,12 +619,12 @@ └── StreamExchange Hash([0]) from 4 Fragment 3 - StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires, auction.seller], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } { tables: [ StreamScan: 6 ] } + StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires, auction.seller], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } { tables: [ StreamScan: 6 ] } ├── Upstream └── BatchPlanNode Fragment 4 - StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: 
UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 7 ] } + StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 7 ] } ├── Upstream └── BatchPlanNode @@ -716,13 +686,13 @@ └─StreamFilter { predicate: (bid.date_time >= $expr2) AND (bid.date_time <= $expr1) } └─StreamHashJoin { type: Inner, predicate: bid.price = max(bid.price), output: all } ├─StreamExchange { dist: HashShard(bid.price) } - │ └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + │ └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } └─StreamExchange { dist: HashShard(max(bid.price)) } └─StreamProject { exprs: [$expr1, max(bid.price), ($expr1 - '00:00:10':Interval) as $expr2] } └─StreamHashAgg [append_only] { group_key: [$expr1], aggs: [max(bid.price), count] } └─StreamExchange { dist: HashShard($expr1) } └─StreamProject { exprs: [(TumbleStart(bid.date_time, '00:00:10':Interval) + '00:00:10':Interval) as $expr1, bid.price, bid._row_id] } - └─StreamTableScan { table: bid, columns: [bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction, price, bidder, date_time, bid._row_id(hidden), $expr1(hidden)], stream_key: [bid._row_id, $expr1, price], pk_columns: [bid._row_id, $expr1, price], pk_conflict: NoCheck } @@ -734,7 +704,7 @@ └── StreamExchange Hash([1]) from 2 Fragment 1 - StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 4 ] ├── Upstream └── BatchPlanNode @@ -746,7 +716,8 @@ Fragment 3 StreamProject { exprs: [(TumbleStart(bid.date_time, '00:00:10':Interval) + '00:00:10':Interval) as $expr1, bid.price, bid._row_id] } - └── StreamTableScan { table: bid, columns: [bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 6 ] } + └── StreamTableScan { table: bid, columns: [bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + ├── tables: [ StreamScan: 6 ] ├── Upstream └── BatchPlanNode @@ -829,13 +800,13 @@ │ └─StreamHashAgg { group_key: [person.id, $expr1, $expr2], aggs: [internal_last_seen_value(person.name), count] } │ └─StreamProject { exprs: [person.id, person.name, $expr1, ($expr1 + '00:00:10':Interval) as $expr2] } │ └─StreamProject { exprs: [person.id, person.name, person.date_time, TumbleStart(person.date_time, '00:00:10':Interval) as $expr1] } - │ └─StreamTableScan { table: person, columns: [person.id, person.name, 
person.date_time], stream_scan_type: Backfill, pk: [person.id], dist: UpstreamHashShard(person.id) } + │ └─StreamTableScan { table: person, columns: [person.id, person.name, person.date_time], stream_scan_type: ArrangementBackfill, pk: [person.id], dist: UpstreamHashShard(person.id) } └─StreamProject { exprs: [auction.seller, $expr3, $expr4] } └─StreamHashAgg { group_key: [auction.seller, $expr3, $expr4], aggs: [count] } └─StreamExchange { dist: HashShard(auction.seller, $expr3, $expr4) } └─StreamProject { exprs: [auction.seller, $expr3, ($expr3 + '00:00:10':Interval) as $expr4, auction.id] } └─StreamProject { exprs: [auction.date_time, auction.seller, TumbleStart(auction.date_time, '00:00:10':Interval) as $expr3, auction.id] } - └─StreamTableScan { table: auction, columns: [auction.date_time, auction.seller, auction.id], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + └─StreamTableScan { table: auction, columns: [auction.date_time, auction.seller, auction.id], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [id, name, starttime, $expr2(hidden), auction.seller(hidden), $expr3(hidden), $expr4(hidden)], stream_key: [id, starttime, $expr2], pk_columns: [id, starttime, $expr2], pk_conflict: NoCheck } @@ -855,14 +826,16 @@ └── StreamHashAgg { group_key: [person.id, $expr1, $expr2], aggs: [internal_last_seen_value(person.name), count] } { tables: [ HashAggState: 4 ] } └── StreamProject { exprs: [person.id, person.name, $expr1, ($expr1 + '00:00:10':Interval) as $expr2] } └── StreamProject { exprs: [person.id, person.name, person.date_time, TumbleStart(person.date_time, '00:00:10':Interval) as $expr1] } - └── StreamTableScan { table: person, columns: [person.id, person.name, person.date_time], stream_scan_type: Backfill, pk: [person.id], dist: UpstreamHashShard(person.id) } { tables: [ StreamScan: 5 ] } + └── StreamTableScan { table: person, columns: [person.id, person.name, person.date_time], stream_scan_type: ArrangementBackfill, pk: [person.id], dist: UpstreamHashShard(person.id) } + ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode Fragment 3 StreamProject { exprs: [auction.seller, $expr3, ($expr3 + '00:00:10':Interval) as $expr4, auction.id] } └── StreamProject { exprs: [auction.date_time, auction.seller, TumbleStart(auction.date_time, '00:00:10':Interval) as $expr3, auction.id] } - └── StreamTableScan { table: auction, columns: [auction.date_time, auction.seller, auction.id], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } { tables: [ StreamScan: 7 ] } + └── StreamTableScan { table: auction, columns: [auction.date_time, auction.seller, auction.id], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + ├── tables: [ StreamScan: 7 ] ├── Upstream └── BatchPlanNode @@ -932,9 +905,9 @@ └─StreamFilter { predicate: (bid.date_time >= auction.date_time) AND (bid.date_time <= auction.expires) } └─StreamHashJoin { type: Inner, predicate: auction.id = bid.auction, output: all } ├─StreamExchange { dist: HashShard(auction.id) } - │ └─StreamTableScan { table: auction, columns: [auction.id, auction.item_name, auction.description, auction.initial_bid, auction.reserve, auction.date_time, auction.expires, auction.seller, auction.category], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + │ └─StreamTableScan { table: auction, columns: 
[auction.id, auction.item_name, auction.description, auction.initial_bid, auction.reserve, auction.date_time, auction.expires, auction.seller, auction.category], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } └─StreamExchange { dist: HashShard(bid.auction) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [id, item_name, description, initial_bid, reserve, date_time, expires, seller, category, auction, bidder, price, bid_date_time, bid._row_id(hidden)], stream_key: [id], pk_columns: [id], pk_conflict: NoCheck } @@ -946,13 +919,13 @@ └── StreamExchange Hash([0]) from 2 Fragment 1 - StreamTableScan { table: auction, columns: [auction.id, auction.item_name, auction.description, auction.initial_bid, auction.reserve, auction.date_time, auction.expires, auction.seller, auction.category], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + StreamTableScan { table: auction, columns: [auction.id, auction.item_name, auction.description, auction.initial_bid, auction.reserve, auction.date_time, auction.expires, auction.seller, auction.category], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode Fragment 2 - StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 6 ] } + StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 6 ] } ├── Upstream └── BatchPlanNode @@ -1000,28 +973,22 @@ StreamSink { type: append-only, columns: [auction, bidder, price, date_time, date, time, bid._row_id(hidden)] } └─StreamExchange { dist: Single } └─StreamProject { exprs: [bid.auction, bid.bidder, bid.price, bid.date_time, ToChar(bid.date_time, 'YYYY-MM-DD':Varchar) as $expr1, ToChar(bid.date_time, 'HH:MI':Varchar) as $expr2, bid._row_id] } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_plan: |- StreamMaterialize { columns: [auction, bidder, price, date_time, date, time, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [bid.auction, bid.bidder, bid.price, bid.date_time, ToChar(bid.date_time, 'YYYY-MM-DD':Varchar) as $expr1, ToChar(bid.date_time, 'HH:MI':Varchar) as $expr2, bid._row_id] } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + 
└─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction, bidder, price, date_time, date, time, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } ├── tables: [ Materialize: 4294967294 ] └── StreamProject { exprs: [bid.auction, bid.bidder, bid.price, bid.date_time, ToChar(bid.date_time, 'YYYY-MM-DD':Varchar) as $expr1, ToChar(bid.date_time, 'HH:MI':Varchar) as $expr2, bid._row_id] } - └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 0 ] ├── Upstream └── BatchPlanNode - Table 0 - ├── columns: [ vnode, _row_id, bid_backfill_finished, bid_row_count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2, 3 ] - ├── distribution key: [ 0 ] - ├── read pk prefix len hint: 1 - └── vnode column idx: 0 + Table 0 { columns: [ vnode, _row_id, bid_backfill_finished, bid_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 4294967294 ├── columns: [ auction, bidder, price, date_time, date, time, bid._row_id ] @@ -1068,7 +1035,7 @@ └─StreamExchange { dist: HashShard(bid.bidder, $expr1, $expr2) } └─StreamProject { exprs: [bid.bidder, $expr1, AddWithTimeZone($expr1, '00:00:10':Interval, 'UTC':Varchar) as $expr2, bid._row_id], output_watermarks: [$expr1, $expr2] } └─StreamProject { exprs: [bid.bidder, bid.p_time, TumbleStart(bid.p_time, '00:00:10':Interval) as $expr1, bid._row_id], output_watermarks: [bid.p_time, $expr1] } - └─StreamTableScan { table: bid, columns: [bid.bidder, bid.p_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.bidder, bid.p_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [bidder, bid_count, window_start, window_end], stream_key: [bidder, window_start, window_end], pk_columns: [bidder, window_start, window_end], pk_conflict: NoCheck, watermark_columns: [window_start, window_end] } @@ -1080,7 +1047,7 @@ Fragment 1 StreamProject { exprs: [bid.bidder, $expr1, AddWithTimeZone($expr1, '00:00:10':Interval, 'UTC':Varchar) as $expr2, bid._row_id], output_watermarks: [$expr1, $expr2] } └── StreamProject { exprs: [bid.bidder, bid.p_time, TumbleStart(bid.p_time, '00:00:10':Interval) as $expr1, bid._row_id], output_watermarks: [bid.p_time, $expr1] } - └── StreamTableScan { table: bid, columns: [bid.bidder, bid.p_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 1 ] } + └── StreamTableScan { table: bid, columns: [bid.bidder, bid.p_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 1 ] } ├── Upstream └── BatchPlanNode @@ -1104,7 +1071,7 @@ └─StreamTemporalJoin { type: Inner, 
predicate: $expr1 = side_input.key, output: [bid.auction, bid.bidder, bid.price, bid.date_time, side_input.value, bid._row_id, $expr1, side_input.key] } ├─StreamExchange { dist: HashShard($expr1) } │ └─StreamProject { exprs: [bid.auction, bid.bidder, bid.price, bid.date_time, (bid.auction % 10000:Int32) as $expr1, bid._row_id] } - │ └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + │ └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(side_input.key) } └─StreamTableScan { table: side_input, columns: [side_input.key, side_input.value], stream_scan_type: UpstreamOnly, pk: [side_input.key], dist: UpstreamHashShard(side_input.key) } stream_plan: |- @@ -1113,7 +1080,7 @@ └─StreamTemporalJoin { type: Inner, predicate: $expr1 = side_input.key, output: [bid.auction, bid.bidder, bid.price, bid.date_time, side_input.value, bid._row_id, $expr1, side_input.key] } ├─StreamExchange { dist: HashShard($expr1) } │ └─StreamProject { exprs: [bid.auction, bid.bidder, bid.price, bid.date_time, (bid.auction % 10000:Int32) as $expr1, bid._row_id] } - │ └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + │ └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(side_input.key) } └─StreamTableScan { table: side_input, columns: [side_input.key, side_input.value], stream_scan_type: UpstreamOnly, pk: [side_input.key], dist: UpstreamHashShard(side_input.key) } stream_dist_plan: |+ @@ -1129,7 +1096,8 @@ Fragment 2 StreamProject { exprs: [bid.auction, bid.bidder, bid.price, bid.date_time, (bid.auction % 10000:Int32) as $expr1, bid._row_id] } - └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 0 ] } + └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + ├── tables: [ StreamScan: 0 ] ├── Upstream └── BatchPlanNode @@ -1184,18 +1152,18 @@ └─StreamExchange { dist: Single } └─StreamProject { exprs: [bid.auction, bid.bidder, (0.908:Decimal * bid.price::Decimal) as $expr1, Case(((Extract('HOUR':Varchar, bid.date_time) >= 8:Decimal) AND (Extract('HOUR':Varchar, bid.date_time) <= 18:Decimal)), 'dayTime':Varchar, ((Extract('HOUR':Varchar, bid.date_time) <= 6:Decimal) OR (Extract('HOUR':Varchar, bid.date_time) >= 20:Decimal)), 'nightTime':Varchar, 'otherTime':Varchar) as $expr2, bid.date_time, bid.extra, bid._row_id] } └─StreamFilter { predicate: ((0.908:Decimal * bid.price::Decimal) > 1000000:Decimal) AND ((0.908:Decimal * bid.price::Decimal) < 50000000:Decimal) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid.extra, bid._row_id], stream_scan_type: Backfill, pk: 
[bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid.extra, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_plan: |- StreamMaterialize { columns: [auction, bidder, price, bidtimetype, date_time, extra, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [bid.auction, bid.bidder, (0.908:Decimal * bid.price::Decimal) as $expr1, Case(((Extract('HOUR':Varchar, bid.date_time) >= 8:Decimal) AND (Extract('HOUR':Varchar, bid.date_time) <= 18:Decimal)), 'dayTime':Varchar, ((Extract('HOUR':Varchar, bid.date_time) <= 6:Decimal) OR (Extract('HOUR':Varchar, bid.date_time) >= 20:Decimal)), 'nightTime':Varchar, 'otherTime':Varchar) as $expr2, bid.date_time, bid.extra, bid._row_id] } └─StreamFilter { predicate: ((0.908:Decimal * bid.price::Decimal) > 1000000:Decimal) AND ((0.908:Decimal * bid.price::Decimal) < 50000000:Decimal) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid.extra, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid.extra, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction, bidder, price, bidtimetype, date_time, extra, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamProject { exprs: [bid.auction, bid.bidder, (0.908:Decimal * bid.price::Decimal) as $expr1, Case(((Extract('HOUR':Varchar, bid.date_time) >= 8:Decimal) AND (Extract('HOUR':Varchar, bid.date_time) <= 18:Decimal)), 'dayTime':Varchar, ((Extract('HOUR':Varchar, bid.date_time) <= 6:Decimal) OR (Extract('HOUR':Varchar, bid.date_time) >= 20:Decimal)), 'nightTime':Varchar, 'otherTime':Varchar) as $expr2, bid.date_time, bid.extra, bid._row_id] } └── StreamFilter { predicate: ((0.908:Decimal * bid.price::Decimal) > 1000000:Decimal) AND ((0.908:Decimal * bid.price::Decimal) < 50000000:Decimal) } - └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid.extra, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 0 ] } + └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid.extra, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 0 ] } ├── Upstream └── BatchPlanNode @@ -1237,7 +1205,7 @@ └─StreamHashAgg [append_only] { group_key: [$expr1], aggs: [count, count filter((bid.price < 10000:Int32)), count filter((bid.price >= 10000:Int32) AND (bid.price < 1000000:Int32)), count filter((bid.price >= 1000000:Int32)), count(distinct bid.bidder), count(distinct bid.bidder) filter((bid.price < 10000:Int32)), count(distinct bid.bidder) filter((bid.price >= 10000:Int32) AND (bid.price < 1000000:Int32)), count(distinct bid.bidder) filter((bid.price >= 1000000:Int32)), count(distinct bid.auction), count(distinct bid.auction) filter((bid.price < 10000:Int32)), count(distinct bid.auction) filter((bid.price >= 10000:Int32) AND (bid.price < 1000000:Int32)), 
count(distinct bid.auction) filter((bid.price >= 1000000:Int32))] } └─StreamExchange { dist: HashShard($expr1) } └─StreamProject { exprs: [ToChar(bid.date_time, 'yyyy-MM-dd':Varchar) as $expr1, bid.price, bid.bidder, bid.auction, bid._row_id] } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [day, total_bids, rank1_bids, rank2_bids, rank3_bids, total_bidders, rank1_bidders, rank2_bidders, rank3_bidders, total_auctions, rank1_auctions, rank2_auctions, rank3_auctions], stream_key: [day], pk_columns: [day], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } @@ -1247,7 +1215,7 @@ Fragment 1 StreamProject { exprs: [ToChar(bid.date_time, 'yyyy-MM-dd':Varchar) as $expr1, bid.price, bid.bidder, bid.auction, bid._row_id] } - └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 3 ] } + └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 3 ] } ├── Upstream └── BatchPlanNode @@ -1295,7 +1263,7 @@ └─StreamExchange { dist: HashShard($expr1_expanded, bid.bidder_expanded, bid.auction_expanded, flag) } └─StreamExpand { column_subsets: [[$expr1], [$expr1, bid.bidder], [$expr1, bid.auction]] } └─StreamProject { exprs: [ToChar(bid.date_time, 'yyyy-MM-dd':Varchar) as $expr1, bid.price, bid.bidder, bid.auction, bid._row_id] } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [day, total_bids, rank1_bids, rank2_bids, rank3_bids, total_bidders, rank1_bidders, rank2_bidders, rank3_bidders, total_auctions, rank1_auctions, rank2_auctions, rank3_auctions], stream_key: [day], pk_columns: [day], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } @@ -1311,7 +1279,7 @@ Fragment 2 StreamExpand { column_subsets: [[$expr1], [$expr1, bid.bidder], [$expr1, bid.auction]] } └── StreamProject { exprs: [ToChar(bid.date_time, 'yyyy-MM-dd':Varchar) as $expr1, bid.price, bid.bidder, bid.auction, bid._row_id] } - └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 2 ] } + └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 2 ] } ├── Upstream └── BatchPlanNode @@ -1366,7 +1334,7 @@ └─StreamHashAgg [append_only] { group_key: 
[bid.channel, $expr1], aggs: [max($expr2), count, count filter((bid.price < 10000:Int32)), count filter((bid.price >= 10000:Int32) AND (bid.price < 1000000:Int32)), count filter((bid.price >= 1000000:Int32)), count(distinct bid.bidder), count(distinct bid.bidder) filter((bid.price < 10000:Int32)), count(distinct bid.bidder) filter((bid.price >= 10000:Int32) AND (bid.price < 1000000:Int32)), count(distinct bid.bidder) filter((bid.price >= 1000000:Int32)), count(distinct bid.auction), count(distinct bid.auction) filter((bid.price < 10000:Int32)), count(distinct bid.auction) filter((bid.price >= 10000:Int32) AND (bid.price < 1000000:Int32)), count(distinct bid.auction) filter((bid.price >= 1000000:Int32))] } └─StreamExchange { dist: HashShard(bid.channel, $expr1) } └─StreamProject { exprs: [bid.channel, ToChar(bid.date_time, 'yyyy-MM-dd':Varchar) as $expr1, ToChar(bid.date_time, 'HH:mm':Varchar) as $expr2, bid.price, bid.bidder, bid.auction, bid._row_id] } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [channel, day, minute, total_bids, rank1_bids, rank2_bids, rank3_bids, total_bidders, rank1_bidders, rank2_bidders, rank3_bidders, total_auctions, rank1_auctions, rank2_auctions, rank3_auctions], stream_key: [channel, day], pk_columns: [channel, day], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } @@ -1376,7 +1344,7 @@ Fragment 1 StreamProject { exprs: [bid.channel, ToChar(bid.date_time, 'yyyy-MM-dd':Varchar) as $expr1, ToChar(bid.date_time, 'HH:mm':Varchar) as $expr2, bid.price, bid.bidder, bid.auction, bid._row_id] } - └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 3 ] } + └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 3 ] } ├── Upstream └── BatchPlanNode @@ -1426,7 +1394,7 @@ └─StreamExchange { dist: HashShard(bid.channel_expanded, $expr1_expanded, bid.bidder_expanded, bid.auction_expanded, flag) } └─StreamExpand { column_subsets: [[bid.channel, $expr1, $expr2], [bid.channel, $expr1, bid.bidder], [bid.channel, $expr1, bid.auction]] } └─StreamProject { exprs: [bid.channel, ToChar(bid.date_time, 'yyyy-MM-dd':Varchar) as $expr1, ToChar(bid.date_time, 'HH:mm':Varchar) as $expr2, bid.price, bid.bidder, bid.auction, bid._row_id] } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [channel, day, minute, total_bids, rank1_bids, rank2_bids, 
rank3_bids, total_bidders, rank1_bidders, rank2_bidders, rank3_bidders, total_auctions, rank1_auctions, rank2_auctions, rank3_auctions], stream_key: [channel, day], pk_columns: [channel, day], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } @@ -1442,7 +1410,7 @@ Fragment 2 StreamExpand { column_subsets: [[bid.channel, $expr1, $expr2], [bid.channel, $expr1, bid.bidder], [bid.channel, $expr1, bid.auction]] } └── StreamProject { exprs: [bid.channel, ToChar(bid.date_time, 'yyyy-MM-dd':Varchar) as $expr1, ToChar(bid.date_time, 'HH:mm':Varchar) as $expr2, bid.price, bid.bidder, bid.auction, bid._row_id] } - └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 3 ] } + └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 3 ] } ├── Upstream └── BatchPlanNode @@ -1493,7 +1461,7 @@ └─StreamHashAgg [append_only] { group_key: [bid.auction, $expr1], aggs: [count, count filter((bid.price < 10000:Int32)), count filter((bid.price >= 10000:Int32) AND (bid.price < 1000000:Int32)), count filter((bid.price >= 1000000:Int32)), min(bid.price), max(bid.price), sum(bid.price), count(bid.price)] } └─StreamExchange { dist: HashShard(bid.auction, $expr1) } └─StreamProject { exprs: [bid.auction, ToChar(bid.date_time, 'YYYY-MM-DD':Varchar) as $expr1, bid.price, bid._row_id] } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction, day, total_bids, rank1_bids, rank2_bids, rank3_bids, min_price, max_price, avg_price, sum_price], stream_key: [auction, day], pk_columns: [auction, day], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } @@ -1504,7 +1472,7 @@ Fragment 1 StreamProject { exprs: [bid.auction, ToChar(bid.date_time, 'YYYY-MM-DD':Varchar) as $expr1, bid.price, bid._row_id] } - └── StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 1 ] } + └── StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 1 ] } ├── Upstream └── BatchPlanNode @@ -1543,7 +1511,7 @@ StreamMaterialize { columns: [auction, bidder, price, channel, url, date_time, extra, bid._row_id(hidden)], stream_key: [bidder, auction], pk_columns: [bidder, auction], pk_conflict: NoCheck } └─StreamGroupTopN [append_only] { order: [bid.date_time DESC], limit: 1, offset: 0, group_key: [bid.bidder, bid.auction] } └─StreamExchange { dist: HashShard(bid.bidder, bid.auction) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: 
UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction, bidder, price, channel, url, date_time, extra, bid._row_id(hidden)], stream_key: [bidder, auction], pk_columns: [bidder, auction], pk_conflict: NoCheck } @@ -1552,7 +1520,7 @@ └── StreamExchange Hash([1, 0]) from 1 Fragment 1 - StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -1566,12 +1534,7 @@ Table 1 { columns: [ vnode, _row_id, bid_backfill_finished, bid_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 - ├── columns: [ auction, bidder, price, channel, url, date_time, extra, bid._row_id ] - ├── primary key: [ $1 ASC, $0 ASC ] - ├── value indices: [ 0, 1, 2, 3, 4, 5, 6, 7 ] - ├── distribution key: [ 1, 0 ] - └── read pk prefix len hint: 2 + Table 4294967294 { columns: [ auction, bidder, price, channel, url, date_time, extra, bid._row_id ], primary key: [ $1 ASC, $0 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7 ], distribution key: [ 1, 0 ], read pk prefix len hint: 2 } - id: nexmark_q18_rank before: @@ -1597,7 +1560,7 @@ StreamMaterialize { columns: [auction, bidder, price, channel, url, date_time, extra, bid._row_id(hidden)], stream_key: [bidder, auction, bid._row_id], pk_columns: [bidder, auction, bid._row_id], pk_conflict: NoCheck } └─StreamGroupTopN [append_only] { order: [bid.date_time DESC], limit: 1, offset: 0, with_ties: true, group_key: [bid.bidder, bid.auction] } └─StreamExchange { dist: HashShard(bid.bidder, bid.auction) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction, bidder, price, channel, url, date_time, extra, bid._row_id(hidden)], stream_key: [bidder, auction, bid._row_id], pk_columns: [bidder, auction, bid._row_id], pk_conflict: NoCheck } @@ -1606,7 +1569,7 @@ └── StreamExchange Hash([1, 0]) from 1 Fragment 1 - StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: 
[ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -1650,7 +1613,7 @@ StreamMaterialize { columns: [auction, bidder, price, channel, url, date_time, extra, p_time, bid._row_id(hidden)], stream_key: [auction, bid._row_id], pk_columns: [auction, bid._row_id], pk_conflict: NoCheck, watermark_columns: [p_time] } └─StreamGroupTopN [append_only] { order: [bid.price DESC], limit: 10, offset: 0, group_key: [bid.auction], output_watermarks: [bid.p_time] } └─StreamExchange { dist: HashShard(bid.auction) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid.p_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid.p_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction, bidder, price, channel, url, date_time, extra, p_time, bid._row_id(hidden)], stream_key: [auction, bid._row_id], pk_columns: [auction, bid._row_id], pk_conflict: NoCheck, watermark_columns: [p_time] } @@ -1659,7 +1622,7 @@ └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid.p_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid.p_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -1673,12 +1636,7 @@ Table 1 { columns: [ vnode, _row_id, bid_backfill_finished, bid_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4294967294 - ├── columns: [ auction, bidder, price, channel, url, date_time, extra, p_time, bid._row_id ] - ├── primary key: [ $0 ASC, $8 ASC ] - ├── value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 2 + Table 4294967294 { columns: [ auction, bidder, price, channel, url, date_time, extra, p_time, bid._row_id ], primary key: [ $0 ASC, $8 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7, 8 ], distribution key: [ 0 ], read pk prefix len hint: 2 } - id: nexmark_q19 before: @@ -1706,7 +1664,7 @@ └─StreamOverWindow { window_functions: [row_number() OVER(PARTITION BY bid.auction ORDER BY bid.price DESC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamGroupTopN [append_only] { order: [bid.price DESC], limit: 10, offset: 0, group_key: [bid.auction], output_watermarks: [bid.p_time] } └─StreamExchange { dist: HashShard(bid.auction) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid.p_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid.p_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 
StreamMaterialize { columns: [auction, bidder, price, channel, url, date_time, extra, p_time, bid._row_id(hidden), rank_number], stream_key: [auction, bid._row_id], pk_columns: [auction, bid._row_id], pk_conflict: NoCheck } @@ -1716,7 +1674,7 @@ └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid.p_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid.extra, bid.p_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -1764,10 +1722,10 @@ └─StreamExchange { dist: HashShard(bid.auction, bid._row_id) } └─StreamHashJoin { type: Inner, predicate: bid.auction = auction.id, output: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, auction.item_name, auction.description, auction.initial_bid, auction.reserve, auction.date_time, auction.expires, auction.seller, auction.category, bid._row_id, auction.id] } ├─StreamExchange { dist: HashShard(bid.auction) } - │ └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + │ └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } └─StreamExchange { dist: HashShard(auction.id) } └─StreamFilter { predicate: (auction.category = 10:Int32) } - └─StreamTableScan { table: auction, columns: [auction.id, auction.item_name, auction.description, auction.initial_bid, auction.reserve, auction.date_time, auction.expires, auction.seller, auction.category], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + └─StreamTableScan { table: auction, columns: [auction.id, auction.item_name, auction.description, auction.initial_bid, auction.reserve, auction.date_time, auction.expires, auction.seller, auction.category], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction, bidder, price, channel, url, date_timeb, item_name, description, initial_bid, reserve, date_timea, expires, seller, category, bid._row_id(hidden), auction.id(hidden)], stream_key: [bid._row_id, auction], pk_columns: [bid._row_id, auction], pk_conflict: NoCheck } @@ -1781,13 +1739,14 @@ └── StreamExchange Hash([0]) from 3 Fragment 2 - StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 4 ] } + StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 4 ] } ├── Upstream └── BatchPlanNode Fragment 3 StreamFilter { predicate: (auction.category = 10:Int32) } - └── StreamTableScan { table: auction, columns: [auction.id, auction.item_name, 
auction.description, auction.initial_bid, auction.reserve, auction.date_time, auction.expires, auction.seller, auction.category], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } { tables: [ StreamScan: 5 ] } + └── StreamTableScan { table: auction, columns: [auction.id, auction.item_name, auction.description, auction.initial_bid, auction.reserve, auction.date_time, auction.expires, auction.seller, auction.category], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode @@ -1835,13 +1794,13 @@ StreamMaterialize { columns: [auction, bidder, price, channel, channel_id, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [bid.auction, bid.bidder, bid.price, bid.channel, Case((Lower(bid.channel) = 'apple':Varchar), '0':Varchar, (Lower(bid.channel) = 'google':Varchar), '1':Varchar, (Lower(bid.channel) = 'facebook':Varchar), '2':Varchar, (Lower(bid.channel) = 'baidu':Varchar), '3':Varchar, ArrayAccess(RegexpMatch(bid.url, '(&|^)channel_id=([^&]*)':Varchar), 2:Int32)) as $expr1, bid._row_id] } └─StreamFilter { predicate: (IsNotNull(ArrayAccess(RegexpMatch(bid.url, '(&|^)channel_id=([^&]*)':Varchar), 2:Int32)) OR In(Lower(bid.channel), 'apple':Varchar, 'google':Varchar, 'facebook':Varchar, 'baidu':Varchar)) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction, bidder, price, channel, channel_id, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamProject { exprs: [bid.auction, bid.bidder, bid.price, bid.channel, Case((Lower(bid.channel) = 'apple':Varchar), '0':Varchar, (Lower(bid.channel) = 'google':Varchar), '1':Varchar, (Lower(bid.channel) = 'facebook':Varchar), '2':Varchar, (Lower(bid.channel) = 'baidu':Varchar), '3':Varchar, ArrayAccess(RegexpMatch(bid.url, '(&|^)channel_id=([^&]*)':Varchar), 2:Int32)) as $expr1, bid._row_id] } └── StreamFilter { predicate: (IsNotNull(ArrayAccess(RegexpMatch(bid.url, '(&|^)channel_id=([^&]*)':Varchar), 2:Int32)) OR In(Lower(bid.channel), 'apple':Varchar, 'google':Varchar, 'facebook':Varchar, 'baidu':Varchar)) } - └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 0 ] } + └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 0 ] } ├── Upstream └── BatchPlanNode @@ -1866,16 +1825,16 @@ StreamSink { type: append-only, columns: [auction, bidder, price, channel, dir1, dir2, dir3, bid._row_id(hidden)] } └─StreamExchange { dist: Single } └─StreamProject { exprs: [bid.auction, bid.bidder, bid.price, bid.channel, SplitPart(bid.url, '/':Varchar, 4:Int32) as $expr1, SplitPart(bid.url, 
'/':Varchar, 5:Int32) as $expr2, SplitPart(bid.url, '/':Varchar, 6:Int32) as $expr3, bid._row_id] } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_plan: |- StreamMaterialize { columns: [auction, bidder, price, channel, dir1, dir2, dir3, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [bid.auction, bid.bidder, bid.price, bid.channel, SplitPart(bid.url, '/':Varchar, 4:Int32) as $expr1, SplitPart(bid.url, '/':Varchar, 5:Int32) as $expr2, SplitPart(bid.url, '/':Varchar, 6:Int32) as $expr3, bid._row_id] } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction, bidder, price, channel, dir1, dir2, dir3, bid._row_id(hidden)], stream_key: [bid._row_id], pk_columns: [bid._row_id], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamProject { exprs: [bid.auction, bid.bidder, bid.price, bid.channel, SplitPart(bid.url, '/':Varchar, 4:Int32) as $expr1, SplitPart(bid.url, '/':Varchar, 5:Int32) as $expr2, SplitPart(bid.url, '/':Varchar, 6:Int32) as $expr3, bid._row_id] } - └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 0 ] } + └── StreamTableScan { table: bid, columns: [bid.auction, bid.bidder, bid.price, bid.channel, bid.url, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } { tables: [ StreamScan: 0 ] } ├── Upstream └── BatchPlanNode @@ -1916,11 +1875,11 @@ └─StreamExchange { dist: HashShard(auction.id) } └─StreamHashJoin { type: LeftOuter, predicate: auction.id = bid.auction, output: [auction.id, auction.item_name, max(bid.price), bid.auction] } ├─StreamExchange { dist: HashShard(auction.id) } - │ └─StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + │ └─StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } └─StreamProject { exprs: [bid.auction, max(bid.price)] } └─StreamHashAgg [append_only] { group_key: [bid.auction], aggs: [max(bid.price), count] } └─StreamExchange { dist: HashShard(bid.auction) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: 
|+ Fragment 0 StreamMaterialize { columns: [auction_id, auction_item_name, current_highest_bid, bid.auction(hidden)], stream_key: [auction_id], pk_columns: [auction_id], pk_conflict: NoCheck } @@ -1936,13 +1895,13 @@ └── StreamExchange Hash([0]) from 3 Fragment 2 - StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } ├── tables: [ StreamScan: 4 ] ├── Upstream └── BatchPlanNode Fragment 3 - StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 6 ] ├── Upstream └── BatchPlanNode @@ -2021,9 +1980,9 @@ │ └─StreamHashAgg { group_key: [auction.id], aggs: [internal_last_seen_value(auction.item_name), count(bid.auction), count] } │ └─StreamHashJoin { type: Inner, predicate: auction.id = bid.auction, output: all } │ ├─StreamExchange { dist: HashShard(auction.id) } - │ │ └─StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + │ │ └─StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } │ └─StreamExchange { dist: HashShard(bid.auction) } - │ └─StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + │ └─StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } └─StreamExchange { dist: Broadcast } └─StreamProject { exprs: [(sum0(sum0(count)) / sum0(count(bid.auction))) as $expr1] } └─StreamSimpleAgg { aggs: [sum0(sum0(count)), sum0(count(bid.auction)), count] } @@ -2031,7 +1990,7 @@ └─StreamStatelessSimpleAgg { aggs: [sum0(count), count(bid.auction)] } └─StreamHashAgg [append_only] { group_key: [bid.auction], aggs: [count] } └─StreamExchange { dist: HashShard(bid.auction) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction_id, auction_item_name, bid_count], stream_key: [auction_id], pk_columns: [auction_id], pk_conflict: NoCheck } @@ -2048,13 +2007,13 @@ └── StreamExchange Broadcast from 3 Fragment 1 - StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } ├── tables: [ StreamScan: 7 ] ├── Upstream └── BatchPlanNode Fragment 2 - StreamTableScan { table: bid, columns: [bid.auction, 
bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 8 ] ├── Upstream └── BatchPlanNode @@ -2070,7 +2029,7 @@ └── StreamExchange Hash([0]) from 5 Fragment 5 - StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 11 ] ├── Upstream └── BatchPlanNode @@ -2095,19 +2054,9 @@ Table 4 { columns: [ auction_id, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 5 - ├── columns: [ bid_auction, bid__row_id ] - ├── primary key: [ $0 ASC, $1 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 5 { columns: [ bid_auction, bid__row_id ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 6 - ├── columns: [ bid_auction, bid__row_id, _degree ] - ├── primary key: [ $0 ASC, $1 ASC ] - ├── value indices: [ 2 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 6 { columns: [ bid_auction, bid__row_id, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 7 ├── columns: [ vnode, id, auction_backfill_finished, auction_row_count ] @@ -2125,12 +2074,7 @@ ├── read pk prefix len hint: 1 └── vnode column idx: 0 - Table 9 - ├── columns: [ sum0(sum0(count)), sum0(count(bid_auction)), count ] - ├── primary key: [] - ├── value indices: [ 0, 1, 2 ] - ├── distribution key: [] - └── read pk prefix len hint: 0 + Table 9 { columns: [ sum0(sum0(count)), sum0(count(bid_auction)), count ], primary key: [], value indices: [ 0, 1, 2 ], distribution key: [], read pk prefix len hint: 0 } Table 10 { columns: [ bid_auction, count ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -2180,12 +2124,12 @@ └─StreamExchange { dist: HashShard(auction.id) } └─StreamHashJoin { type: LeftSemi, predicate: auction.id = bid.auction, output: all } ├─StreamExchange { dist: HashShard(auction.id) } - │ └─StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + │ └─StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } └─StreamProject { exprs: [bid.auction] } └─StreamFilter { predicate: (count >= 20:Int32) } └─StreamHashAgg [append_only] { group_key: [bid.auction], aggs: [count] } └─StreamExchange { dist: HashShard(bid.auction) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction_id, auction_item_name], stream_key: [auction_id], pk_columns: 
[auction_id], pk_conflict: NoCheck } @@ -2202,13 +2146,13 @@ └── StreamExchange Hash([0]) from 3 Fragment 2 - StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } ├── tables: [ StreamScan: 4 ] ├── Upstream └── BatchPlanNode Fragment 3 - StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 6 ] ├── Upstream └── BatchPlanNode @@ -2239,12 +2183,7 @@ ├── read pk prefix len hint: 1 └── vnode column idx: 0 - Table 4294967294 - ├── columns: [ auction_id, auction_item_name ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 4294967294 { columns: [ auction_id, auction_item_name ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - id: nexmark_q104 before: @@ -2277,12 +2216,12 @@ └─StreamExchange { dist: HashShard(auction.id) } └─StreamHashJoin { type: LeftAnti, predicate: auction.id = bid.auction, output: all } ├─StreamExchange { dist: HashShard(auction.id) } - │ └─StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + │ └─StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } └─StreamProject { exprs: [bid.auction] } └─StreamFilter { predicate: (count < 20:Int32) } └─StreamHashAgg [append_only] { group_key: [bid.auction], aggs: [count] } └─StreamExchange { dist: HashShard(bid.auction) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction_id, auction_item_name], stream_key: [auction_id], pk_columns: [auction_id], pk_conflict: NoCheck } @@ -2299,13 +2238,13 @@ └── StreamExchange Hash([0]) from 3 Fragment 2 - StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } ├── tables: [ StreamScan: 4 ] ├── Upstream └── BatchPlanNode Fragment 3 - StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 6 ] ├── Upstream └── BatchPlanNode @@ -2336,12 +2275,7 @@ ├── read pk prefix len hint: 1 └── vnode column 
idx: 0 - Table 4294967294 - ├── columns: [ auction_id, auction_item_name ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 4294967294 { columns: [ auction_id, auction_item_name ], primary key: [ $0 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - id: nexmark_q105 before: @@ -2379,9 +2313,9 @@ └─StreamHashAgg { group_key: [auction.id], aggs: [internal_last_seen_value(auction.item_name), count(bid.auction), count] } └─StreamHashJoin { type: Inner, predicate: auction.id = bid.auction, output: all } ├─StreamExchange { dist: HashShard(auction.id) } - │ └─StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + │ └─StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } └─StreamExchange { dist: HashShard(bid.auction) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [auction_id, auction_item_name, bid_count], stream_key: [auction_id], pk_columns: [bid_count, auction_id], pk_conflict: NoCheck } @@ -2401,13 +2335,13 @@ └── StreamExchange Hash([0]) from 3 Fragment 2 - StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + StreamTableScan { table: auction, columns: [auction.id, auction.item_name], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } ├── tables: [ StreamScan: 7 ] ├── Upstream └── BatchPlanNode Fragment 3 - StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + StreamTableScan { table: bid, columns: [bid.auction, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 8 ] ├── Upstream └── BatchPlanNode @@ -2438,19 +2372,9 @@ Table 4 { columns: [ auction_id, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 5 - ├── columns: [ bid_auction, bid__row_id ] - ├── primary key: [ $0 ASC, $1 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 5 { columns: [ bid_auction, bid__row_id ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 6 - ├── columns: [ bid_auction, bid__row_id, _degree ] - ├── primary key: [ $0 ASC, $1 ASC ] - ├── value indices: [ 2 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 6 { columns: [ bid_auction, bid__row_id, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 7 ├── columns: [ vnode, id, auction_backfill_finished, auction_row_count ] @@ -2520,9 +2444,9 @@ └─StreamFilter { predicate: (bid.date_time >= auction.date_time) AND (bid.date_time <= auction.expires) } └─StreamHashJoin { type: 
Inner, predicate: auction.id = bid.auction, output: all } ├─StreamExchange { dist: HashShard(auction.id) } - │ └─StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + │ └─StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } └─StreamExchange { dist: HashShard(bid.auction) } - └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [min_final], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -2546,13 +2470,13 @@ └── StreamExchange Hash([0]) from 3 Fragment 2 - StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires], stream_scan_type: Backfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } + StreamTableScan { table: auction, columns: [auction.id, auction.date_time, auction.expires], stream_scan_type: ArrangementBackfill, pk: [auction.id], dist: UpstreamHashShard(auction.id) } ├── tables: [ StreamScan: 10 ] ├── Upstream └── BatchPlanNode Fragment 3 - StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + StreamTableScan { table: bid, columns: [bid.auction, bid.price, bid.date_time, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } ├── tables: [ StreamScan: 11 ] ├── Upstream └── BatchPlanNode @@ -2569,20 +2493,9 @@ ├── read pk prefix len hint: 1 └── vnode column idx: 0 - Table 3 - ├── columns: [ $expr1, min(max(bid_price)), count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] - ├── distribution key: [] - ├── read pk prefix len hint: 1 - └── vnode column idx: 0 + Table 3 { columns: [ $expr1, min(max(bid_price)), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [], read pk prefix len hint: 1, vnode column idx: 0 } - Table 4 - ├── columns: [ auction_id, bid_price, bid__row_id ] - ├── primary key: [ $0 ASC, $1 DESC, $2 ASC ] - ├── value indices: [ 0, 1, 2 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 4 { columns: [ auction_id, bid_price, bid__row_id ], primary key: [ $0 ASC, $1 DESC, $2 ASC ], value indices: [ 0, 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 5 { columns: [ auction_id, max(bid_price), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } diff --git a/src/frontend/planner_test/tests/testdata/output/order_by.yaml b/src/frontend/planner_test/tests/testdata/output/order_by.yaml index 6ad554b4d51d8..2600909742df1 100644 --- a/src/frontend/planner_test/tests/testdata/output/order_by.yaml +++ b/src/frontend/planner_test/tests/testdata/output/order_by.yaml @@ -9,7 +9,7 @@ └─BatchScan { table: t, columns: [t.v1, t.v2], distribution: SomeShard } stream_plan: |- StreamMaterialize { columns: [v1, v2, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [v1, t._row_id], 
pk_conflict: NoCheck } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: output names are not qualified after table names sql: | create table t (v1 bigint, v2 double precision); @@ -66,7 +66,7 @@ stream_plan: |- StreamMaterialize { columns: [v1, v2, 2:Int32(hidden), t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [2:Int32, t._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [t.v1, t.v2, 2:Int32, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (v1 bigint, v2 double precision); select * from t order by v; @@ -90,7 +90,7 @@ └─StreamExchange { dist: Single } └─StreamGroupTopN { order: [t.v1 DESC], limit: 5, offset: 0, group_key: [$expr1] } └─StreamProject { exprs: [t.v1, t.v2, t._row_id, Vnode(t._row_id) as $expr1] } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (v1 bigint, v2 double precision); select * from t limit 3 offset 4; @@ -122,7 +122,7 @@ └─StreamExchange { dist: Single } └─StreamGroupTopN { order: [t.v1 DESC], limit: 12, offset: 0, group_key: [$expr1] } └─StreamProject { exprs: [t.v1, t.v2, t._row_id, Vnode(t._row_id) as $expr1] } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: order by expression that would be valid in select list sql: | create table t (x int, y int, z int); @@ -139,7 +139,7 @@ stream_plan: |- StreamMaterialize { columns: [x, y, $expr1(hidden), t.z(hidden), t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [$expr1, t.z, t._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [t.x, t.y, (t.x + t.y) as $expr1, t.z, t._row_id] } - └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: order by the number of an output column sql: | create table t (x int, y int); diff --git a/src/frontend/planner_test/tests/testdata/output/over_window_function.yaml b/src/frontend/planner_test/tests/testdata/output/over_window_function.yaml index bfda34c4a28e0..1b4e960972fbe 100644 --- a/src/frontend/planner_test/tests/testdata/output/over_window_function.yaml +++ b/src/frontend/planner_test/tests/testdata/output/over_window_function.yaml @@ -122,7 +122,7 @@ StreamMaterialize { columns: [x, y, t._row_id(hidden), lag], stream_key: [t._row_id, y], pk_columns: [t._row_id, y], pk_conflict: NoCheck } 
└─StreamOverWindow { window_functions: [first_value(t.x) OVER(PARTITION BY t.y ORDER BY t.x ASC ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING)] } └─StreamExchange { dist: HashShard(t.y) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: lag with over clause, ignoring frame definition sql: | create table t(x int, y int); @@ -142,7 +142,7 @@ StreamMaterialize { columns: [x, y, t._row_id(hidden), lag], stream_key: [t._row_id, y], pk_columns: [t._row_id, y], pk_conflict: NoCheck } └─StreamOverWindow { window_functions: [first_value(t.x) OVER(PARTITION BY t.y ORDER BY t.x ASC ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING)] } └─StreamExchange { dist: HashShard(t.y) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: aggregate with empty over clause sql: | create table t(x int); @@ -189,12 +189,12 @@ └─StreamHashJoin { type: Inner, predicate: t.y = t.y, output: [t.x, t.y, sum(t.x), max(t.x), min(t.w), t._row_id, t.y] } ├─StreamExchange { dist: HashShard(t.y) } │ └─StreamShare { id: 1 } - │ └─StreamTableScan { table: t, columns: [t.x, t.y, t.w, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.x, t.y, t.w, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamProject { exprs: [t.y, sum(t.x), max(t.x), min(t.w)] } └─StreamHashAgg { group_key: [t.y], aggs: [sum(t.x), max(t.x), min(t.w), count] } └─StreamExchange { dist: HashShard(t.y) } └─StreamShare { id: 1 } - └─StreamTableScan { table: t, columns: [t.x, t.y, t.w, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t.w, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: aggregate with over clause, rows frame definition with implicit current row, without ORDER BY sql: | create table t(x int, y int); @@ -214,7 +214,7 @@ StreamMaterialize { columns: [x, y, t._row_id(hidden), min], stream_key: [t._row_id, y], pk_columns: [t._row_id, y], pk_conflict: NoCheck } └─StreamOverWindow { window_functions: [min(t.x) OVER(PARTITION BY t.y ROWS BETWEEN 10 PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard(t.y) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: aggregate with over clause, rows frame definition with between, without ORDER BY sql: | create table t(x int, y int); @@ -234,7 +234,7 @@ StreamMaterialize { columns: [x, y, t._row_id(hidden), min], stream_key: [t._row_id, y], pk_columns: [t._row_id, y], pk_conflict: NoCheck } └─StreamOverWindow { window_functions: [min(t.x) OVER(PARTITION BY t.y ROWS BETWEEN 1 PRECEDING AND 2 FOLLOWING)] } └─StreamExchange { dist: HashShard(t.y) } - 
└─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: aggregate with over clause, invalid frame definition case 1 sql: | create table t(x int, y int); @@ -274,7 +274,7 @@ StreamMaterialize { columns: [x, y, t._row_id(hidden), max], stream_key: [t._row_id, y], pk_columns: [t._row_id, y], pk_conflict: NoCheck } └─StreamOverWindow { window_functions: [max(t.x) OVER(PARTITION BY t.y ORDER BY t.x ASC RANGE BETWEEN 100 PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard(t.y) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: aggregate with over clause, range frame definition with between sql: | create table t(x int, y int); @@ -294,7 +294,7 @@ StreamMaterialize { columns: [x, y, t._row_id(hidden), max], stream_key: [t._row_id, y], pk_columns: [t._row_id, y], pk_conflict: NoCheck } └─StreamOverWindow { window_functions: [max(t.x) OVER(PARTITION BY t.y ORDER BY t.x ASC RANGE BETWEEN 100 PRECEDING AND UNBOUNDED FOLLOWING)] } └─StreamExchange { dist: HashShard(t.y) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: aggregate with over clause, unbounded range, with ORDER BY sql: | create table t(x int, y int); @@ -314,7 +314,7 @@ StreamMaterialize { columns: [x, y, t._row_id(hidden), first_value], stream_key: [t._row_id, y], pk_columns: [t._row_id, y], pk_conflict: NoCheck } └─StreamOverWindow { window_functions: [first_value(t.x) OVER(PARTITION BY t.y ORDER BY t.x DESC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard(t.y) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: more aggregate functions sql: | create table t(x int, y int, z int, w int); @@ -351,7 +351,7 @@ └─StreamProject { exprs: [t.x, t.y, t.z, ($expr1 * $expr1) as $expr2, $expr1, (t.x * t.x) as $expr3, t._row_id] } └─StreamProject { exprs: [t.x, t.y, t.z, (t.x - t.y) as $expr1, t._row_id] } └─StreamFilter { predicate: (t.z > 0:Int32) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: aggregate with expression in func arguments and over clause sql: | create table t(x int, y int, z int, w int); @@ -388,7 +388,7 @@ └─StreamOverWindow { window_functions: [sum($expr1) OVER(PARTITION BY $expr2 ORDER BY $expr3 ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), count($expr1) OVER(PARTITION BY $expr2 ORDER BY 
$expr3 ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard($expr2) } └─StreamProject { exprs: [t.x, t.y, t.z, (t.z * t.z) as $expr1, (t.y + 1:Int32) as $expr2, Abs(t.w) as $expr3, t._row_id] } - └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t.w, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t.w, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: row_number with empty over clause sql: | create table t(x int); @@ -419,7 +419,7 @@ └─StreamProject { exprs: [row_number, rank, dense_rank, t._row_id, t.x] } └─StreamOverWindow { window_functions: [row_number() OVER(PARTITION BY t.x ORDER BY t.x ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), rank() OVER(PARTITION BY t.x ORDER BY t.x ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), dense_rank() OVER(PARTITION BY t.x ORDER BY t.x ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard(t.x) } - └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: row_number with valid over clause sql: | create table t(x int, y int); @@ -441,7 +441,7 @@ └─StreamProject { exprs: [row_number, t._row_id, t.x] } └─StreamOverWindow { window_functions: [row_number() OVER(PARTITION BY t.x ORDER BY t.y ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard(t.x) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: TopN by row_number with rank output sql: | create table t(x int); @@ -467,7 +467,7 @@ └─StreamOverWindow { window_functions: [row_number() OVER(PARTITION BY t.x ORDER BY t.x ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamGroupTopN { order: [t.x ASC], limit: 2, offset: 0, group_key: [t.x] } └─StreamExchange { dist: HashShard(t.x) } - └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: TopN by row_number without rank output, 1 sql: | create table t(x int, y int); @@ -493,7 +493,7 @@ StreamMaterialize { columns: [x, y, t._row_id(hidden)], stream_key: [y, t._row_id], pk_columns: [y, t._row_id], pk_conflict: NoCheck } └─StreamGroupTopN { order: [t.x ASC], limit: 3, offset: 0, group_key: [t.y] } └─StreamExchange { dist: HashShard(t.y) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: TopN by row_number without rank output, 2 sql: | create table t(x int, y int); @@ -522,7 +522,7 @@ └─StreamFilter { predicate: (t.x > t.y) } └─StreamGroupTopN { order: [t.x ASC], limit: 2, 
offset: 0, group_key: [t.y] } └─StreamExchange { dist: HashShard(t.y) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: TopN by rank without rank output sql: | create table t(x int, y int); @@ -548,7 +548,7 @@ StreamMaterialize { columns: [x, y, t._row_id(hidden)], stream_key: [y, t._row_id], pk_columns: [y, t._row_id], pk_conflict: NoCheck } └─StreamGroupTopN { order: [t.x ASC], limit: 3, offset: 0, with_ties: true, group_key: [t.y] } └─StreamExchange { dist: HashShard(t.y) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: TopN by rank, with offset sql: | create table t(x int, y int); @@ -577,7 +577,7 @@ └─StreamFilter { predicate: (rank <= 3:Int32) AND (rank > 1:Int32) } └─StreamOverWindow { window_functions: [rank() OVER(PARTITION BY t.y ORDER BY t.x ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard(t.y) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t(x int, y int); select x, y from @@ -676,7 +676,7 @@ └─StreamFilter { predicate: (3:Int32 < row_number) AND (row_number = 6:Int32) AND (row_number <= 5:Int32) } └─StreamOverWindow { window_functions: [row_number() OVER(PARTITION BY t.y ORDER BY t.x ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard(t.y) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: Deduplication (Top1) by row_number sql: | create table t(x int, y int); @@ -702,7 +702,7 @@ StreamMaterialize { columns: [x, y, t._row_id(hidden)], stream_key: [x], pk_columns: [x], pk_conflict: NoCheck } └─StreamGroupTopN { order: [t.y ASC], limit: 1, offset: 0, group_key: [t.x] } └─StreamExchange { dist: HashShard(t.x) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: TopN with intersected partition key and order key sql: | create table t (x int, y int, z int); @@ -732,7 +732,7 @@ └─StreamProject { exprs: [t.z, t.x, t.y] } └─StreamGroupTopN { order: [t.x ASC], limit: 1, offset: 0, group_key: [t.x, t.y] } └─StreamExchange { dist: HashShard(t.x, t.y) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: ArrangementBackfill, pk: 
[t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [z, t.x(hidden), t.y(hidden)], stream_key: [t.x, t.y], pk_columns: [t.x, t.y], pk_conflict: NoCheck } @@ -742,7 +742,7 @@ └── StreamExchange Hash([0, 1]) from 1 Fragment 1 - StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -817,7 +817,7 @@ └─StreamOverWindow { window_functions: [row_number() OVER(PARTITION BY t.x, t.y ORDER BY t.z ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamOverWindow { window_functions: [row_number() OVER(PARTITION BY t.x ORDER BY t.y ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard(t.x) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: TopN among multiple window function calls, some not TopN sql: | create table t (x int, y int, z int); @@ -861,7 +861,7 @@ └─StreamOverWindow { window_functions: [row_number() OVER(PARTITION BY t.x, t.y ORDER BY t.z ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), rank() OVER(PARTITION BY t.x, t.y ORDER BY t.z ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamOverWindow { window_functions: [row_number() OVER(PARTITION BY t.x ORDER BY t.y ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard(t.x) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: create_bid sql: | /* @@ -914,7 +914,7 @@ └─StreamExchange { dist: HashShard($expr1, $expr2, bid.supplier_id) } └─StreamProject { exprs: [$expr1, ($expr1 + '00:10:00':Interval) as $expr2, bid.supplier_id, bid.price, bid._row_id] } └─StreamProject { exprs: [bid.bidtime, bid.price, bid.supplier_id, TumbleStart(bid.bidtime, '00:10:00':Interval) as $expr1, bid._row_id] } - └─StreamTableScan { table: bid, columns: [bid.bidtime, bid.price, bid.supplier_id, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: [bid.bidtime, bid.price, bid.supplier_id, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } - before: - create_bid sql: | @@ -933,7 +933,7 @@ └─StreamExchange { dist: HashShard($expr1, $expr2) } └─StreamProject { exprs: [bid.price, bid.supplier_id, $expr1, ($expr1 + '00:10:00':Interval) as $expr2, bid._row_id] } └─StreamProject { exprs: [bid.bidtime, bid.price, bid.supplier_id, TumbleStart(bid.bidtime, '00:10:00':Interval) as $expr1, bid._row_id] } - └─StreamTableScan { table: bid, columns: [bid.bidtime, bid.price, bid.supplier_id, bid._row_id], stream_scan_type: Backfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } + └─StreamTableScan { table: bid, columns: 
[bid.bidtime, bid.price, bid.supplier_id, bid._row_id], stream_scan_type: ArrangementBackfill, pk: [bid._row_id], dist: UpstreamHashShard(bid._row_id) } - id: cte1 sql: | create table t (x int, y int, z int); @@ -962,7 +962,7 @@ └─StreamProject { exprs: [t.z, t.x, t._row_id] } └─StreamGroupTopN { order: [t.y ASC], limit: 3, offset: 0, group_key: [t.x] } └─StreamExchange { dist: HashShard(t.x) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: cte2 sql: | create table t (x int, y int, z int); @@ -1000,9 +1000,9 @@ └─StreamHashJoin { type: Inner, predicate: t.x = t.x, output: [t.x, t.x, t.z, t.y, t.z, t._row_id] } ├─StreamGroupTopN { order: [t.y ASC], limit: 1, offset: 0, group_key: [t.x] } │ └─StreamExchange { dist: HashShard(t.x) } - │ └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: HashShard(t.x) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: split calls with different ORDER BY or PARTITION BY sql: | create table t(x int, y int, z int); @@ -1049,7 +1049,7 @@ └─StreamExchange { dist: HashShard(t.z) } └─StreamOverWindow { window_functions: [rank() OVER(PARTITION BY t.x ORDER BY t.y ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard(t.x) } - └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t.y, t.z, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (id int, cat varchar, rule varchar, at timestamptz); select * from (select cat, rule, at, lag(rule) over (partition by cat order by at) as prev_rule from t) as with_prev @@ -1206,7 +1206,7 @@ └─StreamOverWindow { window_functions: [count() OVER(PARTITION BY 1:Int32 ORDER BY t.i ASC RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard(1:Int32) } └─StreamProject { exprs: [t.i, t.bi, t.d, t.f, t.ts, t.tstz, 1:Int32, t._row_id] } - └─StreamTableScan { table: t, columns: [t.i, t.bi, t.d, t.f, t.ts, t.tstz, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.i, t.bi, t.d, t.f, t.ts, t.tstz, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (i int, bi bigint, d decimal, f float, da date, t time, ts timestamp, tstz timestamptz, itv interval); select diff --git a/src/frontend/planner_test/tests/testdata/output/pk_derive.yaml b/src/frontend/planner_test/tests/testdata/output/pk_derive.yaml index 95f4c54bc67d2..7aee9ca53b51a 100644 --- 
a/src/frontend/planner_test/tests/testdata/output/pk_derive.yaml +++ b/src/frontend/planner_test/tests/testdata/output/pk_derive.yaml @@ -26,11 +26,11 @@ ├─StreamProject { exprs: [t1.id, max(t1.v1)] } │ └─StreamHashAgg { group_key: [t1.id], aggs: [max(t1.v1), count] } │ └─StreamExchange { dist: HashShard(t1.id) } - │ └─StreamTableScan { table: t1, columns: [t1.id, t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.id, t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamProject { exprs: [t2.id, max(t2.v2)] } └─StreamHashAgg { group_key: [t2.id], aggs: [max(t2.v2), count] } └─StreamExchange { dist: HashShard(t2.id) } - └─StreamTableScan { table: t2, columns: [t2.id, t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.id, t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - sql: | create table t (id int, v int); SELECT Tone.max_v, Ttwo.min_v @@ -57,11 +57,11 @@ ├─StreamProject { exprs: [t.id, max(t.v)] } │ └─StreamHashAgg { group_key: [t.id], aggs: [max(t.v), count] } │ └─StreamExchange { dist: HashShard(t.id) } - │ └─StreamTableScan { table: t, columns: [t.id, t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.id, t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamProject { exprs: [t.id, min(t.v)] } └─StreamHashAgg { group_key: [t.id], aggs: [min(t.v), count] } └─StreamExchange { dist: HashShard(t.id) } - └─StreamTableScan { table: t, columns: [t.id, t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.id, t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (v1 varchar, v2 varchar, v3 varchar); select @@ -80,7 +80,7 @@ └─StreamProject { exprs: [t.v1, t.v2, t.v3] } └─StreamHashAgg { group_key: [t.v1, t.v2, t.v3], aggs: [count] } └─StreamExchange { dist: HashShard(t.v1, t.v2, t.v3) } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t (v1 varchar, v2 varchar, v3 varchar); create materialized view mv as @@ -102,4 +102,4 @@ stream_plan: |- StreamMaterialize { columns: [v1, mv.v2(hidden), mv.v3(hidden)], stream_key: [v1, mv.v2, mv.v3], pk_columns: [v1, mv.v2, mv.v3], pk_conflict: NoCheck } └─StreamFilter { predicate: ((mv.v3 = 'world':Varchar) OR (mv.v3 = 'hello':Varchar)) } - └─StreamTableScan { table: mv, columns: [mv.v1, mv.v2, mv.v3], stream_scan_type: Backfill, pk: [mv.v1, mv.v2, mv.v3], dist: UpstreamHashShard(mv.v1, mv.v2, mv.v3) } + └─StreamTableScan { table: mv, columns: [mv.v1, mv.v2, mv.v3], stream_scan_type: ArrangementBackfill, pk: [mv.v1, mv.v2, mv.v3], dist: UpstreamHashShard(mv.v1, mv.v2, mv.v3) } diff --git a/src/frontend/planner_test/tests/testdata/output/predicate_pushdown.yaml 
b/src/frontend/planner_test/tests/testdata/output/predicate_pushdown.yaml index b87ea433bbb3e..33e8e3a3655e3 100644 --- a/src/frontend/planner_test/tests/testdata/output/predicate_pushdown.yaml +++ b/src/frontend/planner_test/tests/testdata/output/predicate_pushdown.yaml @@ -264,12 +264,12 @@ └─StreamHashJoin { type: Inner, predicate: t1.v1 = t2.v2, output: [t1.v1, t2.v2, t1._row_id, t2._row_id] } ├─StreamExchange { dist: HashShard(t1.v1) } │ └─StreamDynamicFilter { predicate: (t1.v1 > $expr1), output_watermarks: [t1.v1], output: [t1.v1, t1._row_id], cleaned_by_watermark: true } - │ ├─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ ├─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ └─StreamExchange { dist: Broadcast } │ └─StreamProject { exprs: [AddWithTimeZone(now, '01:00:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } │ └─StreamNow { output: [now] } └─StreamExchange { dist: HashShard(t2.v2) } - └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: now() in a complex cmp expr does not get pushed down sql: | create table t1(v1 timestamp with time zone); @@ -305,7 +305,7 @@ StreamMaterialize { columns: [v1, v2, t1._row_id(hidden)], stream_key: [t1._row_id], pk_columns: [t1._row_id], pk_conflict: NoCheck, watermark_columns: [v1] } └─StreamDynamicFilter { predicate: (t1.v1 > $expr1), output_watermarks: [t1.v1], output: [t1.v1, t1.v2, t1._row_id], cleaned_by_watermark: true } ├─StreamFilter { predicate: (t1.v2 > 5:Int32) } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: Broadcast } └─StreamProject { exprs: [AddWithTimeZone(now, '00:30:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } └─StreamNow { output: [now] } @@ -352,11 +352,11 @@ └─StreamHashJoin { type: Inner, predicate: t1.v1 = t2.v2, output: [t1.v1, t2.v2, t1._row_id, t2._row_id] } ├─StreamExchange { dist: HashShard(t1.v1) } │ └─StreamDynamicFilter { predicate: (t1.v1 > now), output_watermarks: [t1.v1], output: [t1.v1, t1._row_id], cleaned_by_watermark: true } - │ ├─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ ├─StreamTableScan { table: t1, columns: [t1.v1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ └─StreamExchange { dist: Broadcast } │ └─StreamNow { output: [now] } └─StreamExchange { dist: HashShard(t2.v2) } - └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: eq-predicate derived condition is banned for mismatching types 
sql: | create table t1(v1 int, v2 int); diff --git a/src/frontend/planner_test/tests/testdata/output/project_set.yaml b/src/frontend/planner_test/tests/testdata/output/project_set.yaml index 1db96753bea37..541bde09e4d6d 100644 --- a/src/frontend/planner_test/tests/testdata/output/project_set.yaml +++ b/src/frontend/planner_test/tests/testdata/output/project_set.yaml @@ -28,7 +28,7 @@ stream_plan: |- StreamMaterialize { columns: [projected_row_id(hidden), unnest, t._row_id(hidden)], stream_key: [t._row_id, projected_row_id], pk_columns: [t._row_id, projected_row_id], pk_conflict: NoCheck } └─StreamProjectSet { select_list: [Unnest($0), $1] } - └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: table functions used with usual expressions sql: | create table t(x int[]); @@ -72,7 +72,7 @@ └─StreamProjectSet { select_list: [($1 * $2), Unnest($0), $3, $4] } └─StreamProject { exprs: [t.x, Unnest($0), Unnest($0), t._row_id, projected_row_id] } └─StreamProjectSet { select_list: [$0, Unnest($0), Unnest($0), $1] } - └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: table functions as parameters of table functions sql: | create table t(x int[]); @@ -104,7 +104,7 @@ └─StreamGroupTopN { order: [projected_row_id ASC], limit: 1, offset: 0, group_key: [$expr1] } └─StreamProject { exprs: [projected_row_id, Unnest($0), t._row_id, Vnode(t._row_id) as $expr1] } └─StreamProjectSet { select_list: [Unnest($0), $1] } - └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: issue-7812 sql: | -- projected_row_id should be excluded from distinct @@ -122,7 +122,7 @@ └─StreamHashAgg { group_key: [Unnest($0)], aggs: [count] } └─StreamExchange { dist: HashShard(Unnest($0)) } └─StreamProjectSet { select_list: [Unnest($0), $1] } - └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t(x int[]); select * from (select unnest(x) as unnest from t) where unnest > 1; @@ -137,7 +137,7 @@ └─StreamProject { exprs: [Unnest($0), t._row_id, projected_row_id] } └─StreamFilter { predicate: (Unnest($0) > 1:Int32) } └─StreamProjectSet { select_list: [Unnest($0), $1] } - └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t(x int[]); select * from @@ -161,12 +161,12 @@ │ └─StreamShare { id: 3 } │ └─StreamProject { exprs: [Unnest($0), t._row_id, projected_row_id] } │ └─StreamProjectSet { select_list: 
[Unnest($0), $1] } - │ └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: HashShard(Unnest($0)) } └─StreamShare { id: 3 } └─StreamProject { exprs: [Unnest($0), t._row_id, projected_row_id] } └─StreamProjectSet { select_list: [Unnest($0), $1] } - └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: issue-10080 sql: | with cte as (SELECT 1 as v1, unnest(array[1,2,3,4,5]) AS v2) select v1 from cte; @@ -191,7 +191,7 @@ StreamMaterialize { columns: [v1, t._row_id(hidden), projected_row_id(hidden)], stream_key: [t._row_id, projected_row_id], pk_columns: [t._row_id, projected_row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [1:Int32, t._row_id, projected_row_id] } └─StreamProjectSet { select_list: [1:Int32, Unnest($0), $1] } - └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.x, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: set-returning function disallowed in CASE WHEN sql: | with a(a1) as (values (array[2]), (null)) select case when a1 is not null then unnest(a1) end from a; diff --git a/src/frontend/planner_test/tests/testdata/output/select_except.yaml b/src/frontend/planner_test/tests/testdata/output/select_except.yaml index f6ce8cd295601..60d2478d526ba 100644 --- a/src/frontend/planner_test/tests/testdata/output/select_except.yaml +++ b/src/frontend/planner_test/tests/testdata/output/select_except.yaml @@ -8,7 +8,7 @@ └─BatchScan { table: t, columns: [t.v3], distribution: SomeShard } stream_plan: |- StreamMaterialize { columns: [v3, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } - └─StreamTableScan { table: t, columns: [t.v3, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: test multiple except sql: | create table t (v1 int, v2 int, v3 int); @@ -20,7 +20,7 @@ stream_plan: |- StreamMaterialize { columns: [v3, v1, v2, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } └─StreamProject { exprs: [t.v3, t.v1, t.v2, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: except on join sql: | create table t (v1 int, v2 int, v3 int); @@ -37,9 +37,9 @@ └─StreamExchange { dist: HashShard(t.v1, t._row_id, t._row_id) } └─StreamHashJoin { type: Inner, predicate: t.v1 = t.v1, output: [t.v1, t.v3, t.v2, t._row_id, t._row_id] } ├─StreamExchange { dist: HashShard(t.v1) } - │ └─StreamTableScan { table: t, columns: [t.v1, t.v3, 
t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.v1, t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: HashShard(t.v1) } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: qualified wildcard sql: | create table t (v1 int, v2 int, v3 int); @@ -56,9 +56,9 @@ └─StreamExchange { dist: HashShard(t.v1, t._row_id, t._row_id) } └─StreamHashJoin { type: Inner, predicate: t.v1 = t.v1, output: [t.v1, t.v2, t.v3, t._row_id, t._row_id] } ├─StreamExchange { dist: HashShard(t.v1) } - │ └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.v1, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: HashShard(t.v1) } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: except with unknown column sql: | create table t (v1 int, v2 int, v3 int); diff --git a/src/frontend/planner_test/tests/testdata/output/share.yaml b/src/frontend/planner_test/tests/testdata/output/share.yaml index d196ff1b4d446..4e8b2198a4b3b 100644 --- a/src/frontend/planner_test/tests/testdata/output/share.yaml +++ b/src/frontend/planner_test/tests/testdata/output/share.yaml @@ -159,7 +159,7 @@ │ └─StreamSimpleAgg { aggs: [sum0(count), count] } │ └─StreamExchange { dist: Single } │ └─StreamStatelessSimpleAgg { aggs: [count] } - │ └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: HashShard(1:Int32) } └─StreamProject { exprs: [sum0(count), 1:Int32] } └─StreamShare { id: 5 } @@ -167,7 +167,7 @@ └─StreamSimpleAgg { aggs: [sum0(count), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [count] } - └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | set rw_enable_share_plan=false; create table t(a int, b int); @@ -180,13 +180,13 @@ │ └─StreamSimpleAgg { aggs: [sum0(count), count] } │ └─StreamExchange { dist: Single } │ └─StreamStatelessSimpleAgg { aggs: [count] } - │ └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: HashShard(1:Int32) } └─StreamProject { exprs: [sum0(count), 
1:Int32] } └─StreamSimpleAgg { aggs: [sum0(count), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [count] } - └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - id: force_share_source_for_self_join before: - create_sources @@ -225,12 +225,12 @@ │ └─StreamProject { exprs: [t.a] } │ └─StreamHashAgg { group_key: [t.a], aggs: [count] } │ └─StreamExchange { dist: HashShard(t.a) } - │ └─StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamShare { id: 4 } └─StreamProject { exprs: [t.a] } └─StreamHashAgg { group_key: [t.a], aggs: [count] } └─StreamExchange { dist: HashShard(t.a) } - └─StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [count], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -256,7 +256,7 @@ └── StreamExchange Hash([0]) from 3 Fragment 3 - StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 6 ] ├── Upstream └── BatchPlanNode @@ -269,21 +269,11 @@ Table 1 { columns: [ t_a ], primary key: [ $0 ASC ], value indices: [ 0 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 2 - ├── columns: [ t_a, _degree ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 2 { columns: [ t_a, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 3 { columns: [ t_a ], primary key: [ $0 ASC ], value indices: [ 0 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 4 - ├── columns: [ t_a, _degree ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 4 { columns: [ t_a, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 5 { columns: [ t_a, count ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } diff --git a/src/frontend/planner_test/tests/testdata/output/shared_views.yaml b/src/frontend/planner_test/tests/testdata/output/shared_views.yaml index e97bd2cf67596..ca68efc0ca708 100644 --- a/src/frontend/planner_test/tests/testdata/output/shared_views.yaml +++ b/src/frontend/planner_test/tests/testdata/output/shared_views.yaml @@ -29,14 +29,14 @@ │ └─StreamShare { id: 3 } │ └─StreamProject { exprs: [(t1.x + t1.y) as $expr1, t1._row_id] } │ └─StreamFilter { predicate: (t1.y > 0:Int32) } - │ └─StreamTableScan { table: t1, columns: [t1.x, t1.y, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], 
dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.x, t1.y, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard($expr2) } └─StreamProject { exprs: [(t1.x * $expr1) as $expr2, (t1.y * $expr1) as $expr3, t1._row_id, t1._row_id, t1.x] } └─StreamHashJoin { type: Inner, predicate: t1.x = $expr1, output: [t1.x, t1.y, $expr1, t1._row_id, t1._row_id] } ├─StreamExchange { dist: HashShard(t1.x) } - │ └─StreamTableScan { table: t1, columns: [t1.x, t1.y, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.x, t1.y, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard($expr1) } └─StreamShare { id: 3 } └─StreamProject { exprs: [(t1.x + t1.y) as $expr1, t1._row_id] } └─StreamFilter { predicate: (t1.y > 0:Int32) } - └─StreamTableScan { table: t1, columns: [t1.x, t1.y, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.x, t1.y, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/sink.yaml b/src/frontend/planner_test/tests/testdata/output/sink.yaml index 8ee6700ecd619..08599d4ac48d4 100644 --- a/src/frontend/planner_test/tests/testdata/output/sink.yaml +++ b/src/frontend/planner_test/tests/testdata/output/sink.yaml @@ -45,7 +45,7 @@ └── stream key: [ t1.v3, t1.v4 ] Fragment 1 - StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1.v3, t1.v5, t1.v4], stream_scan_type: Backfill, pk: [t1.v3, t1.v4], dist: UpstreamHashShard(t1.v3, t1.v4) } + StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1.v3, t1.v5, t1.v4], stream_scan_type: ArrangementBackfill, pk: [t1.v3, t1.v4], dist: UpstreamHashShard(t1.v3, t1.v4) } ├── tables: [ StreamScan: 1 ] ├── output: [ t1.v1, t1.v2, t1.v3, t1.v5, t1.v4 ] ├── stream key: [ t1.v3, t1.v4 ] @@ -97,7 +97,7 @@ └── stream key: [ t1.v1, t1.v2 ] Fragment 1 - StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1.v3, t1.v5], stream_scan_type: Backfill, pk: [t1.v1, t1.v2], dist: UpstreamHashShard(t1.v1, t1.v2) } + StreamTableScan { table: t1, columns: [t1.v1, t1.v2, t1.v3, t1.v5], stream_scan_type: ArrangementBackfill, pk: [t1.v1, t1.v2], dist: UpstreamHashShard(t1.v1, t1.v2) } ├── tables: [ StreamScan: 1 ] ├── output: [ t1.v1, t1.v2, t1.v3, t1.v5 ] ├── stream key: [ t1.v1, t1.v2 ] diff --git a/src/frontend/planner_test/tests/testdata/output/stream_dist_agg.yaml b/src/frontend/planner_test/tests/testdata/output/stream_dist_agg.yaml index 0c48b41fde43b..336e36aa2ee9e 100644 --- a/src/frontend/planner_test/tests/testdata/output/stream_dist_agg.yaml +++ b/src/frontend/planner_test/tests/testdata/output/stream_dist_agg.yaml @@ -19,13 +19,13 @@ StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], pk_conflict: NoCheck } └─StreamProject { exprs: [max(s.v)] } └─StreamSimpleAgg { aggs: [max(s.v), count] } - └─StreamTableScan { table: s, columns: [s.v, s.o, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + └─StreamTableScan { table: s, columns: [s.v, s.o, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], 
pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamProject { exprs: [max(s.v)] } └── StreamSimpleAgg { aggs: [max(s.v), count] } { tables: [ SimpleAggState: 1, SimpleAggCall0: 0 ] } - └── StreamTableScan { table: s, columns: [s.v, s.o, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + └── StreamTableScan { table: s, columns: [s.v, s.o, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -62,13 +62,13 @@ StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], pk_conflict: NoCheck } └─StreamProject { exprs: [sum(s.v)] } └─StreamSimpleAgg { aggs: [sum(s.v), count] } - └─StreamTableScan { table: s, columns: [s.v, s.o, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + └─StreamTableScan { table: s, columns: [s.v, s.o, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamProject { exprs: [sum(s.v)] } └── StreamSimpleAgg { aggs: [sum(s.v), count] } { tables: [ SimpleAggState: 0 ] } - └── StreamTableScan { table: s, columns: [s.v, s.o, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + └── StreamTableScan { table: s, columns: [s.v, s.o, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -98,13 +98,13 @@ StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], pk_conflict: NoCheck } └─StreamProject { exprs: [count(s.v)] } └─StreamSimpleAgg { aggs: [count(s.v), count] } - └─StreamTableScan { table: s, columns: [s.v, s.o, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + └─StreamTableScan { table: s, columns: [s.v, s.o, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamProject { exprs: [count(s.v)] } └── StreamSimpleAgg { aggs: [count(s.v), count] } { tables: [ SimpleAggState: 0 ] } - └── StreamTableScan { table: s, columns: [s.v, s.o, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + └── StreamTableScan { table: s, columns: [s.v, s.o, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -136,14 +136,14 @@ └─StreamProject { exprs: [string_agg(s.s, ',':Varchar order_by(s.v ASC))] } └─StreamSimpleAgg { aggs: [string_agg(s.s, ',':Varchar order_by(s.v ASC)), count] } └─StreamProject { exprs: [s.s, ',':Varchar, s.v, s.t._row_id] } - └─StreamTableScan { table: s, columns: [s.v, s.s, s.o, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + └─StreamTableScan { table: s, columns: [s.v, s.s, s.o, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamProject { exprs: [string_agg(s.s, ',':Varchar order_by(s.v ASC))] } └── StreamSimpleAgg { aggs: [string_agg(s.s, ',':Varchar order_by(s.v ASC)), count] } { 
tables: [ SimpleAggState: 1, SimpleAggCall0: 0 ] } └── StreamProject { exprs: [s.s, ',':Varchar, s.v, s.t._row_id] } - └── StreamTableScan { table: s, columns: [s.v, s.s, s.o, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + └── StreamTableScan { table: s, columns: [s.v, s.s, s.o, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -189,7 +189,7 @@ └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr1], aggs: [max(t.v), count] } └─StreamProject { exprs: [t.v, t._row_id, Vnode(t._row_id) as $expr1] } - └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -202,17 +202,12 @@ Fragment 1 StreamHashAgg { group_key: [$expr1], aggs: [max(t.v), count] } { tables: [ HashAggState: 3, HashAggCall0: 2 ] } └── StreamProject { exprs: [t.v, t._row_id, Vnode(t._row_id) as $expr1] } - └── StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └── StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 4 ] ├── Upstream └── BatchPlanNode - Table 0 - ├── columns: [ max(t_v), $expr1 ] - ├── primary key: [ $0 DESC, $1 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [] - └── read pk prefix len hint: 0 + Table 0 { columns: [ max(t_v), $expr1 ], primary key: [ $0 DESC, $1 ASC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 0 } Table 1 { columns: [ max(max(t_v)), count ], primary key: [], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 0 } @@ -253,7 +248,7 @@ └─StreamSimpleAgg [append_only] { aggs: [max(max(ao.v)), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [max(ao.v)] } - └─StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └─StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -265,7 +260,7 @@ Fragment 1 StreamStatelessSimpleAgg { aggs: [max(ao.v)] } - └── StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └── StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -298,7 +293,7 @@ └─StreamSimpleAgg { aggs: [sum(sum(t.v)), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [sum(t.v)] } - └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: 
[t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -310,7 +305,7 @@ Fragment 1 StreamStatelessSimpleAgg { aggs: [sum(t.v)] } - └── StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └── StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -338,7 +333,7 @@ └─StreamSimpleAgg [append_only] { aggs: [sum(sum(ao.v)), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [sum(ao.v)] } - └─StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └─StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -350,7 +345,7 @@ Fragment 1 StreamStatelessSimpleAgg { aggs: [sum(ao.v)] } - └── StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └── StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -383,7 +378,7 @@ └─StreamSimpleAgg { aggs: [sum0(count(t.v)), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [count(t.v)] } - └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -395,7 +390,7 @@ Fragment 1 StreamStatelessSimpleAgg { aggs: [count(t.v)] } - └── StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └── StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -423,7 +418,7 @@ └─StreamSimpleAgg [append_only] { aggs: [sum0(count(ao.v)), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [count(ao.v)] } - └─StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └─StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -435,7 +430,7 @@ Fragment 1 StreamStatelessSimpleAgg { aggs: [count(ao.v)] } - └── StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └── StreamTableScan { table: ao, columns: [ao.v, ao._row_id], 
stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -468,7 +463,7 @@ └─StreamSimpleAgg { aggs: [string_agg(t.s, ',':Varchar order_by(t.o ASC)), count] } └─StreamExchange { dist: Single } └─StreamProject { exprs: [t.s, ',':Varchar, t.o, t._row_id] } - └─StreamTableScan { table: t, columns: [t.o, t.s, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.o, t.s, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -480,7 +475,7 @@ Fragment 1 StreamProject { exprs: [t.s, ',':Varchar, t.o, t._row_id] } - └── StreamTableScan { table: t, columns: [t.o, t.s, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └── StreamTableScan { table: t, columns: [t.o, t.s, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -520,7 +515,7 @@ └─StreamSimpleAgg [append_only] { aggs: [string_agg(ao.s, ',':Varchar order_by(ao.o ASC)), count] } └─StreamExchange { dist: Single } └─StreamProject { exprs: [ao.s, ',':Varchar, ao.o, ao._row_id] } - └─StreamTableScan { table: ao, columns: [ao.o, ao.s, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └─StreamTableScan { table: ao, columns: [ao.o, ao.s, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -532,7 +527,7 @@ Fragment 1 StreamProject { exprs: [ao.s, ',':Varchar, ao.o, ao._row_id] } - └── StreamTableScan { table: ao, columns: [ao.o, ao.s, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └── StreamTableScan { table: ao, columns: [ao.o, ao.s, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -578,7 +573,7 @@ └─StreamExchange { dist: Single } └─StreamHashAgg { group_key: [$expr1], aggs: [max(t.v), count(t.v), count] } └─StreamProject { exprs: [t.v, t._row_id, Vnode(t._row_id) as $expr1] } - └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, a2], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -591,17 +586,12 @@ Fragment 1 StreamHashAgg { group_key: [$expr1], aggs: [max(t.v), count(t.v), count] } { tables: [ HashAggState: 3, HashAggCall0: 2 ] } └── StreamProject { exprs: [t.v, t._row_id, Vnode(t._row_id) as $expr1] } - └── StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └── StreamTableScan { table: t, columns: [t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 4 ] ├── 
Upstream └── BatchPlanNode - Table 0 - ├── columns: [ max(t_v), $expr1 ] - ├── primary key: [ $0 DESC, $1 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [] - └── read pk prefix len hint: 0 + Table 0 { columns: [ max(t_v), $expr1 ], primary key: [ $0 DESC, $1 ASC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 0 } Table 1 ├── columns: [ max(max(t_v)), sum0(count(t_v)), count ] @@ -647,7 +637,7 @@ └─StreamSimpleAgg [append_only] { aggs: [max(max(ao.v)), sum0(count(ao.v)), count] } └─StreamExchange { dist: Single } └─StreamStatelessSimpleAgg { aggs: [max(ao.v), count(ao.v)] } - └─StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └─StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, a2], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -659,7 +649,7 @@ Fragment 1 StreamStatelessSimpleAgg { aggs: [max(ao.v), count(ao.v)] } - └── StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └── StreamTableScan { table: ao, columns: [ao.v, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -697,7 +687,7 @@ └─StreamSimpleAgg { aggs: [count(t.v), string_agg(t.s, ',':Varchar order_by(t.o ASC)), count] } └─StreamExchange { dist: Single } └─StreamProject { exprs: [t.v, t.s, ',':Varchar, t.o, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v, t.o, t.s, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v, t.o, t.s, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, a2], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -709,7 +699,7 @@ Fragment 1 StreamProject { exprs: [t.v, t.s, ',':Varchar, t.o, t._row_id] } - └── StreamTableScan { table: t, columns: [t.v, t.o, t.s, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └── StreamTableScan { table: t, columns: [t.v, t.o, t.s, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -749,7 +739,7 @@ └─StreamSimpleAgg [append_only] { aggs: [count(ao.v), string_agg(ao.s, ',':Varchar order_by(ao.o ASC)), count] } └─StreamExchange { dist: Single } └─StreamProject { exprs: [ao.v, ao.s, ',':Varchar, ao.o, ao._row_id] } - └─StreamTableScan { table: ao, columns: [ao.v, ao.o, ao.s, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └─StreamTableScan { table: ao, columns: [ao.v, ao.o, ao.s, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, a2], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -761,7 +751,7 @@ Fragment 1 StreamProject { exprs: [ao.v, ao.s, ',':Varchar, ao.o, ao._row_id] } - └── StreamTableScan { table: ao, columns: [ao.v, ao.o, ao.s, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], 
dist: UpstreamHashShard(ao._row_id) } + └── StreamTableScan { table: ao, columns: [ao.v, ao.o, ao.s, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -806,7 +796,7 @@ └─StreamSimpleAgg { aggs: [max(t.v), string_agg(t.s, ',':Varchar order_by(t.o ASC)), count] } └─StreamExchange { dist: Single } └─StreamProject { exprs: [t.v, t.s, ',':Varchar, t.o, t._row_id] } - └─StreamTableScan { table: t, columns: [t.v, t.o, t.s, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v, t.o, t.s, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, a2], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -818,7 +808,7 @@ Fragment 1 StreamProject { exprs: [t.v, t.s, ',':Varchar, t.o, t._row_id] } - └── StreamTableScan { table: t, columns: [t.v, t.o, t.s, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └── StreamTableScan { table: t, columns: [t.v, t.o, t.s, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 3 ] ├── Upstream └── BatchPlanNode @@ -860,7 +850,7 @@ └─StreamSimpleAgg [append_only] { aggs: [max(ao.v), string_agg(ao.s, ',':Varchar order_by(ao.o ASC)), count] } └─StreamExchange { dist: Single } └─StreamProject { exprs: [ao.v, ao.s, ',':Varchar, ao.o, ao._row_id] } - └─StreamTableScan { table: ao, columns: [ao.v, ao.o, ao.s, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └─StreamTableScan { table: ao, columns: [ao.v, ao.o, ao.s, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, a2], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -872,7 +862,7 @@ Fragment 1 StreamProject { exprs: [ao.v, ao.s, ',':Varchar, ao.o, ao._row_id] } - └── StreamTableScan { table: ao, columns: [ao.v, ao.o, ao.s, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └── StreamTableScan { table: ao, columns: [ao.v, ao.o, ao.s, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -917,7 +907,7 @@ └─StreamProject { exprs: [max(t.v), t.k] } └─StreamHashAgg { group_key: [t.k], aggs: [max(t.v), count] } └─StreamExchange { dist: HashShard(t.k) } - └─StreamTableScan { table: t, columns: [t.k, t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.k, t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, t.k(hidden)], stream_key: [t.k], pk_columns: [t.k], pk_conflict: NoCheck } @@ -928,7 +918,7 @@ └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamTableScan { table: t, columns: [t.k, t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + StreamTableScan { table: t, columns: [t.k, t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) 
} ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -940,12 +930,7 @@ ├── distribution key: [ 0 ] └── read pk prefix len hint: 1 - Table 1 - ├── columns: [ t_k, max(t_v), count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 1 { columns: [ t_k, max(t_v), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 2 ├── columns: [ vnode, _row_id, t_backfill_finished, t_row_count ] @@ -955,12 +940,7 @@ ├── read pk prefix len hint: 1 └── vnode column idx: 0 - Table 4294967294 - ├── columns: [ a1, t.k ] - ├── primary key: [ $1 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 1 ] - └── read pk prefix len hint: 1 + Table 4294967294 { columns: [ a1, t.k ], primary key: [ $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 1 ], read pk prefix len hint: 1 } - id: extreme_on_Tk_by_k before: @@ -976,13 +956,13 @@ StreamMaterialize { columns: [a1, tk.k(hidden)], stream_key: [tk.k], pk_columns: [tk.k], pk_conflict: NoCheck } └─StreamProject { exprs: [max(tk.v), tk.k] } └─StreamHashAgg { group_key: [tk.k], aggs: [max(tk.v), count] } - └─StreamTableScan { table: tk, columns: [tk.k, tk.v, tk.t._row_id], stream_scan_type: Backfill, pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } + └─StreamTableScan { table: tk, columns: [tk.k, tk.v, tk.t._row_id], stream_scan_type: ArrangementBackfill, pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, tk.k(hidden)], stream_key: [tk.k], pk_columns: [tk.k], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamProject { exprs: [max(tk.v), tk.k] } └── StreamHashAgg { group_key: [tk.k], aggs: [max(tk.v), count] } { tables: [ HashAggState: 1, HashAggCall0: 0 ] } - └── StreamTableScan { table: tk, columns: [tk.k, tk.v, tk.t._row_id], stream_scan_type: Backfill, pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } + └── StreamTableScan { table: tk, columns: [tk.k, tk.v, tk.t._row_id], stream_scan_type: ArrangementBackfill, pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -1022,7 +1002,7 @@ └─StreamProject { exprs: [max(s.v), s.k] } └─StreamHashAgg { group_key: [s.k], aggs: [max(s.v), count] } └─StreamExchange { dist: HashShard(s.k) } - └─StreamTableScan { table: s, columns: [s.k, s.v, s.o, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + └─StreamTableScan { table: s, columns: [s.k, s.v, s.o, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, s.k(hidden)], stream_key: [s.k], pk_columns: [s.k], pk_conflict: NoCheck } @@ -1033,7 +1013,7 @@ └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamTableScan { table: s, columns: [s.k, s.v, s.o, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + StreamTableScan { table: s, columns: [s.k, s.v, s.o, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -1077,7 +1057,7 @@ └─StreamProject { exprs: [max(ao.v), ao.k] } └─StreamHashAgg [append_only] { group_key: [ao.k], aggs: [max(ao.v), count] } └─StreamExchange { dist: HashShard(ao.k) } - └─StreamTableScan { table: ao, columns: [ao.k, ao.v, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: 
UpstreamHashShard(ao._row_id) } + └─StreamTableScan { table: ao, columns: [ao.k, ao.v, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, ao.k(hidden)], stream_key: [ao.k], pk_columns: [ao.k], pk_conflict: NoCheck } @@ -1088,17 +1068,12 @@ └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamTableScan { table: ao, columns: [ao.k, ao.v, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + StreamTableScan { table: ao, columns: [ao.k, ao.v, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode - Table 0 - ├── columns: [ ao_k, max(ao_v), count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 0 { columns: [ ao_k, max(ao_v), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 1 ├── columns: [ vnode, _row_id, ao_backfill_finished, ao_row_count ] @@ -1126,7 +1101,7 @@ └─StreamProject { exprs: [sum(t.v), t.k] } └─StreamHashAgg { group_key: [t.k], aggs: [sum(t.v), count] } └─StreamExchange { dist: HashShard(t.k) } - └─StreamTableScan { table: t, columns: [t.k, t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.k, t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, t.k(hidden)], stream_key: [t.k], pk_columns: [t.k], pk_conflict: NoCheck } @@ -1136,17 +1111,12 @@ └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamTableScan { table: t, columns: [t.k, t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + StreamTableScan { table: t, columns: [t.k, t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode - Table 0 - ├── columns: [ t_k, sum(t_v), count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 0 { columns: [ t_k, sum(t_v), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 1 ├── columns: [ vnode, _row_id, t_backfill_finished, t_row_count ] @@ -1156,12 +1126,7 @@ ├── read pk prefix len hint: 1 └── vnode column idx: 0 - Table 4294967294 - ├── columns: [ a1, t.k ] - ├── primary key: [ $1 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 1 ] - └── read pk prefix len hint: 1 + Table 4294967294 { columns: [ a1, t.k ], primary key: [ $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 1 ], read pk prefix len hint: 1 } - id: sum_on_Tk_by_k before: @@ -1177,13 +1142,13 @@ StreamMaterialize { columns: [a1, tk.k(hidden)], stream_key: [tk.k], pk_columns: [tk.k], pk_conflict: NoCheck } └─StreamProject { exprs: [sum(tk.v), tk.k] } └─StreamHashAgg { group_key: [tk.k], aggs: [sum(tk.v), count] } - └─StreamTableScan { table: tk, columns: [tk.k, tk.v, tk.t._row_id], stream_scan_type: Backfill, pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } + └─StreamTableScan { table: tk, columns: [tk.k, tk.v, tk.t._row_id], stream_scan_type: ArrangementBackfill, 
pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, tk.k(hidden)], stream_key: [tk.k], pk_columns: [tk.k], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamProject { exprs: [sum(tk.v), tk.k] } └── StreamHashAgg { group_key: [tk.k], aggs: [sum(tk.v), count] } { tables: [ HashAggState: 0 ] } - └── StreamTableScan { table: tk, columns: [tk.k, tk.v, tk.t._row_id], stream_scan_type: Backfill, pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } + └── StreamTableScan { table: tk, columns: [tk.k, tk.v, tk.t._row_id], stream_scan_type: ArrangementBackfill, pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -1216,7 +1181,7 @@ └─StreamProject { exprs: [sum(s.v), s.k] } └─StreamHashAgg { group_key: [s.k], aggs: [sum(s.v), count] } └─StreamExchange { dist: HashShard(s.k) } - └─StreamTableScan { table: s, columns: [s.k, s.v, s.o, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + └─StreamTableScan { table: s, columns: [s.k, s.v, s.o, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, s.k(hidden)], stream_key: [s.k], pk_columns: [s.k], pk_conflict: NoCheck } @@ -1226,7 +1191,7 @@ └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamTableScan { table: s, columns: [s.k, s.v, s.o, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + StreamTableScan { table: s, columns: [s.k, s.v, s.o, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -1263,7 +1228,7 @@ └─StreamProject { exprs: [sum(ao.v), ao.k] } └─StreamHashAgg [append_only] { group_key: [ao.k], aggs: [sum(ao.v), count] } └─StreamExchange { dist: HashShard(ao.k) } - └─StreamTableScan { table: ao, columns: [ao.k, ao.v, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └─StreamTableScan { table: ao, columns: [ao.k, ao.v, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, ao.k(hidden)], stream_key: [ao.k], pk_columns: [ao.k], pk_conflict: NoCheck } @@ -1274,17 +1239,12 @@ └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamTableScan { table: ao, columns: [ao.k, ao.v, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + StreamTableScan { table: ao, columns: [ao.k, ao.v, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode - Table 0 - ├── columns: [ ao_k, sum(ao_v), count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 0 { columns: [ ao_k, sum(ao_v), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 1 ├── columns: [ vnode, _row_id, ao_backfill_finished, ao_row_count ] @@ -1312,7 +1272,7 @@ └─StreamProject { exprs: [count(t.v), t.k] } └─StreamHashAgg { group_key: [t.k], aggs: [count(t.v), count] } └─StreamExchange { dist: HashShard(t.k) } - └─StreamTableScan { table: t, columns: [t.k, t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: 
UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.k, t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, t.k(hidden)], stream_key: [t.k], pk_columns: [t.k], pk_conflict: NoCheck } @@ -1322,17 +1282,12 @@ └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamTableScan { table: t, columns: [t.k, t.v, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + StreamTableScan { table: t, columns: [t.k, t.v, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode - Table 0 - ├── columns: [ t_k, count(t_v), count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 0 { columns: [ t_k, count(t_v), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 1 ├── columns: [ vnode, _row_id, t_backfill_finished, t_row_count ] @@ -1342,12 +1297,7 @@ ├── read pk prefix len hint: 1 └── vnode column idx: 0 - Table 4294967294 - ├── columns: [ a1, t.k ] - ├── primary key: [ $1 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 1 ] - └── read pk prefix len hint: 1 + Table 4294967294 { columns: [ a1, t.k ], primary key: [ $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 1 ], read pk prefix len hint: 1 } - id: cnt_on_Tk_by_k before: @@ -1363,13 +1313,13 @@ StreamMaterialize { columns: [a1, tk.k(hidden)], stream_key: [tk.k], pk_columns: [tk.k], pk_conflict: NoCheck } └─StreamProject { exprs: [count(tk.v), tk.k] } └─StreamHashAgg { group_key: [tk.k], aggs: [count(tk.v), count] } - └─StreamTableScan { table: tk, columns: [tk.k, tk.v, tk.t._row_id], stream_scan_type: Backfill, pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } + └─StreamTableScan { table: tk, columns: [tk.k, tk.v, tk.t._row_id], stream_scan_type: ArrangementBackfill, pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, tk.k(hidden)], stream_key: [tk.k], pk_columns: [tk.k], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamProject { exprs: [count(tk.v), tk.k] } └── StreamHashAgg { group_key: [tk.k], aggs: [count(tk.v), count] } { tables: [ HashAggState: 0 ] } - └── StreamTableScan { table: tk, columns: [tk.k, tk.v, tk.t._row_id], stream_scan_type: Backfill, pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } + └── StreamTableScan { table: tk, columns: [tk.k, tk.v, tk.t._row_id], stream_scan_type: ArrangementBackfill, pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -1402,7 +1352,7 @@ └─StreamProject { exprs: [count(s.v), s.k] } └─StreamHashAgg { group_key: [s.k], aggs: [count(s.v), count] } └─StreamExchange { dist: HashShard(s.k) } - └─StreamTableScan { table: s, columns: [s.k, s.v, s.o, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + └─StreamTableScan { table: s, columns: [s.k, s.v, s.o, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, s.k(hidden)], stream_key: [s.k], pk_columns: [s.k], pk_conflict: NoCheck } @@ -1412,7 +1362,7 @@ └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamTableScan { table: s, columns: 
[s.k, s.v, s.o, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + StreamTableScan { table: s, columns: [s.k, s.v, s.o, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -1449,7 +1399,7 @@ └─StreamProject { exprs: [count(ao.v), ao.k] } └─StreamHashAgg [append_only] { group_key: [ao.k], aggs: [count(ao.v), count] } └─StreamExchange { dist: HashShard(ao.k) } - └─StreamTableScan { table: ao, columns: [ao.k, ao.v, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └─StreamTableScan { table: ao, columns: [ao.k, ao.v, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, ao.k(hidden)], stream_key: [ao.k], pk_columns: [ao.k], pk_conflict: NoCheck } @@ -1460,17 +1410,12 @@ └── StreamExchange Hash([0]) from 1 Fragment 1 - StreamTableScan { table: ao, columns: [ao.k, ao.v, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + StreamTableScan { table: ao, columns: [ao.k, ao.v, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode - Table 0 - ├── columns: [ ao_k, count(ao_v), count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2 ] - ├── distribution key: [ 0 ] - └── read pk prefix len hint: 1 + Table 0 { columns: [ ao_k, count(ao_v), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } Table 1 ├── columns: [ vnode, _row_id, ao_backfill_finished, ao_row_count ] @@ -1500,7 +1445,7 @@ └─StreamHashAgg { group_key: [t.k], aggs: [string_agg(t.s, ',':Varchar order_by(t.o ASC)), count] } └─StreamExchange { dist: HashShard(t.k) } └─StreamProject { exprs: [t.k, t.s, ',':Varchar, t.o, t._row_id] } - └─StreamTableScan { table: t, columns: [t.k, t.o, t.s, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.k, t.o, t.s, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, t.k(hidden)], stream_key: [t.k], pk_columns: [t.k], pk_conflict: NoCheck } @@ -1512,7 +1457,7 @@ Fragment 1 StreamProject { exprs: [t.k, t.s, ',':Varchar, t.o, t._row_id] } - └── StreamTableScan { table: t, columns: [t.k, t.o, t.s, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └── StreamTableScan { table: t, columns: [t.k, t.o, t.s, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -1557,14 +1502,14 @@ └─StreamProject { exprs: [string_agg(tk.s, ',':Varchar order_by(tk.o ASC)), tk.k] } └─StreamHashAgg { group_key: [tk.k], aggs: [string_agg(tk.s, ',':Varchar order_by(tk.o ASC)), count] } └─StreamProject { exprs: [tk.k, tk.s, ',':Varchar, tk.o, tk.t._row_id] } - └─StreamTableScan { table: tk, columns: [tk.k, tk.o, tk.s, tk.t._row_id], stream_scan_type: Backfill, pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } + └─StreamTableScan { table: tk, columns: [tk.k, tk.o, tk.s, tk.t._row_id], stream_scan_type: ArrangementBackfill, pk: [tk.t._row_id], dist: 
UpstreamHashShard(tk.k) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, tk.k(hidden)], stream_key: [tk.k], pk_columns: [tk.k], pk_conflict: NoCheck } { tables: [ Materialize: 4294967294 ] } └── StreamProject { exprs: [string_agg(tk.s, ',':Varchar order_by(tk.o ASC)), tk.k] } └── StreamHashAgg { group_key: [tk.k], aggs: [string_agg(tk.s, ',':Varchar order_by(tk.o ASC)), count] } { tables: [ HashAggState: 1, HashAggCall0: 0 ] } └── StreamProject { exprs: [tk.k, tk.s, ',':Varchar, tk.o, tk.t._row_id] } - └── StreamTableScan { table: tk, columns: [tk.k, tk.o, tk.s, tk.t._row_id], stream_scan_type: Backfill, pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } + └── StreamTableScan { table: tk, columns: [tk.k, tk.o, tk.s, tk.t._row_id], stream_scan_type: ArrangementBackfill, pk: [tk.t._row_id], dist: UpstreamHashShard(tk.k) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -1611,7 +1556,7 @@ └─StreamHashAgg { group_key: [s.k], aggs: [string_agg(s.s, ',':Varchar order_by(s.o ASC)), count] } └─StreamExchange { dist: HashShard(s.k) } └─StreamProject { exprs: [s.k, s.s, ',':Varchar, s.o, s.t._row_id] } - └─StreamTableScan { table: s, columns: [s.k, s.o, s.s, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + └─StreamTableScan { table: s, columns: [s.k, s.o, s.s, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, s.k(hidden)], stream_key: [s.k], pk_columns: [s.k], pk_conflict: NoCheck } @@ -1623,7 +1568,7 @@ Fragment 1 StreamProject { exprs: [s.k, s.s, ',':Varchar, s.o, s.t._row_id] } - └── StreamTableScan { table: s, columns: [s.k, s.o, s.s, s.t._row_id], stream_scan_type: Backfill, pk: [s.t._row_id], dist: Single } + └── StreamTableScan { table: s, columns: [s.k, s.o, s.s, s.t._row_id], stream_scan_type: ArrangementBackfill, pk: [s.t._row_id], dist: Single } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -1668,7 +1613,7 @@ └─StreamHashAgg [append_only] { group_key: [ao.k], aggs: [string_agg(ao.s, ',':Varchar order_by(ao.o ASC)), count] } └─StreamExchange { dist: HashShard(ao.k) } └─StreamProject { exprs: [ao.k, ao.s, ',':Varchar, ao.o, ao._row_id] } - └─StreamTableScan { table: ao, columns: [ao.k, ao.o, ao.s, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └─StreamTableScan { table: ao, columns: [ao.k, ao.o, ao.s, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a1, ao.k(hidden)], stream_key: [ao.k], pk_columns: [ao.k], pk_conflict: NoCheck } @@ -1680,7 +1625,7 @@ Fragment 1 StreamProject { exprs: [ao.k, ao.s, ',':Varchar, ao.o, ao._row_id] } - └── StreamTableScan { table: ao, columns: [ao.k, ao.o, ao.s, ao._row_id], stream_scan_type: Backfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } + └── StreamTableScan { table: ao, columns: [ao.k, ao.o, ao.s, ao._row_id], stream_scan_type: ArrangementBackfill, pk: [ao._row_id], dist: UpstreamHashShard(ao._row_id) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode diff --git a/src/frontend/planner_test/tests/testdata/output/struct_query.yaml b/src/frontend/planner_test/tests/testdata/output/struct_query.yaml index b869a494207f2..44058d741fcdc 100644 --- a/src/frontend/planner_test/tests/testdata/output/struct_query.yaml +++ 
b/src/frontend/planner_test/tests/testdata/output/struct_query.yaml @@ -7,7 +7,7 @@ └─BatchScan { table: t, columns: [t.country], distribution: SomeShard } stream_plan: |- StreamMaterialize { columns: [country, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck } - └─StreamTableScan { table: t, columns: [t.country, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.country, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } create_source: format: plain encode: protobuf diff --git a/src/frontend/planner_test/tests/testdata/output/subquery.yaml b/src/frontend/planner_test/tests/testdata/output/subquery.yaml index 68f2b0da1fb08..5324806ba2e7e 100644 --- a/src/frontend/planner_test/tests/testdata/output/subquery.yaml +++ b/src/frontend/planner_test/tests/testdata/output/subquery.yaml @@ -324,7 +324,7 @@ │ └─StreamShare { id: 3 } │ └─StreamHopWindow { time_col: auction.date_time, slide: 00:00:01, size: 01:00:00, output: [auction.date_time, window_start, window_end, auction._row_id] } │ └─StreamFilter { predicate: IsNotNull(auction.date_time) } - │ └─StreamTableScan { table: auction, columns: [auction.date_time, auction._row_id], stream_scan_type: Backfill, pk: [auction._row_id], dist: UpstreamHashShard(auction._row_id) } + │ └─StreamTableScan { table: auction, columns: [auction.date_time, auction._row_id], stream_scan_type: ArrangementBackfill, pk: [auction._row_id], dist: UpstreamHashShard(auction._row_id) } └─StreamProject { exprs: [auction.date_time] } └─StreamHashAgg { group_key: [auction.date_time], aggs: [count] } └─StreamProject { exprs: [auction.date_time] } @@ -333,7 +333,7 @@ └─StreamShare { id: 3 } └─StreamHopWindow { time_col: auction.date_time, slide: 00:00:01, size: 01:00:00, output: [auction.date_time, window_start, window_end, auction._row_id] } └─StreamFilter { predicate: IsNotNull(auction.date_time) } - └─StreamTableScan { table: auction, columns: [auction.date_time, auction._row_id], stream_scan_type: Backfill, pk: [auction._row_id], dist: UpstreamHashShard(auction._row_id) } + └─StreamTableScan { table: auction, columns: [auction.date_time, auction._row_id], stream_scan_type: ArrangementBackfill, pk: [auction._row_id], dist: UpstreamHashShard(auction._row_id) } - sql: | CREATE TABLE t (v int); SELECT 1 FROM t AS t_inner WHERE EXISTS ( SELECT 1 HAVING t_inner.v > 1); @@ -519,20 +519,20 @@ └─StreamExchange { dist: HashShard(t.x, t.k) } └─StreamHashJoin { type: LeftOuter, predicate: t.x IS NOT DISTINCT FROM t.x, output: [t.x, t.y, t.k, sum(Unnest($0)), t.x] } ├─StreamExchange { dist: HashShard(t.x) } - │ └─StreamTableScan { table: t, columns: [t.x, t.y, t.k], stream_scan_type: Backfill, pk: [t.k], dist: UpstreamHashShard(t.k) } + │ └─StreamTableScan { table: t, columns: [t.x, t.y, t.k], stream_scan_type: ArrangementBackfill, pk: [t.k], dist: UpstreamHashShard(t.k) } └─StreamProject { exprs: [t.x, sum(Unnest($0))] } └─StreamHashAgg { group_key: [t.x], aggs: [sum(Unnest($0)), count] } └─StreamHashJoin { type: LeftOuter, predicate: t.x IS NOT DISTINCT FROM t.x, output: [t.x, Unnest($0), t.x, projected_row_id] } ├─StreamProject { exprs: [t.x] } │ └─StreamHashAgg { group_key: [t.x], aggs: [count] } │ └─StreamExchange { dist: HashShard(t.x) } - │ └─StreamTableScan { table: t, columns: [t.x, t.k], stream_scan_type: Backfill, pk: [t.k], dist: UpstreamHashShard(t.k) } + │ └─StreamTableScan { table: t, 
columns: [t.x, t.k], stream_scan_type: ArrangementBackfill, pk: [t.k], dist: UpstreamHashShard(t.k) } └─StreamProject { exprs: [t.x, Unnest($0), projected_row_id] } └─StreamProjectSet { select_list: [$0, Unnest($0)] } └─StreamProject { exprs: [t.x] } └─StreamHashAgg { group_key: [t.x], aggs: [count] } └─StreamExchange { dist: HashShard(t.x) } - └─StreamTableScan { table: t, columns: [t.x, t.k], stream_scan_type: Backfill, pk: [t.k], dist: UpstreamHashShard(t.k) } + └─StreamTableScan { table: t, columns: [t.x, t.k], stream_scan_type: ArrangementBackfill, pk: [t.k], dist: UpstreamHashShard(t.k) } - name: CorrelatedInputRef in ProjectSet and apply on condition is true. sql: | create table t(x int[], y int[], k int primary key); @@ -617,14 +617,14 @@ └─StreamExchange { dist: HashShard(integers.i, integers._row_id) } └─StreamHashJoin { type: LeftOuter, predicate: integers.i IS NOT DISTINCT FROM integers.i, output: [integers.i, row_number, integers._row_id, integers.i] } ├─StreamExchange { dist: HashShard(integers.i) } - │ └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], stream_scan_type: Backfill, pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } + │ └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], stream_scan_type: ArrangementBackfill, pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } └─StreamGroupTopN { order: [integers.i ASC], limit: 1, offset: 0, group_key: [integers.i] } └─StreamProject { exprs: [integers.i, row_number, integers._row_id] } └─StreamOverWindow { window_functions: [row_number() OVER(PARTITION BY integers.i ORDER BY integers.i ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard(integers.i) } └─StreamProject { exprs: [integers.i, integers.i, integers._row_id] } └─StreamFilter { predicate: IsNotNull(integers.i) } - └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], stream_scan_type: Backfill, pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } + └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], stream_scan_type: ArrangementBackfill, pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } - name: test over window subquery 2 (with nested loop join so cannot be transformed into a stream plan) sql: | CREATE TABLE integers(i INTEGER); @@ -676,14 +676,14 @@ └─StreamExchange { dist: HashShard(integers.i, integers._row_id) } └─StreamHashJoin { type: LeftOuter, predicate: integers.i IS NOT DISTINCT FROM integers.i, output: [integers.i, sum, integers._row_id, integers.i] } ├─StreamExchange { dist: HashShard(integers.i) } - │ └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], stream_scan_type: Backfill, pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } + │ └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], stream_scan_type: ArrangementBackfill, pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } └─StreamGroupTopN { order: [integers.i ASC], limit: 1, offset: 0, group_key: [integers.i] } └─StreamProject { exprs: [integers.i, sum, integers._row_id] } └─StreamOverWindow { window_functions: [sum(integers.i) OVER(PARTITION BY integers.i ORDER BY integers.i ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard(integers.i) } └─StreamProject { exprs: [integers.i, integers.i, integers._row_id] } └─StreamFilter { predicate: IsNotNull(integers.i) } - 
└─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], stream_scan_type: Backfill, pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } + └─StreamTableScan { table: integers, columns: [integers.i, integers._row_id], stream_scan_type: ArrangementBackfill, pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } - name: test over window subquery 4 (with nested loop join so cannot be transformed into a stream plan) sql: | CREATE TABLE integers(i INTEGER); @@ -735,14 +735,14 @@ └─StreamHashJoin { type: LeftSemi, predicate: $expr1 = sum AND integers.correlated_col IS NOT DISTINCT FROM rows.correlated_col, output: [integers.i, integers._row_id, $expr1, integers.correlated_col] } ├─StreamExchange { dist: HashShard(integers.correlated_col, $expr1) } │ └─StreamProject { exprs: [integers.i, integers.correlated_col, integers.i::Int64 as $expr1, integers._row_id] } - │ └─StreamTableScan { table: integers, columns: [integers.i, integers.correlated_col, integers._row_id], stream_scan_type: Backfill, pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } + │ └─StreamTableScan { table: integers, columns: [integers.i, integers.correlated_col, integers._row_id], stream_scan_type: ArrangementBackfill, pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } └─StreamExchange { dist: HashShard(rows.correlated_col, sum) } └─StreamProject { exprs: [rows.correlated_col, sum, rows._row_id, rows.k] } └─StreamOverWindow { window_functions: [sum(rows.v) OVER(PARTITION BY rows.correlated_col, rows.k ORDER BY rows.v ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] } └─StreamExchange { dist: HashShard(rows.correlated_col, rows.k) } └─StreamProject { exprs: [rows.correlated_col, rows.k, rows.v, rows._row_id] } └─StreamFilter { predicate: IsNotNull(rows.correlated_col) } - └─StreamTableScan { table: rows, columns: [rows.k, rows.v, rows.correlated_col, rows._row_id], stream_scan_type: Backfill, pk: [rows._row_id], dist: UpstreamHashShard(rows._row_id) } + └─StreamTableScan { table: rows, columns: [rows.k, rows.v, rows.correlated_col, rows._row_id], stream_scan_type: ArrangementBackfill, pk: [rows._row_id], dist: UpstreamHashShard(rows._row_id) } - name: test cardinality visitor with correlated filter sql: | CREATE TABLE t1(i INT); @@ -807,18 +807,18 @@ └─StreamHashJoin { type: LeftSemi, predicate: integers.correlated_col IS NOT DISTINCT FROM integers.correlated_col AND 2:Int64 = $expr1, output: [integers.i, integers.correlated_col, integers._row_id, 2:Int64] } ├─StreamExchange { dist: HashShard(integers.correlated_col) } │ └─StreamProject { exprs: [integers.i, integers.correlated_col, 2:Int64, integers._row_id] } - │ └─StreamTableScan { table: integers, columns: [integers.i, integers.correlated_col, integers._row_id], stream_scan_type: Backfill, pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } + │ └─StreamTableScan { table: integers, columns: [integers.i, integers.correlated_col, integers._row_id], stream_scan_type: ArrangementBackfill, pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } └─StreamProject { exprs: [integers.correlated_col, (count(distinct rows.k) + count(distinct rows.v)) as $expr1] } └─StreamHashAgg { group_key: [integers.correlated_col], aggs: [count(distinct rows.k), count(distinct rows.v), count] } └─StreamHashJoin { type: LeftOuter, predicate: integers.correlated_col IS NOT DISTINCT FROM rows.correlated_col, output: [integers.correlated_col, rows.k, rows.v, rows._row_id] } ├─StreamProject { 
exprs: [integers.correlated_col] } │ └─StreamHashAgg { group_key: [integers.correlated_col], aggs: [count] } │ └─StreamExchange { dist: HashShard(integers.correlated_col) } - │ └─StreamTableScan { table: integers, columns: [integers.correlated_col, integers._row_id], stream_scan_type: Backfill, pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } + │ └─StreamTableScan { table: integers, columns: [integers.correlated_col, integers._row_id], stream_scan_type: ArrangementBackfill, pk: [integers._row_id], dist: UpstreamHashShard(integers._row_id) } └─StreamExchange { dist: HashShard(rows.correlated_col) } └─StreamProject { exprs: [rows.correlated_col, rows.k, rows.v, rows._row_id] } └─StreamFilter { predicate: IsNotNull(rows.correlated_col) } - └─StreamTableScan { table: rows, columns: [rows.k, rows.v, rows.correlated_col, rows._row_id], stream_scan_type: Backfill, pk: [rows._row_id], dist: UpstreamHashShard(rows._row_id) } + └─StreamTableScan { table: rows, columns: [rows.k, rows.v, rows.correlated_col, rows._row_id], stream_scan_type: ArrangementBackfill, pk: [rows._row_id], dist: UpstreamHashShard(rows._row_id) } - name: test hop window subquery 1 sql: | create table t1 (k int primary key, ts timestamp); @@ -843,7 +843,7 @@ └─StreamExchange { dist: HashShard(t1.k) } └─StreamHopWindow { time_col: t1.ts, slide: 00:10:00, size: 00:30:00, output: all } └─StreamFilter { predicate: IsNotNull(t1.ts) } - └─StreamTableScan { table: t1, columns: [t1.k, t1.ts], stream_scan_type: Backfill, pk: [t1.k], dist: UpstreamHashShard(t1.k) } + └─StreamTableScan { table: t1, columns: [t1.k, t1.ts], stream_scan_type: ArrangementBackfill, pk: [t1.k], dist: UpstreamHashShard(t1.k) } - name: Only table-in-out functions can have subquery parameters. sql: | SELECT * FROM generate_series(1, (select 1)); diff --git a/src/frontend/planner_test/tests/testdata/output/subquery_expr_correlated.yaml b/src/frontend/planner_test/tests/testdata/output/subquery_expr_correlated.yaml index 2b3eb4b33541e..7600a68b6dd27 100644 --- a/src/frontend/planner_test/tests/testdata/output/subquery_expr_correlated.yaml +++ b/src/frontend/planner_test/tests/testdata/output/subquery_expr_correlated.yaml @@ -734,13 +734,13 @@ └─StreamExchange { dist: HashShard(t1.x, t1._row_id) } └─StreamHashJoin { type: LeftSemi, predicate: t1.x IS NOT DISTINCT FROM t2.x, output: all } ├─StreamExchange { dist: HashShard(t1.x) } - │ └─StreamTableScan { table: t1, columns: [t1.x, t1.y, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.x, t1.y, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamProject { exprs: [t2.x] } └─StreamGroupTopN { order: [t2.x ASC], limit: 1, offset: 0, group_key: [t2.x] } └─StreamExchange { dist: HashShard(t2.x) } └─StreamProject { exprs: [t2.x, t2.x, t2._row_id] } └─StreamFilter { predicate: IsNotNull(t2.x) } - └─StreamTableScan { table: t2, columns: [t2.x, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.x, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - sql: | create table t1(x int, y int); create table t2(x int, y int); @@ -901,12 +901,12 @@ └─StreamExchange { dist: HashShard(t1.v1, t1.k1, t1._row_id) } └─StreamHashJoin { type: LeftSemi, predicate: t1.v1 = t2.v2 AND t1.k1 IS NOT DISTINCT FROM t2.k2, output: 
all } ├─StreamExchange { dist: HashShard(t1.k1) } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.k1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.k1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamGroupTopN { order: [t2.v2 ASC], limit: 1, offset: 0, group_key: [t2.k2] } └─StreamExchange { dist: HashShard(t2.k2) } └─StreamProject { exprs: [t2.k2, t2.v2, t2._row_id] } └─StreamFilter { predicate: IsNotNull(t2.k2) } - └─StreamTableScan { table: t2, columns: [t2.v2, t2.k2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2.k2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: test ApplyTopNTransposeRule case 2 sql: | create table t1 (v1 int, k1 int); @@ -927,14 +927,14 @@ └─StreamExchange { dist: HashShard(t1.v1, t1._row_id) } └─StreamHashJoin { type: LeftSemi, predicate: t1.v1 = t2.v2, output: all } ├─StreamExchange { dist: HashShard(t1.v1) } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.k1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.k1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2.v2) } └─StreamProject { exprs: [t2.v2, t2._row_id] } └─StreamTopN { order: [t2.v2 ASC], limit: 1, offset: 0 } └─StreamExchange { dist: Single } └─StreamGroupTopN { order: [t2.v2 ASC], limit: 1, offset: 0, group_key: [$expr1] } └─StreamProject { exprs: [t2.v2, t2._row_id, Vnode(t2._row_id) as $expr1] } - └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: test ApplyLimitTransposeRule case 1 sql: | create table t1 (v1 int, k1 int); @@ -955,12 +955,12 @@ └─StreamExchange { dist: HashShard(t1.v1, t1.k1, t1._row_id) } └─StreamHashJoin { type: LeftSemi, predicate: t1.v1 = t2.v2 AND t1.k1 IS NOT DISTINCT FROM t2.k2, output: all } ├─StreamExchange { dist: HashShard(t1.k1) } - │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.k1, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.v1, t1.k1, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamGroupTopN { order: [t2.k2 ASC], limit: 1, offset: 0, group_key: [t2.k2] } └─StreamExchange { dist: HashShard(t2.k2) } └─StreamProject { exprs: [t2.k2, t2.v2, t2._row_id] } └─StreamFilter { predicate: IsNotNull(t2.k2) } - └─StreamTableScan { table: t2, columns: [t2.v2, t2.k2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.v2, t2.k2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: test correlated input ref predicate and share operator sql: | create table t (a int, b int, c int); @@ -1004,11 +1004,11 @@ └─StreamExchange { dist: HashShard(t1._row_id, t1.b) } 
└─StreamHashJoin { type: LeftOuter, predicate: t1.b = t2.d, output: [$expr1, t1._row_id, t1.b, t2.d] } ├─StreamExchange { dist: HashShard(t1.b) } - │ └─StreamTableScan { table: t1, columns: [t1.b, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.b, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamProject { exprs: [Coalesce(array_agg(t2.c), ARRAY[]:List(Int32)) as $expr1, t2.d] } └─StreamHashAgg { group_key: [t2.d], aggs: [array_agg(t2.c), count] } └─StreamExchange { dist: HashShard(t2.d) } - └─StreamTableScan { table: t2, columns: [t2.c, t2.d, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.c, t2.d, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: correlated array subquery \du sql: | SELECT r.rolname, r.rolsuper, r.rolinherit, @@ -1092,18 +1092,18 @@ └─StreamHashJoin { type: Inner, predicate: t.b IS NOT DISTINCT FROM t.b, output: all } ├─StreamExchange { dist: HashShard(t.b) } │ └─StreamProject { exprs: [t.a, t.b, t.a::Int64 as $expr1, t._row_id] } - │ └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamProject { exprs: [t.b, count(1:Int32)] } └─StreamHashAgg { group_key: [t.b], aggs: [count(1:Int32), count] } └─StreamHashJoin { type: LeftOuter, predicate: t.b IS NOT DISTINCT FROM t2.d, output: [t.b, 1:Int32, t2._row_id] } ├─StreamProject { exprs: [t.b] } │ └─StreamHashAgg { group_key: [t.b], aggs: [count] } │ └─StreamExchange { dist: HashShard(t.b) } - │ └─StreamTableScan { table: t, columns: [t.b, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.b, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange { dist: HashShard(t2.d) } └─StreamProject { exprs: [t2.d, 1:Int32, t2._row_id] } └─StreamFilter { predicate: IsNotNull(t2.d) } - └─StreamTableScan { table: t2, columns: [t2.d, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.d, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: a case could be optimized by PullUpCorrelatedPredicateAggRule sql: | CREATE TABLE T (A INT, B INT); @@ -1127,8 +1127,8 @@ └─StreamHashJoin { type: Inner, predicate: t.b = t2.d, output: all } ├─StreamExchange { dist: HashShard(t.b) } │ └─StreamProject { exprs: [t.a, t.b, t.a::Decimal as $expr1, t._row_id] } - │ └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.a, t.b, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamProject { exprs: [(sum(t2.c)::Decimal / count(t2.c)::Decimal) as $expr2, t2.d] } └─StreamHashAgg { group_key: [t2.d], aggs: [sum(t2.c), count(t2.c), count] } └─StreamExchange { dist: HashShard(t2.d) } - └─StreamTableScan 
{ table: t2, columns: [t2.c, t2.d, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.c, t2.d, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/temporal_filter.yaml b/src/frontend/planner_test/tests/testdata/output/temporal_filter.yaml index 160f34103faf7..3ade534c505be 100644 --- a/src/frontend/planner_test/tests/testdata/output/temporal_filter.yaml +++ b/src/frontend/planner_test/tests/testdata/output/temporal_filter.yaml @@ -8,7 +8,7 @@ └─StreamProject { exprs: [t1.ts, t1._row_id] } └─StreamDynamicFilter { predicate: ($expr1 > now), output_watermarks: [$expr1], output: [t1.ts, $expr1, t1._row_id], cleaned_by_watermark: true } ├─StreamProject { exprs: [t1.ts, AddWithTimeZone(t1.ts, '01:00:00':Interval, 'UTC':Varchar) as $expr1, t1._row_id] } - │ └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: Broadcast } └─StreamNow { output: [now] } - name: Temporal filter works on complex columns on LHS (part 2) @@ -20,7 +20,7 @@ └─StreamProject { exprs: [t1.ts, t1.time_to_live, t1._row_id] } └─StreamDynamicFilter { predicate: ($expr1 > now), output_watermarks: [$expr1], output: [t1.ts, t1.time_to_live, $expr1, t1._row_id], cleaned_by_watermark: true } ├─StreamProject { exprs: [t1.ts, t1.time_to_live, AddWithTimeZone(t1.ts, (t1.time_to_live * 1.5:Decimal), 'UTC':Varchar) as $expr1, t1._row_id] } - │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.time_to_live, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.time_to_live, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: Broadcast } └─StreamNow { output: [now] } - name: Temporal filter works on complex columns on LHS (part 2, flipped) @@ -32,7 +32,7 @@ └─StreamProject { exprs: [t1.ts, t1.additional_time_to_live, t1._row_id] } └─StreamDynamicFilter { predicate: ($expr1 > $expr2), output_watermarks: [$expr1], output: [t1.ts, t1.additional_time_to_live, $expr1, t1._row_id], cleaned_by_watermark: true } ├─StreamProject { exprs: [t1.ts, t1.additional_time_to_live, AddWithTimeZone(t1.ts, (t1.additional_time_to_live * 1.5:Decimal), 'UTC':Varchar) as $expr1, t1._row_id] } - │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.additional_time_to_live, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.additional_time_to_live, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: Broadcast } └─StreamProject { exprs: [SubtractWithTimeZone(now, '00:15:00':Interval, 'UTC':Varchar) as $expr2], output_watermarks: [$expr2] } └─StreamNow { output: [now] } @@ -43,7 +43,7 @@ stream_plan: |- StreamMaterialize { columns: [ts, t1._row_id(hidden)], stream_key: [t1._row_id], pk_columns: [t1._row_id], pk_conflict: NoCheck } └─StreamDynamicFilter { predicate: (t1.ts < $expr1), output: [t1.ts, t1._row_id], condition_always_relax: 
true } - ├─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + ├─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: Broadcast } └─StreamProject { exprs: [SubtractWithTimeZone(now, '00:15:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } └─StreamNow { output: [now] } @@ -53,7 +53,7 @@ ├── tables: [ Materialize: 4294967294 ] └── StreamDynamicFilter { predicate: (t1.ts < $expr1), output: [t1.ts, t1._row_id], condition_always_relax: true } ├── tables: [ DynamicFilterLeftNotSatisfy: 0, DynamicFilterRight: 1 ] - ├── StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + ├── StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ ├── tables: [ StreamScan: 2 ] │ ├── Upstream │ └── BatchPlanNode @@ -63,12 +63,7 @@ StreamProject { exprs: [SubtractWithTimeZone(now, '00:15:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } └── StreamNow { output: [now] } { tables: [ Now: 3 ] } - Table 0 - ├── columns: [ t1_ts, t1__row_id ] - ├── primary key: [ $0 ASC, $1 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 1 ] - └── read pk prefix len hint: 1 + Table 0 { columns: [ t1_ts, t1__row_id ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 1 ], read pk prefix len hint: 1 } Table 1 { columns: [ $expr1 ], primary key: [], value indices: [ 0 ], distribution key: [], read pk prefix len hint: 0 } @@ -82,12 +77,7 @@ Table 3 { columns: [ now ], primary key: [], value indices: [ 0 ], distribution key: [], read pk prefix len hint: 0 } - Table 4294967294 - ├── columns: [ ts, t1._row_id ] - ├── primary key: [ $1 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 1 ] - └── read pk prefix len hint: 1 + Table 4294967294 { columns: [ ts, t1._row_id ], primary key: [ $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 1 ], read pk prefix len hint: 1 } - name: Temporal filter with equal condition sql: |- @@ -99,7 +89,7 @@ └─StreamHashJoin { type: LeftSemi, predicate: $expr1 = $expr2, output: [t1.ts, t1._row_id, $expr1] } ├─StreamExchange { dist: HashShard($expr1) } │ └─StreamProject { exprs: [t1.ts, DateTrunc('week':Varchar, t1.ts, 'UTC':Varchar) as $expr1, t1._row_id] } - │ └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard($expr2) } └─StreamProject { exprs: [DateTrunc('week':Varchar, now, 'UTC':Varchar) as $expr2], output_watermarks: [$expr2] } └─StreamNow { output: [now] } @@ -117,7 +107,7 @@ Fragment 2 StreamProject { exprs: [t1.ts, DateTrunc('week':Varchar, t1.ts, 'UTC':Varchar) as $expr1, t1._row_id] } - └── StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └── StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } ├── tables: [ StreamScan: 4 ] ├── 
Upstream └── BatchPlanNode @@ -163,7 +153,7 @@ stream_plan: |- StreamMaterialize { columns: [ts, t1._row_id(hidden)], stream_key: [t1._row_id], pk_columns: [t1._row_id], pk_conflict: NoCheck } └─StreamDynamicFilter [append_only] { predicate: (t1.ts < $expr1), output: [t1.ts, t1._row_id], condition_always_relax: true } - ├─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + ├─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: Broadcast } └─StreamProject { exprs: [SubtractWithTimeZone(now, '00:15:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } └─StreamNow { output: [now] } @@ -173,7 +163,7 @@ ├── tables: [ Materialize: 4294967294 ] └── StreamDynamicFilter [append_only] { predicate: (t1.ts < $expr1), output: [t1.ts, t1._row_id], condition_always_relax: true } ├── tables: [ DynamicFilterLeftNotSatisfy: 0, DynamicFilterRight: 1 ] - ├── StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + ├── StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ ├── tables: [ StreamScan: 2 ] │ ├── Upstream │ └── BatchPlanNode @@ -183,12 +173,7 @@ StreamProject { exprs: [SubtractWithTimeZone(now, '00:15:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } └── StreamNow { output: [now] } { tables: [ Now: 3 ] } - Table 0 - ├── columns: [ t1_ts, t1__row_id ] - ├── primary key: [ $0 ASC, $1 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 1 ] - └── read pk prefix len hint: 1 + Table 0 { columns: [ t1_ts, t1__row_id ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 1 ], read pk prefix len hint: 1 } Table 1 { columns: [ $expr1 ], primary key: [], value indices: [ 0 ], distribution key: [], read pk prefix len hint: 0 } @@ -202,12 +187,7 @@ Table 3 { columns: [ now ], primary key: [], value indices: [ 0 ], distribution key: [], read pk prefix len hint: 0 } - Table 4294967294 - ├── columns: [ ts, t1._row_id ] - ├── primary key: [ $1 ASC ] - ├── value indices: [ 0, 1 ] - ├── distribution key: [ 1 ] - └── read pk prefix len hint: 1 + Table 4294967294 { columns: [ ts, t1._row_id ], primary key: [ $1 ASC ], value indices: [ 0, 1 ], distribution key: [ 1 ], read pk prefix len hint: 1 } - name: Temporal filter reorders now expressions correctly sql: | @@ -217,7 +197,7 @@ StreamMaterialize { columns: [ts, t1._row_id(hidden)], stream_key: [t1._row_id], pk_columns: [t1._row_id], pk_conflict: NoCheck, watermark_columns: [ts] } └─StreamDynamicFilter { predicate: (t1.ts >= $expr2), output_watermarks: [t1.ts], output: [t1.ts, t1._row_id], cleaned_by_watermark: true } ├─StreamDynamicFilter { predicate: (t1.ts < $expr1), output: [t1.ts, t1._row_id], condition_always_relax: true } - │ ├─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ ├─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ └─StreamExchange { dist: Broadcast } │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '01:00:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: 
[$expr1] } │ └─StreamNow { output: [now] } @@ -232,7 +212,7 @@ ├── tables: [ DynamicFilterLeft: 0, DynamicFilterRight: 1 ] ├── StreamDynamicFilter { predicate: (t1.ts < $expr1), output: [t1.ts, t1._row_id], condition_always_relax: true } │ ├── tables: [ DynamicFilterLeftNotSatisfy: 2, DynamicFilterRight: 3 ] - │ ├── StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ ├── StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ │ ├── tables: [ StreamScan: 4 ] │ │ ├── Upstream │ │ └── BatchPlanNode @@ -281,7 +261,7 @@ ├─StreamExchange { dist: HashShard(t1.a) } │ └─StreamDynamicFilter { predicate: (t1.ta >= $expr2), output_watermarks: [t1.ta], output: [t1.a, t1.ta, t1._row_id], cleaned_by_watermark: true } │ ├─StreamDynamicFilter { predicate: (t1.ta < $expr1), output: [t1.a, t1.ta, t1._row_id], condition_always_relax: true } - │ │ ├─StreamTableScan { table: t1, columns: [t1.a, t1.ta, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ │ ├─StreamTableScan { table: t1, columns: [t1.a, t1.ta, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ │ └─StreamExchange { dist: Broadcast } │ │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '01:00:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } │ │ └─StreamNow { output: [now] } @@ -289,7 +269,7 @@ │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '02:00:00':Interval, 'UTC':Varchar) as $expr2], output_watermarks: [$expr2] } │ └─StreamNow { output: [now] } └─StreamExchange { dist: HashShard(t2.b) } - └─StreamTableScan { table: t2, columns: [t2.b, t2.tb, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.b, t2.tb, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: Temporal filter in on clause for left join's left side sql: | create table t1 (a int, ta timestamp with time zone); @@ -308,11 +288,11 @@ └─StreamExchange { dist: HashShard(t2.b, t2._row_id, t1._row_id) } └─StreamHashJoin { type: LeftOuter, predicate: t2.b = t1.a, output: [t1.a, t1.ta, t2.b, t2.tb, t2._row_id, t1._row_id] } ├─StreamExchange { dist: HashShard(t2.b) } - │ └─StreamTableScan { table: t2, columns: [t2.b, t2.tb, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + │ └─StreamTableScan { table: t2, columns: [t2.b, t2.tb, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } └─StreamExchange { dist: HashShard(t1.a) } └─StreamDynamicFilter { predicate: (t1.ta >= $expr2), output_watermarks: [t1.ta], output: [t1.a, t1.ta, t1._row_id], cleaned_by_watermark: true } ├─StreamDynamicFilter { predicate: (t1.ta < $expr1), output: [t1.a, t1.ta, t1._row_id], condition_always_relax: true } - │ ├─StreamTableScan { table: t1, columns: [t1.a, t1.ta, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ ├─StreamTableScan { table: t1, columns: [t1.a, t1.ta, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ └─StreamExchange { dist: Broadcast } │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '01:00:00':Interval, 
'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } │ └─StreamNow { output: [now] } @@ -337,11 +317,11 @@ └─StreamExchange { dist: HashShard(t1.a, t1._row_id, t2._row_id) } └─StreamHashJoin { type: LeftOuter, predicate: t1.a = t2.b, output: [t1.a, t1.ta, t2.b, t2.tb, t1._row_id, t2._row_id] } ├─StreamExchange { dist: HashShard(t1.a) } - │ └─StreamTableScan { table: t1, columns: [t1.a, t1.ta, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.a, t1.ta, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2.b) } └─StreamDynamicFilter { predicate: (t2.tb >= $expr2), output_watermarks: [t2.tb], output: [t2.b, t2.tb, t2._row_id], cleaned_by_watermark: true } ├─StreamDynamicFilter { predicate: (t2.tb < $expr1), output: [t2.b, t2.tb, t2._row_id], condition_always_relax: true } - │ ├─StreamTableScan { table: t2, columns: [t2.b, t2.tb, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + │ ├─StreamTableScan { table: t2, columns: [t2.b, t2.tb, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } │ └─StreamExchange { dist: Broadcast } │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '01:00:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } │ └─StreamNow { output: [now] } @@ -367,7 +347,7 @@ └─StreamDynamicFilter { predicate: (stream.v1 > now), output_watermarks: [stream.v1], output: [stream.id1, stream.a1, stream.v1, version.id2, stream._row_id], cleaned_by_watermark: true } ├─StreamTemporalJoin { type: LeftOuter, predicate: stream.id1 = version.id2, output: [stream.id1, stream.a1, stream.v1, version.id2, stream._row_id] } │ ├─StreamExchange { dist: HashShard(stream.id1) } - │ │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.v1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.v1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } │ └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2) } │ └─StreamTableScan { table: version, columns: [version.id2], stream_scan_type: UpstreamOnly, pk: [version.id2], dist: UpstreamHashShard(version.id2) } └─StreamExchange { dist: Broadcast } @@ -386,7 +366,7 @@ │ │ └─StreamFilter { predicate: Not((t1.ts > '2023-12-18 00:00:00+00:00':Timestamptz)) } │ │ └─StreamShare { id: 2 } │ │ └─StreamFilter { predicate: (Not((t1.ts > '2023-12-18 00:00:00+00:00':Timestamptz)) OR (t1.ts > '2023-12-18 00:00:00+00:00':Timestamptz)) } - │ │ └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ │ └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ └─StreamExchange { dist: Broadcast } │ └─StreamNow { output: [now] } └─StreamExchange { dist: HashShard(t1._row_id, 1:Int32) } @@ -394,7 +374,7 @@ └─StreamFilter { predicate: (t1.ts > '2023-12-18 00:00:00+00:00':Timestamptz) } └─StreamShare { id: 2 } └─StreamFilter { predicate: (Not((t1.ts > '2023-12-18 00:00:00+00:00':Timestamptz)) OR (t1.ts > '2023-12-18 00:00:00+00:00':Timestamptz)) } - 
└─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - name: Temporal filter with or is null sql: | create table t1 (ts timestamp with time zone); @@ -409,7 +389,7 @@ │ │ └─StreamFilter { predicate: Not(IsNull(t1.ts)) } │ │ └─StreamShare { id: 2 } │ │ └─StreamFilter { predicate: (Not(IsNull(t1.ts)) OR IsNull(t1.ts)) } - │ │ └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ │ └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ └─StreamExchange { dist: Broadcast } │ └─StreamNow { output: [now] } └─StreamExchange { dist: HashShard(t1._row_id, 1:Int32) } @@ -417,7 +397,7 @@ └─StreamFilter { predicate: IsNull(t1.ts) } └─StreamShare { id: 2 } └─StreamFilter { predicate: (Not(IsNull(t1.ts)) OR IsNull(t1.ts)) } - └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - name: Temporal filter with or predicate sql: | create table t1 (ts timestamp with time zone); @@ -432,7 +412,7 @@ │ │ └─StreamFilter { predicate: Not((t1.ts > '2023-12-18 00:00:00+00:00':Timestamptz)) AND Not(IsNull(t1.ts)) } │ │ └─StreamShare { id: 2 } │ │ └─StreamFilter { predicate: (((Not((t1.ts > '2023-12-18 00:00:00+00:00':Timestamptz)) AND Not(IsNull(t1.ts))) OR (t1.ts > '2023-12-18 00:00:00+00:00':Timestamptz)) OR IsNull(t1.ts)) } - │ │ └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ │ └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } │ └─StreamExchange { dist: Broadcast } │ └─StreamNow { output: [now] } └─StreamExchange { dist: HashShard(t1._row_id, 1:Int32) } @@ -440,7 +420,7 @@ └─StreamFilter { predicate: ((t1.ts > '2023-12-18 00:00:00+00:00':Timestamptz) OR IsNull(t1.ts)) } └─StreamShare { id: 2 } └─StreamFilter { predicate: (((Not((t1.ts > '2023-12-18 00:00:00+00:00':Timestamptz)) AND Not(IsNull(t1.ts))) OR (t1.ts > '2023-12-18 00:00:00+00:00':Timestamptz)) OR IsNull(t1.ts)) } - └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.ts, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - name: Many Temporal filter with or predicate sql: | create table t (t timestamp with time zone, a int); @@ -460,7 +440,7 @@ │ │ │ ├─StreamFilter { predicate: (Not((t.a > 1:Int32)) OR (t.a > 1:Int32)) AND Not(IsNull(t.t)) AND Not((t.a < 1:Int32)) } │ │ │ │ └─StreamShare { id: 2 } │ │ │ │ └─StreamFilter { predicate: (Not((t.a > 1:Int32)) OR (t.a > 1:Int32)) AND (((Not(IsNull(t.t)) AND Not((t.a < 1:Int32))) OR IsNull(t.t)) OR (t.a < 1:Int32)) } - │ │ │ │ └─StreamTableScan { table: t, columns: [t.t, t.a, t._row_id], stream_scan_type: Backfill, pk: 
[t._row_id], dist: UpstreamHashShard(t._row_id) } + │ │ │ │ └─StreamTableScan { table: t, columns: [t.t, t.a, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } │ │ │ └─StreamExchange { dist: Broadcast } │ │ │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '01:00:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } │ │ │ └─StreamNow { output: [now] } @@ -469,7 +449,7 @@ │ │ └─StreamFilter { predicate: (Not((t.a > 1:Int32)) OR (t.a > 1:Int32)) AND (IsNull(t.t) OR (t.a < 1:Int32)) } │ │ └─StreamShare { id: 2 } │ │ └─StreamFilter { predicate: (Not((t.a > 1:Int32)) OR (t.a > 1:Int32)) AND (((Not(IsNull(t.t)) AND Not((t.a < 1:Int32))) OR IsNull(t.t)) OR (t.a < 1:Int32)) } - │ │ └─StreamTableScan { table: t, columns: [t.t, t.a, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ │ └─StreamTableScan { table: t, columns: [t.t, t.a, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } │ └─StreamExchange { dist: Broadcast } │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '01:00:00':Interval, 'UTC':Varchar) as $expr2], output_watermarks: [$expr2] } │ └─StreamNow { output: [now] } @@ -484,7 +464,7 @@ │ ├─StreamFilter { predicate: (Not((t.a > 1:Int32)) OR (t.a > 1:Int32)) AND Not(IsNull(t.t)) AND Not((t.a < 1:Int32)) } │ │ └─StreamShare { id: 2 } │ │ └─StreamFilter { predicate: (Not((t.a > 1:Int32)) OR (t.a > 1:Int32)) AND (((Not(IsNull(t.t)) AND Not((t.a < 1:Int32))) OR IsNull(t.t)) OR (t.a < 1:Int32)) } - │ │ └─StreamTableScan { table: t, columns: [t.t, t.a, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ │ └─StreamTableScan { table: t, columns: [t.t, t.a, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } │ └─StreamExchange { dist: Broadcast } │ └─StreamProject { exprs: [SubtractWithTimeZone(now, '01:00:00':Interval, 'UTC':Varchar) as $expr1], output_watermarks: [$expr1] } │ └─StreamNow { output: [now] } @@ -493,4 +473,4 @@ └─StreamFilter { predicate: (Not((t.a > 1:Int32)) OR (t.a > 1:Int32)) AND (IsNull(t.t) OR (t.a < 1:Int32)) } └─StreamShare { id: 2 } └─StreamFilter { predicate: (Not((t.a > 1:Int32)) OR (t.a > 1:Int32)) AND (((Not(IsNull(t.t)) AND Not((t.a < 1:Int32))) OR IsNull(t.t)) OR (t.a < 1:Int32)) } - └─StreamTableScan { table: t, columns: [t.t, t.a, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.t, t.a, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } diff --git a/src/frontend/planner_test/tests/testdata/output/temporal_join.yaml b/src/frontend/planner_test/tests/testdata/output/temporal_join.yaml index 0a8845ecfbe42..ff481af78664d 100644 --- a/src/frontend/planner_test/tests/testdata/output/temporal_join.yaml +++ b/src/frontend/planner_test/tests/testdata/output/temporal_join.yaml @@ -9,7 +9,7 @@ └─StreamExchange { dist: HashShard(stream.id1, stream._row_id) } └─StreamTemporalJoin { type: LeftOuter, predicate: stream.id1 = version.id2, output: [stream.id1, stream.a1, version.id2, version.a2, stream._row_id] } ├─StreamExchange { dist: HashShard(stream.id1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ └─StreamTableScan { 
table: stream, columns: [stream.id1, stream.a1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2) } └─StreamTableScan { table: version, columns: [version.id2, version.a2], stream_scan_type: UpstreamOnly, pk: [version.id2], dist: UpstreamHashShard(version.id2) } batch_error: |- @@ -25,7 +25,7 @@ └─StreamExchange { dist: HashShard(stream.id1, stream._row_id) } └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version.id2 AND (version.a2 < 10:Int32), output: [stream.id1, stream.a1, version.id2, version.a2, stream._row_id] } ├─StreamExchange { dist: HashShard(stream.id1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2) } └─StreamTableScan { table: version, columns: [version.id2, version.a2], stream_scan_type: UpstreamOnly, pk: [version.id2], dist: UpstreamHashShard(version.id2) } - name: implicit join with temporal tables @@ -38,7 +38,7 @@ └─StreamExchange { dist: HashShard(stream.id1, stream._row_id) } └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version.id2 AND (version.a2 < 10:Int32), output: [stream.id1, stream.a1, version.id2, version.a2, stream._row_id] } ├─StreamExchange { dist: HashShard(stream.id1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2) } └─StreamTableScan { table: version, columns: [version.id2, version.a2], stream_scan_type: UpstreamOnly, pk: [version.id2], dist: UpstreamHashShard(version.id2) } - name: Multi join key for temporal join @@ -51,7 +51,7 @@ └─StreamExchange { dist: HashShard(stream.id1, stream.a1, stream._row_id) } └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version.id2 AND stream.a1 = version.a2 AND (version.b2 <> version.a2), output: [stream.id1, stream.a1, version.id2, version.a2, stream._row_id] } ├─StreamExchange { dist: HashShard(stream.id1, stream.a1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2, version.a2) } └─StreamTableScan { table: version, columns: [version.id2, version.a2, version.b2], stream_scan_type: UpstreamOnly, pk: [version.id2, version.a2], dist: UpstreamHashShard(version.id2, version.a2) } - name: Temporal join with Aggregation @@ -67,7 +67,7 @@ └─StreamStatelessSimpleAgg { aggs: [count] } └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version.id2 AND (version.a2 < 10:Int32), output: 
[stream._row_id, stream.id1, version.id2] } ├─StreamExchange { dist: HashShard(stream.id1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2) } └─StreamTableScan { table: version, columns: [version.id2, version.a2], stream_scan_type: UpstreamOnly, pk: [version.id2], dist: UpstreamHashShard(version.id2) } - name: Temporal join join keys requirement test @@ -111,7 +111,7 @@ │ └─StreamTemporalJoin { type: Inner, predicate: stream.k = version1.k, output: [stream.k, stream.a1, stream.b1, version1.x1, stream._row_id, version1.k] } │ ├─StreamExchange { dist: HashShard(stream.k) } │ │ └─StreamFilter { predicate: (stream.a1 < 10:Int32) } - │ │ └─StreamTableScan { table: stream, columns: [stream.k, stream.a1, stream.b1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ │ └─StreamTableScan { table: stream, columns: [stream.k, stream.a1, stream.b1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } │ └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version1.k) } │ └─StreamTableScan { table: version1, columns: [version1.k, version1.x1], stream_scan_type: UpstreamOnly, pk: [version1.k], dist: UpstreamHashShard(version1.k) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version2.k) } @@ -133,7 +133,7 @@ │ └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version1.id1, output: [stream.id1, stream.id2, stream.a1, stream.b1, version1.x1, stream._row_id, version1.id1] } │ ├─StreamExchange { dist: HashShard(stream.id1) } │ │ └─StreamFilter { predicate: (stream.a1 < 10:Int32) } - │ │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.id2, stream.a1, stream.b1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.id2, stream.a1, stream.b1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } │ └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version1.id1) } │ └─StreamTableScan { table: version1, columns: [version1.id1, version1.x1], stream_scan_type: UpstreamOnly, pk: [version1.id1], dist: UpstreamHashShard(version1.id1) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version2.id2) } @@ -155,7 +155,7 @@ │ └─StreamTemporalJoin { type: Inner, predicate: stream.id1 = version1.id1, output: [stream.id1, stream.id2, stream.a1, stream.b1, version1.x1, stream._row_id, version1.id1] } │ ├─StreamExchange { dist: HashShard(stream.id1) } │ │ └─StreamFilter { predicate: (stream.a1 < 10:Int32) } - │ │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.id2, stream.a1, stream.b1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.id2, stream.a1, stream.b1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } │ └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version1.id1) } │ 
└─StreamTableScan { table: version1, columns: [version1.id1, version1.x1], stream_scan_type: UpstreamOnly, pk: [version1.id1], dist: UpstreamHashShard(version1.id1) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version2.id2) } @@ -171,7 +171,7 @@ └─StreamExchange { dist: HashShard(stream.a1, idx2.id2, stream._row_id, stream.b1) } └─StreamTemporalJoin { type: LeftOuter, predicate: stream.a1 = idx2.a2 AND stream.b1 = idx2.b2, output: [stream.id1, stream.a1, idx2.id2, idx2.a2, stream._row_id, stream.b1] } ├─StreamExchange { dist: HashShard(stream.a1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx2.a2) } └─StreamTableScan { table: idx2, columns: [idx2.a2, idx2.b2, idx2.id2], stream_scan_type: UpstreamOnly, pk: [idx2.id2], dist: UpstreamHashShard(idx2.a2) } - name: temporal join with an index (distribution key size = 2) @@ -185,7 +185,7 @@ └─StreamExchange { dist: HashShard(stream.a1, idx2.id2, stream._row_id, stream.b1) } └─StreamTemporalJoin { type: LeftOuter, predicate: stream.a1 = idx2.a2 AND stream.b1 = idx2.b2, output: [stream.id1, stream.a1, idx2.id2, idx2.a2, stream._row_id, stream.b1] } ├─StreamExchange { dist: HashShard(stream.a1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx2.a2) } └─StreamTableScan { table: idx2, columns: [idx2.a2, idx2.b2, idx2.id2], stream_scan_type: UpstreamOnly, pk: [idx2.id2], dist: UpstreamHashShard(idx2.a2) } - name: temporal join with an index (index column size = 1) @@ -199,7 +199,7 @@ └─StreamExchange { dist: HashShard(stream.a1, idx2.id2, stream._row_id, stream.b1) } └─StreamTemporalJoin { type: LeftOuter, predicate: stream.b1 = idx2.b2 AND (stream.a1 = idx2.a2), output: [stream.id1, stream.a1, idx2.id2, idx2.a2, stream._row_id, stream.b1] } ├─StreamExchange { dist: HashShard(stream.b1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx2.b2) } └─StreamTableScan { table: idx2, columns: [idx2.b2, idx2.id2, idx2.a2], stream_scan_type: UpstreamOnly, pk: [idx2.id2], dist: UpstreamHashShard(idx2.b2) } - name: temporal join with singleton table @@ -212,7 +212,7 @@ └─StreamTemporalJoin { type: LeftOuter, predicate: AND ($expr1 = v.count), output: [t.a, v.count, t._row_id, $expr1] } ├─StreamExchange { dist: Single } │ └─StreamProject { exprs: [t.a, t.a::Int64 as $expr1, t._row_id] } - │ └─StreamTableScan { table: t, columns: [t.a, t._row_id], 
stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.a, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamExchange [no_shuffle] { dist: Single } └─StreamTableScan { table: v, columns: [v.count], stream_scan_type: UpstreamOnly, pk: [], dist: Single } - name: index selection for temporal join (with one index). @@ -226,7 +226,7 @@ └─StreamExchange { dist: HashShard(stream.a1, idx.id2, stream._row_id, stream.b1) } └─StreamTemporalJoin { type: LeftOuter, predicate: stream.a1 = idx.a2 AND stream.b1 = idx.b2, output: [stream.id1, stream.a1, idx.id2, idx.a2, stream._row_id, stream.b1] } ├─StreamExchange { dist: HashShard(stream.a1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx.a2) } └─StreamTableScan { table: idx, columns: [idx.id2, idx.a2, idx.b2], stream_scan_type: UpstreamOnly, pk: [idx.id2], dist: UpstreamHashShard(idx.a2) } - name: index selection for temporal join (with two indexes) and should choose the index with a longer prefix.. @@ -241,7 +241,7 @@ └─StreamExchange { dist: HashShard(stream.a1, idx2.id2, stream._row_id, stream.b1) } └─StreamTemporalJoin { type: LeftOuter, predicate: stream.a1 = idx2.a2 AND stream.b1 = idx2.b2, output: [stream.id1, stream.a1, idx2.id2, idx2.a2, stream._row_id, stream.b1] } ├─StreamExchange { dist: HashShard(stream.a1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(idx2.a2) } └─StreamTableScan { table: idx2, columns: [idx2.id2, idx2.a2, idx2.b2], stream_scan_type: UpstreamOnly, pk: [idx2.id2], dist: UpstreamHashShard(idx2.a2) } - name: index selection for temporal join (with three indexes) and should choose primary table. 
@@ -257,7 +257,7 @@ └─StreamExchange { dist: HashShard(stream.id1, stream.a1, stream._row_id, stream.b1, stream.c1) } └─StreamTemporalJoin { type: LeftOuter, predicate: stream.id1 = version.id2 AND (stream.a1 = version.a2) AND (stream.b1 = version.b2) AND (stream.c1 = version.c2), output: [stream.id1, stream.a1, version.id2, version.a2, stream._row_id, stream.b1, stream.c1] } ├─StreamExchange { dist: HashShard(stream.id1) } - │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream.c1, stream._row_id], stream_scan_type: Backfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } + │ └─StreamTableScan { table: stream, columns: [stream.id1, stream.a1, stream.b1, stream.c1, stream._row_id], stream_scan_type: ArrangementBackfill, pk: [stream._row_id], dist: UpstreamHashShard(stream._row_id) } └─StreamExchange [no_shuffle] { dist: UpstreamHashShard(version.id2) } └─StreamTableScan { table: version, columns: [version.id2, version.a2, version.b2, version.c2], stream_scan_type: UpstreamOnly, pk: [version.id2], dist: UpstreamHashShard(version.id2) } - name: index selection for temporal join (two index) and no one matches. diff --git a/src/frontend/planner_test/tests/testdata/output/time_window.yaml b/src/frontend/planner_test/tests/testdata/output/time_window.yaml index 69422637c0375..55e42e015d7f9 100644 --- a/src/frontend/planner_test/tests/testdata/output/time_window.yaml +++ b/src/frontend/planner_test/tests/testdata/output/time_window.yaml @@ -70,7 +70,7 @@ StreamMaterialize { columns: [id, created_at, window_start, window_end, t1._row_id(hidden)], stream_key: [t1._row_id, window_start, window_end], pk_columns: [t1._row_id, window_start, window_end], pk_conflict: NoCheck } └─StreamHopWindow { time_col: t1.created_at, slide: 1 day, size: 3 days, output: [t1.id, t1.created_at, window_start, window_end, t1._row_id] } └─StreamFilter { predicate: IsNotNull(t1.created_at) } - └─StreamTableScan { table: t1, columns: [t1.id, t1.created_at, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.id, t1.created_at, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - sql: | create table t1 (id int, created_at date); select id, created_at, window_start from hop(t1, created_at, interval '1' day, interval '3' day); @@ -83,7 +83,7 @@ StreamMaterialize { columns: [id, created_at, window_start, t1._row_id(hidden)], stream_key: [t1._row_id, window_start], pk_columns: [t1._row_id, window_start], pk_conflict: NoCheck } └─StreamHopWindow { time_col: t1.created_at, slide: 1 day, size: 3 days, output: [t1.id, t1.created_at, window_start, t1._row_id] } └─StreamFilter { predicate: IsNotNull(t1.created_at) } - └─StreamTableScan { table: t1, columns: [t1.id, t1.created_at, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.id, t1.created_at, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - sql: | create table t1 (id int, created_at date); select id, created_at, window_end from hop(t1, created_at, interval '1' day, interval '3' day); @@ -96,7 +96,7 @@ StreamMaterialize { columns: [id, created_at, window_end, t1._row_id(hidden)], stream_key: [t1._row_id, window_end], pk_columns: [t1._row_id, window_end], pk_conflict: NoCheck } └─StreamHopWindow { time_col: t1.created_at, slide: 1 day, 
size: 3 days, output: [t1.id, t1.created_at, window_end, t1._row_id] } └─StreamFilter { predicate: IsNotNull(t1.created_at) } - └─StreamTableScan { table: t1, columns: [t1.id, t1.created_at, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.id, t1.created_at, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - sql: | create table t1 (id int, created_at date); select id, created_at from hop(t1, created_at, interval '1' day, interval '3' day); @@ -114,7 +114,7 @@ StreamMaterialize { columns: [id, created_at, window_start(hidden), t1._row_id(hidden)], stream_key: [t1._row_id, window_start], pk_columns: [t1._row_id, window_start], pk_conflict: NoCheck } └─StreamHopWindow { time_col: t1.created_at, slide: 1 day, size: 3 days, output: [t1.id, t1.created_at, window_start, t1._row_id] } └─StreamFilter { predicate: IsNotNull(t1.created_at) } - └─StreamTableScan { table: t1, columns: [t1.id, t1.created_at, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.id, t1.created_at, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - sql: | create table t1 (id int, created_at date); select t_hop.id, t_hop.created_at from hop(t1, created_at, interval '1' day, interval '3' day) as t_hop; @@ -132,7 +132,7 @@ StreamMaterialize { columns: [id, created_at, window_start(hidden), t1._row_id(hidden)], stream_key: [t1._row_id, window_start], pk_columns: [t1._row_id, window_start], pk_conflict: NoCheck } └─StreamHopWindow { time_col: t1.created_at, slide: 1 day, size: 3 days, output: [t1.id, t1.created_at, window_start, t1._row_id] } └─StreamFilter { predicate: IsNotNull(t1.created_at) } - └─StreamTableScan { table: t1, columns: [t1.id, t1.created_at, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.id, t1.created_at, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - sql: | create table t (v1 varchar, v2 timestamp, v3 float); select v1, window_end, avg(v3) as avg from hop( t, v2, interval '1' minute, interval '10' minute) group by v1, window_end; @@ -158,7 +158,7 @@ └─StreamExchange { dist: HashShard(t.v1, window_end) } └─StreamHopWindow { time_col: t.v2, slide: 00:01:00, size: 00:10:00, output: [t.v1, t.v3, window_end, t._row_id] } └─StreamFilter { predicate: IsNotNull(t.v2) } - └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.v1, t.v2, t.v3, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t1 (id int, v1 int, created_at date); with t2 as (select * from t1 where v1 >= 10) @@ -181,7 +181,7 @@ └─StreamProject { exprs: [t1.id, t1.v1, t1.created_at, $expr1, ($expr1 + '3 days':Interval) as $expr2, t1._row_id] } └─StreamProject { exprs: [t1.id, t1.v1, t1.created_at, TumbleStart(t1.created_at, '3 days':Interval) as $expr1, t1._row_id] } └─StreamFilter { predicate: (t1.v1 >= 10:Int32) } - └─StreamTableScan { table: t1, columns: [t1.id, t1.v1, t1.created_at, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + 
└─StreamTableScan { table: t1, columns: [t1.id, t1.v1, t1.created_at, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - sql: | create table t1 (id int, v1 int, created_at date); with t2 as (select * from t1 where v1 >= 10) @@ -203,7 +203,7 @@ StreamMaterialize { columns: [id, v1, created_at, window_start, window_end, t1._row_id(hidden)], stream_key: [t1._row_id, window_start, window_end], pk_columns: [t1._row_id, window_start, window_end], pk_conflict: NoCheck } └─StreamHopWindow { time_col: t1.created_at, slide: 1 day, size: 3 days, output: [t1.id, t1.v1, t1.created_at, window_start, window_end, t1._row_id] } └─StreamFilter { predicate: IsNotNull(t1.created_at) AND (t1.v1 >= 10:Int32) } - └─StreamTableScan { table: t1, columns: [t1.id, t1.v1, t1.created_at, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamTableScan { table: t1, columns: [t1.id, t1.v1, t1.created_at, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } - sql: | with t(ts) as (values ('2020-01-01 12:00:00'::timestamp)) select * from tumble(t, ts, interval '10' second) as z; logical_plan: |- diff --git a/src/frontend/planner_test/tests/testdata/output/tpch.yaml b/src/frontend/planner_test/tests/testdata/output/tpch.yaml index c7f614e2abfde..93c289488a0c4 100644 --- a/src/frontend/planner_test/tests/testdata/output/tpch.yaml +++ b/src/frontend/planner_test/tests/testdata/output/tpch.yaml @@ -156,7 +156,7 @@ └─StreamProject { exprs: [lineitem.l_returnflag, lineitem.l_linestatus, lineitem.l_quantity, lineitem.l_extendedprice, $expr1, ($expr1 * (1:Decimal + lineitem.l_tax)) as $expr2, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber] } └─StreamProject { exprs: [lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_tax, lineitem.l_returnflag, lineitem.l_linestatus, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr1, lineitem.l_orderkey, lineitem.l_linenumber] } └─StreamFilter { predicate: (lineitem.l_shipdate <= '1998-09-21 00:00:00':Timestamp) } - └─StreamTableScan { table: lineitem, columns: [lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_tax, lineitem.l_returnflag, lineitem.l_linestatus, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └─StreamTableScan { table: lineitem, columns: [lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_tax, lineitem.l_returnflag, lineitem.l_linestatus, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [l_returnflag, l_linestatus, sum_qty, sum_base_price, sum_disc_price, sum_charge, avg_qty, avg_price, avg_disc, count_order], stream_key: [], pk_columns: [l_returnflag, l_linestatus], pk_conflict: NoCheck } @@ -175,7 +175,7 @@ StreamProject { exprs: [lineitem.l_returnflag, lineitem.l_linestatus, lineitem.l_quantity, lineitem.l_extendedprice, $expr1, ($expr1 * (1:Decimal + lineitem.l_tax)) as $expr2, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber] } └── StreamProject { exprs: [lineitem.l_quantity, 
lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_tax, lineitem.l_returnflag, lineitem.l_linestatus, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr1, lineitem.l_orderkey, lineitem.l_linenumber] } └── StreamFilter { predicate: (lineitem.l_shipdate <= '1998-09-21 00:00:00':Timestamp) } - └── StreamTableScan { table: lineitem, columns: [lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_tax, lineitem.l_returnflag, lineitem.l_linestatus, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } { tables: [ StreamScan: 3 ] } + └── StreamTableScan { table: lineitem, columns: [lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_tax, lineitem.l_returnflag, lineitem.l_linestatus, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } { tables: [ StreamScan: 3 ] } ├── Upstream └── BatchPlanNode @@ -322,7 +322,7 @@ │ ├─StreamExchange { dist: HashShard(part.p_partkey) } │ │ └─StreamProject { exprs: [part.p_partkey, part.p_mfgr] } │ │ └─StreamFilter { predicate: (part.p_size = 4:Int32) AND Like(part.p_type, '%TIN':Varchar) } - │ │ └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_mfgr, part.p_type, part.p_size], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } + │ │ └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_mfgr, part.p_type, part.p_size], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } │ └─StreamProject { exprs: [partsupp.ps_partkey, min(partsupp.ps_supplycost)] } │ └─StreamHashAgg { group_key: [partsupp.ps_partkey], aggs: [min(partsupp.ps_supplycost), count] } │ └─StreamExchange { dist: HashShard(partsupp.ps_partkey) } @@ -330,17 +330,17 @@ │ ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } │ │ └─StreamHashJoin { type: Inner, predicate: partsupp.ps_suppkey = supplier.s_suppkey, output: [partsupp.ps_partkey, partsupp.ps_supplycost, supplier.s_nationkey, partsupp.ps_suppkey, supplier.s_suppkey] } │ │ ├─StreamExchange { dist: HashShard(partsupp.ps_suppkey) } - │ │ │ └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_supplycost], stream_scan_type: Backfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } + │ │ │ └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_supplycost], stream_scan_type: ArrangementBackfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } │ │ └─StreamExchange { dist: HashShard(supplier.s_suppkey) } - │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } │ └─StreamExchange { dist: HashShard(nation.n_nationkey) } │ └─StreamHashJoin { type: Inner, predicate: 
region.r_regionkey = nation.n_regionkey, output: [nation.n_nationkey, region.r_regionkey] } │ ├─StreamExchange { dist: HashShard(region.r_regionkey) } │ │ └─StreamProject { exprs: [region.r_regionkey] } │ │ └─StreamFilter { predicate: (region.r_name = 'AFRICA':Varchar) } - │ │ └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } + │ │ └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } │ └─StreamExchange { dist: HashShard(nation.n_regionkey) } - │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } └─StreamExchange { dist: HashShard(partsupp.ps_partkey, partsupp.ps_supplycost) } └─StreamHashJoin { type: Inner, predicate: supplier.s_suppkey = partsupp.ps_suppkey, output: [nation.n_name, supplier.s_name, supplier.s_address, supplier.s_phone, supplier.s_acctbal, supplier.s_comment, partsupp.ps_partkey, partsupp.ps_supplycost, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, partsupp.ps_suppkey] } ├─StreamExchange { dist: HashShard(supplier.s_suppkey) } @@ -350,14 +350,14 @@ │ │ ├─StreamExchange { dist: HashShard(region.r_regionkey) } │ │ │ └─StreamProject { exprs: [region.r_regionkey] } │ │ │ └─StreamFilter { predicate: (region.r_name = 'AFRICA':Varchar) } - │ │ │ └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } + │ │ │ └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } │ │ └─StreamExchange { dist: HashShard(nation.n_regionkey) } - │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } │ └─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey, supplier.s_phone, supplier.s_acctbal, supplier.s_comment], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey, supplier.s_phone, supplier.s_acctbal, supplier.s_comment], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } └─StreamExchange { dist: HashShard(partsupp.ps_suppkey) } └─StreamFilter { predicate: (partsupp.ps_partkey = partsupp.ps_partkey) } - └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, 
partsupp.ps_supplycost], stream_scan_type: Backfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } + └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_supplycost], stream_scan_type: ArrangementBackfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment, min(partsupp.ps_supplycost)(hidden), partsupp.ps_partkey(hidden), region.r_regionkey(hidden), nation.n_nationkey(hidden), supplier.s_suppkey(hidden), partsupp.ps_partkey#1(hidden)], stream_key: [p_partkey, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, min(partsupp.ps_supplycost), partsupp.ps_partkey], pk_columns: [s_acctbal, n_name, s_name, p_partkey, region.r_regionkey, nation.n_nationkey, supplier.s_suppkey, min(partsupp.ps_supplycost), partsupp.ps_partkey], pk_conflict: NoCheck } @@ -383,7 +383,7 @@ Fragment 3 StreamProject { exprs: [part.p_partkey, part.p_mfgr] } └── StreamFilter { predicate: (part.p_size = 4:Int32) AND Like(part.p_type, '%TIN':Varchar) } - └── StreamTableScan { table: part, columns: [part.p_partkey, part.p_mfgr, part.p_type, part.p_size], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 10 ] } + └── StreamTableScan { table: part, columns: [part.p_partkey, part.p_mfgr, part.p_type, part.p_size], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 10 ] } ├── Upstream └── BatchPlanNode @@ -398,12 +398,12 @@ └── StreamExchange Hash([0]) from 7 Fragment 6 - StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_supplycost], stream_scan_type: Backfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } { tables: [ StreamScan: 21 ] } + StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_supplycost], stream_scan_type: ArrangementBackfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } { tables: [ StreamScan: 21 ] } ├── Upstream └── BatchPlanNode Fragment 7 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 22 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 22 ] } ├── Upstream └── BatchPlanNode @@ -415,12 +415,12 @@ Fragment 9 StreamProject { exprs: [region.r_regionkey] } └── StreamFilter { predicate: (region.r_name = 'AFRICA':Varchar) } - └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 27 ] } + └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 27 ] } ├── Upstream └── 
BatchPlanNode Fragment 10 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 28 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 28 ] } ├── Upstream └── BatchPlanNode @@ -442,23 +442,23 @@ Fragment 14 StreamProject { exprs: [region.r_regionkey] } └── StreamFilter { predicate: (region.r_name = 'AFRICA':Varchar) } - └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 41 ] } + └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 41 ] } ├── Upstream └── BatchPlanNode Fragment 15 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 42 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 42 ] } ├── Upstream └── BatchPlanNode Fragment 16 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey, supplier.s_phone, supplier.s_acctbal, supplier.s_comment], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 43 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey, supplier.s_phone, supplier.s_acctbal, supplier.s_comment], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 43 ] } ├── Upstream └── BatchPlanNode Fragment 17 StreamFilter { predicate: (partsupp.ps_partkey = partsupp.ps_partkey) } - └── StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_supplycost], stream_scan_type: Backfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } { tables: [ StreamScan: 44 ] } + └── StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_supplycost], stream_scan_type: ArrangementBackfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } { tables: [ StreamScan: 44 ] } ├── Upstream └── BatchPlanNode @@ -640,14 +640,14 @@ │ ├─StreamExchange { dist: HashShard(customer.c_custkey) } │ │ └─StreamProject { exprs: [customer.c_custkey] } │ │ └─StreamFilter { predicate: (customer.c_mktsegment = 'FURNITURE':Varchar) } - │ │ └─StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_mktsegment], stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } + │ │ └─StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_mktsegment], 
stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } │ └─StreamExchange { dist: HashShard(orders.o_custkey) } │ └─StreamFilter { predicate: (orders.o_orderdate < '1995-03-29':Date) } - │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate, orders.o_shippriority], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } + │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate, orders.o_shippriority], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } └─StreamExchange { dist: HashShard(lineitem.l_orderkey) } └─StreamProject { exprs: [lineitem.l_orderkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber] } └─StreamFilter { predicate: (lineitem.l_shipdate > '1995-03-29':Date) } - └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [l_orderkey, revenue, o_orderdate, o_shippriority], stream_key: [l_orderkey, o_orderdate, o_shippriority], pk_columns: [revenue, o_orderdate, l_orderkey, o_shippriority], pk_conflict: NoCheck } @@ -678,20 +678,20 @@ Fragment 4 StreamProject { exprs: [customer.c_custkey] } └── StreamFilter { predicate: (customer.c_mktsegment = 'FURNITURE':Varchar) } - └── StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_mktsegment], stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } { tables: [ StreamScan: 11 ] } + └── StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_mktsegment], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } { tables: [ StreamScan: 11 ] } ├── Upstream └── BatchPlanNode Fragment 5 StreamFilter { predicate: (orders.o_orderdate < '1995-03-29':Date) } - └── StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate, orders.o_shippriority], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 12 ] } + └── StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate, orders.o_shippriority], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 12 ] } ├── Upstream └── BatchPlanNode Fragment 6 StreamProject { exprs: [lineitem.l_orderkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber] } └── StreamFilter { predicate: (lineitem.l_shipdate > '1995-03-29':Date) } - └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, 
lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 13 ] ├── Upstream └── BatchPlanNode @@ -797,11 +797,11 @@ ├─StreamExchange { dist: HashShard(orders.o_orderkey) } │ └─StreamProject { exprs: [orders.o_orderkey, orders.o_orderpriority] } │ └─StreamFilter { predicate: (orders.o_orderdate >= '1997-07-01':Date) AND (orders.o_orderdate < '1997-10-01 00:00:00':Timestamp) } - │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderpriority, orders.o_orderdate], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } + │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderpriority, orders.o_orderdate], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } └─StreamExchange { dist: HashShard(lineitem.l_orderkey) } └─StreamProject { exprs: [lineitem.l_orderkey, lineitem.l_linenumber] } └─StreamFilter { predicate: (lineitem.l_commitdate < lineitem.l_receiptdate) } - └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [o_orderpriority, order_count], stream_key: [], pk_columns: [o_orderpriority], pk_conflict: NoCheck } @@ -825,7 +825,7 @@ Fragment 3 StreamProject { exprs: [orders.o_orderkey, orders.o_orderpriority] } └── StreamFilter { predicate: (orders.o_orderdate >= '1997-07-01':Date) AND (orders.o_orderdate < '1997-10-01 00:00:00':Timestamp) } - └── StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderpriority, orders.o_orderdate], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } + └── StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderpriority, orders.o_orderdate], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } ├── tables: [ StreamScan: 7 ] ├── Upstream └── BatchPlanNode @@ -833,7 +833,7 @@ Fragment 4 StreamProject { exprs: [lineitem.l_orderkey, lineitem.l_linenumber] } └── StreamFilter { predicate: (lineitem.l_commitdate < lineitem.l_receiptdate) } - └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: ArrangementBackfill, pk: 
[lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 8 ] ├── Upstream └── BatchPlanNode @@ -961,10 +961,10 @@ │ ├─StreamExchange { dist: HashShard(region.r_regionkey) } │ │ └─StreamProject { exprs: [region.r_regionkey] } │ │ └─StreamFilter { predicate: (region.r_name = 'MIDDLE EAST':Varchar) } - │ │ └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } + │ │ └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } │ └─StreamExchange { dist: HashShard(nation.n_regionkey) } │ └─StreamFilter { predicate: (nation.n_nationkey = nation.n_nationkey) } - │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } └─StreamExchange { dist: HashShard(customer.c_nationkey, supplier.s_nationkey) } └─StreamHashJoin { type: Inner, predicate: orders.o_orderkey = lineitem.l_orderkey AND customer.c_nationkey = supplier.s_nationkey, output: [customer.c_nationkey, lineitem.l_extendedprice, lineitem.l_discount, supplier.s_nationkey, orders.o_orderkey, orders.o_custkey, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_suppkey] } ├─StreamExchange { dist: HashShard(orders.o_orderkey, customer.c_nationkey) } @@ -972,15 +972,15 @@ │ ├─StreamExchange { dist: HashShard(orders.o_custkey) } │ │ └─StreamProject { exprs: [orders.o_orderkey, orders.o_custkey] } │ │ └─StreamFilter { predicate: (orders.o_orderdate >= '1994-01-01':Date) AND (orders.o_orderdate < '1995-01-01 00:00:00':Timestamp) } - │ │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } + │ │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } │ └─StreamExchange { dist: HashShard(customer.c_custkey) } - │ └─StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_nationkey], stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } + │ └─StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_nationkey], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } └─StreamExchange { dist: HashShard(lineitem.l_orderkey, supplier.s_nationkey) } └─StreamHashJoin { type: Inner, predicate: lineitem.l_suppkey = supplier.s_suppkey, output: [lineitem.l_orderkey, lineitem.l_extendedprice, lineitem.l_discount, supplier.s_nationkey, lineitem.l_linenumber, lineitem.l_suppkey, supplier.s_suppkey] } ├─StreamExchange { dist: HashShard(lineitem.l_suppkey) } - │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber], stream_scan_type: 
Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } └─StreamExchange { dist: HashShard(supplier.s_suppkey) } - └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [n_name, revenue], stream_key: [], pk_columns: [revenue], pk_conflict: NoCheck } @@ -1012,13 +1012,13 @@ Fragment 4 StreamProject { exprs: [region.r_regionkey] } └── StreamFilter { predicate: (region.r_name = 'MIDDLE EAST':Varchar) } - └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 11 ] } + └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 11 ] } ├── Upstream └── BatchPlanNode Fragment 5 StreamFilter { predicate: (nation.n_nationkey = nation.n_nationkey) } - └── StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 12 ] } + └── StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 12 ] } ├── Upstream └── BatchPlanNode @@ -1036,12 +1036,12 @@ Fragment 8 StreamProject { exprs: [orders.o_orderkey, orders.o_custkey] } └── StreamFilter { predicate: (orders.o_orderdate >= '1994-01-01':Date) AND (orders.o_orderdate < '1995-01-01 00:00:00':Timestamp) } - └── StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 21 ] } + └── StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 21 ] } ├── Upstream └── BatchPlanNode Fragment 9 - StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_nationkey], stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } { tables: [ StreamScan: 22 ] } + StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_nationkey], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } { tables: [ StreamScan: 22 ] } ├── Upstream └── BatchPlanNode @@ -1051,12 +1051,12 @@ └── StreamExchange 
Hash([0]) from 12 Fragment 11 - StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } { tables: [ StreamScan: 27 ] } + StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } { tables: [ StreamScan: 27 ] } ├── Upstream └── BatchPlanNode Fragment 12 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 28 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 28 ] } ├── Upstream └── BatchPlanNode @@ -1163,7 +1163,7 @@ └─StreamStatelessSimpleAgg { aggs: [sum($expr1)] } └─StreamProject { exprs: [(lineitem.l_extendedprice * lineitem.l_discount) as $expr1, lineitem.l_orderkey, lineitem.l_linenumber] } └─StreamFilter { predicate: (lineitem.l_shipdate >= '1994-01-01':Date) AND (lineitem.l_shipdate < '1995-01-01 00:00:00':Timestamp) AND (lineitem.l_discount >= 0.07:Decimal) AND (lineitem.l_discount <= 0.09:Decimal) AND (lineitem.l_quantity < 24:Decimal) } - └─StreamTableScan { table: lineitem, columns: [lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_quantity, lineitem.l_shipdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └─StreamTableScan { table: lineitem, columns: [lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_quantity, lineitem.l_shipdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [revenue], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -1176,7 +1176,7 @@ StreamStatelessSimpleAgg { aggs: [sum($expr1)] } └── StreamProject { exprs: [(lineitem.l_extendedprice * lineitem.l_discount) as $expr1, lineitem.l_orderkey, lineitem.l_linenumber] } └── StreamFilter { predicate: (lineitem.l_shipdate >= '1994-01-01':Date) AND (lineitem.l_shipdate < '1995-01-01 00:00:00':Timestamp) AND (lineitem.l_discount >= 0.07:Decimal) AND (lineitem.l_discount <= 0.09:Decimal) AND (lineitem.l_quantity < 24:Decimal) } - └── StreamTableScan { table: lineitem, columns: [lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_quantity, lineitem.l_shipdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └── StreamTableScan { table: lineitem, columns: [lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_quantity, lineitem.l_shipdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, 
lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -1302,22 +1302,22 @@ │ ├─StreamExchange { dist: HashShard(supplier.s_suppkey) } │ │ └─StreamHashJoin { type: Inner, predicate: nation.n_nationkey = supplier.s_nationkey, output: [nation.n_name, supplier.s_suppkey, nation.n_nationkey] } │ │ ├─StreamExchange { dist: HashShard(nation.n_nationkey) } - │ │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } │ │ └─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } │ └─StreamExchange { dist: HashShard(lineitem.l_suppkey) } │ └─StreamFilter { predicate: (lineitem.l_shipdate >= '1983-01-01':Date) AND (lineitem.l_shipdate <= '2000-12-31':Date) } - │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_shipdate, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_shipdate, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } └─StreamExchange { dist: HashShard(orders.o_orderkey) } └─StreamHashJoin { type: Inner, predicate: customer.c_custkey = orders.o_custkey, output: [nation.n_name, orders.o_orderkey, nation.n_nationkey, customer.c_custkey] } ├─StreamExchange { dist: HashShard(customer.c_custkey) } │ └─StreamHashJoin { type: Inner, predicate: nation.n_nationkey = customer.c_nationkey, output: [nation.n_name, customer.c_custkey, nation.n_nationkey] } │ ├─StreamExchange { dist: HashShard(nation.n_nationkey) } - │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } │ └─StreamExchange { dist: HashShard(customer.c_nationkey) } - │ └─StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_nationkey], stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } + │ └─StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_nationkey], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } └─StreamExchange { dist: 
HashShard(orders.o_custkey) } - └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } + └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [supp_nation, cust_nation, l_year, revenue], stream_key: [], pk_columns: [supp_nation, cust_nation, l_year], pk_conflict: NoCheck } @@ -1351,18 +1351,18 @@ └── StreamExchange Hash([1]) from 6 Fragment 5 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 15 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 15 ] } ├── Upstream └── BatchPlanNode Fragment 6 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 16 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 16 ] } ├── Upstream └── BatchPlanNode Fragment 7 StreamFilter { predicate: (lineitem.l_shipdate >= '1983-01-01':Date) AND (lineitem.l_shipdate <= '2000-12-31':Date) } - └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_shipdate, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_shipdate, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 17 ] ├── Upstream └── BatchPlanNode @@ -1378,17 +1378,17 @@ └── StreamExchange Hash([1]) from 11 Fragment 10 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 26 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 26 ] } ├── Upstream └── BatchPlanNode Fragment 11 - StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_nationkey], stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } { tables: [ StreamScan: 27 ] } + StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_nationkey], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } { tables: [ StreamScan: 27 ] } ├── Upstream └── BatchPlanNode Fragment 12 - 
StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 28 ] } + StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 28 ] } ├── Upstream └── BatchPlanNode @@ -1398,12 +1398,7 @@ Table 2 { columns: [ nation_n_name, nation_n_name_0, $expr1, sum($expr2), count ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3, 4 ], distribution key: [ 0, 1, 2 ], read pk prefix len hint: 3 } - Table 3 - ├── columns: [ nation_n_name, lineitem_l_orderkey, lineitem_l_extendedprice, lineitem_l_discount, lineitem_l_shipdate, nation_n_nationkey, supplier_s_suppkey, lineitem_l_linenumber ] - ├── primary key: [ $1 ASC, $5 ASC, $6 ASC, $7 ASC ] - ├── value indices: [ 0, 1, 2, 3, 4, 5, 6, 7 ] - ├── distribution key: [ 1 ] - └── read pk prefix len hint: 1 + Table 3 { columns: [ nation_n_name, lineitem_l_orderkey, lineitem_l_extendedprice, lineitem_l_discount, lineitem_l_shipdate, nation_n_nationkey, supplier_s_suppkey, lineitem_l_linenumber ], primary key: [ $1 ASC, $5 ASC, $6 ASC, $7 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6, 7 ], distribution key: [ 1 ], read pk prefix len hint: 1 } Table 4 { columns: [ lineitem_l_orderkey, nation_n_nationkey, supplier_s_suppkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -1589,11 +1584,11 @@ │ │ ├─StreamExchange { dist: HashShard(region.r_regionkey) } │ │ │ └─StreamProject { exprs: [region.r_regionkey] } │ │ │ └─StreamFilter { predicate: (region.r_name = 'ASIA':Varchar) } - │ │ │ └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } + │ │ │ └─StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } │ │ └─StreamExchange { dist: HashShard(nation.n_regionkey) } - │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } │ └─StreamExchange { dist: HashShard(customer.c_nationkey) } - │ └─StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_nationkey], stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } + │ └─StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_nationkey], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } └─StreamExchange { dist: HashShard(orders.o_custkey) } └─StreamHashJoin { type: Inner, predicate: lineitem.l_orderkey = orders.o_orderkey, output: [nation.n_name, lineitem.l_extendedprice, lineitem.l_discount, orders.o_custkey, orders.o_orderdate, nation.n_nationkey, supplier.s_suppkey, part.p_partkey, lineitem.l_orderkey, lineitem.l_linenumber, orders.o_orderkey] } ├─StreamExchange { dist: 
HashShard(lineitem.l_orderkey) } @@ -1601,20 +1596,20 @@ │ ├─StreamExchange { dist: HashShard(supplier.s_suppkey) } │ │ └─StreamHashJoin { type: Inner, predicate: nation.n_nationkey = supplier.s_nationkey, output: [nation.n_name, supplier.s_suppkey, nation.n_nationkey] } │ │ ├─StreamExchange { dist: HashShard(nation.n_nationkey) } - │ │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } │ │ └─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } │ └─StreamExchange { dist: HashShard(lineitem.l_suppkey) } │ └─StreamHashJoin { type: Inner, predicate: part.p_partkey = lineitem.l_partkey, output: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, part.p_partkey, lineitem.l_linenumber] } │ ├─StreamExchange { dist: HashShard(part.p_partkey) } │ │ └─StreamProject { exprs: [part.p_partkey] } │ │ └─StreamFilter { predicate: (part.p_type = 'PROMO ANODIZED STEEL':Varchar) } - │ │ └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_type], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } + │ │ └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_type], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } │ └─StreamExchange { dist: HashShard(lineitem.l_partkey) } - │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } └─StreamExchange { dist: HashShard(orders.o_orderkey) } └─StreamFilter { predicate: (orders.o_orderdate >= '1995-01-01':Date) AND (orders.o_orderdate <= '1996-12-31':Date) } - └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } + └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [o_year, mkt_share], stream_key: [], pk_columns: [o_year], pk_conflict: NoCheck } @@ -1652,17 +1647,17 @@ Fragment 5 StreamProject { exprs: [region.r_regionkey] } └── 
StreamFilter { predicate: (region.r_name = 'ASIA':Varchar) } - └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: Backfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 15 ] } + └── StreamTableScan { table: region, columns: [region.r_regionkey, region.r_name], stream_scan_type: ArrangementBackfill, pk: [region.r_regionkey], dist: UpstreamHashShard(region.r_regionkey) } { tables: [ StreamScan: 15 ] } ├── Upstream └── BatchPlanNode Fragment 6 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 16 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_regionkey], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 16 ] } ├── Upstream └── BatchPlanNode Fragment 7 - StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_nationkey], stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } { tables: [ StreamScan: 17 ] } + StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_nationkey], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } { tables: [ StreamScan: 17 ] } ├── Upstream └── BatchPlanNode @@ -1684,12 +1679,12 @@ └── StreamExchange Hash([1]) from 12 Fragment 11 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 30 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 30 ] } ├── Upstream └── BatchPlanNode Fragment 12 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 31 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 31 ] } ├── Upstream └── BatchPlanNode @@ -1701,19 +1696,19 @@ Fragment 14 StreamProject { exprs: [part.p_partkey] } └── StreamFilter { predicate: (part.p_type = 'PROMO ANODIZED STEEL':Varchar) } - └── StreamTableScan { table: part, columns: [part.p_partkey, part.p_type], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 36 ] } + └── StreamTableScan { table: part, columns: [part.p_partkey, part.p_type], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 36 ] } ├── Upstream └── BatchPlanNode Fragment 15 - StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + StreamTableScan { table: lineitem, columns: 
[lineitem.l_orderkey, lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 37 ] ├── Upstream └── BatchPlanNode Fragment 16 StreamFilter { predicate: (orders.o_orderdate >= '1995-01-01':Date) AND (orders.o_orderdate <= '1996-12-31':Date) } - └── StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 38 ] } + └── StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 38 ] } ├── Upstream └── BatchPlanNode @@ -1910,24 +1905,24 @@ │ ├─StreamExchange { dist: HashShard(part.p_partkey) } │ │ └─StreamProject { exprs: [part.p_partkey] } │ │ └─StreamFilter { predicate: Like(part.p_name, '%yellow%':Varchar) } - │ │ └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_name], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } + │ │ └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_name], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } │ └─StreamExchange { dist: HashShard(partsupp.ps_partkey) } │ └─StreamFilter { predicate: (partsupp.ps_suppkey = partsupp.ps_suppkey) } - │ └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_supplycost], stream_scan_type: Backfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } + │ └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_supplycost], stream_scan_type: ArrangementBackfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } └─StreamHashJoin { type: Inner, predicate: supplier.s_suppkey = lineitem.l_suppkey, output: [nation.n_name, supplier.s_suppkey, orders.o_orderdate, lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, nation.n_nationkey, orders.o_orderkey, lineitem.l_linenumber] } ├─StreamExchange { dist: HashShard(supplier.s_suppkey) } │ └─StreamHashJoin { type: Inner, predicate: nation.n_nationkey = supplier.s_nationkey, output: [nation.n_name, supplier.s_suppkey, nation.n_nationkey] } │ ├─StreamExchange { dist: HashShard(nation.n_nationkey) } - │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } │ └─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], 
stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } └─StreamExchange { dist: HashShard(lineitem.l_suppkey) } └─StreamHashJoin { type: Inner, predicate: orders.o_orderkey = lineitem.l_orderkey, output: [orders.o_orderdate, lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, orders.o_orderkey, lineitem.l_orderkey, lineitem.l_linenumber] } ├─StreamExchange { dist: HashShard(orders.o_orderkey) } - │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderdate], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } + │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderdate], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } └─StreamExchange { dist: HashShard(lineitem.l_orderkey) } └─StreamFilter { predicate: (lineitem.l_partkey = lineitem.l_partkey) } - └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [nation, o_year, sum_profit], stream_key: [], pk_columns: [nation, o_year], pk_conflict: NoCheck } @@ -1960,13 +1955,13 @@ Fragment 4 StreamProject { exprs: [part.p_partkey] } └── StreamFilter { predicate: Like(part.p_name, '%yellow%':Varchar) } - └── StreamTableScan { table: part, columns: [part.p_partkey, part.p_name], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 11 ] } + └── StreamTableScan { table: part, columns: [part.p_partkey, part.p_name], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 11 ] } ├── Upstream └── BatchPlanNode Fragment 5 StreamFilter { predicate: (partsupp.ps_suppkey = partsupp.ps_suppkey) } - └── StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_supplycost], stream_scan_type: Backfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } { tables: [ StreamScan: 12 ] } + └── StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_supplycost], stream_scan_type: ArrangementBackfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } { tables: [ StreamScan: 12 ] } ├── Upstream └── BatchPlanNode @@ -1976,12 +1971,12 @@ └── StreamExchange Hash([1]) from 8 Fragment 7 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 21 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, 
nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 21 ] } ├── Upstream └── BatchPlanNode Fragment 8 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 22 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 22 ] } ├── Upstream └── BatchPlanNode @@ -1991,13 +1986,13 @@ └── StreamExchange Hash([0]) from 11 Fragment 10 - StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderdate], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 27 ] } + StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderdate], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 27 ] } ├── Upstream └── BatchPlanNode Fragment 11 StreamFilter { predicate: (lineitem.l_partkey = lineitem.l_partkey) } - └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } { tables: [ StreamScan: 28 ] } + └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } { tables: [ StreamScan: 28 ] } ├── Upstream └── BatchPlanNode @@ -2158,19 +2153,19 @@ ├─StreamExchange { dist: HashShard(customer.c_custkey) } │ └─StreamHashJoin { type: Inner, predicate: nation.n_nationkey = customer.c_nationkey, output: [nation.n_name, customer.c_custkey, customer.c_name, customer.c_address, customer.c_phone, customer.c_acctbal, customer.c_comment, nation.n_nationkey] } │ ├─StreamExchange { dist: HashShard(nation.n_nationkey) } - │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } │ └─StreamExchange { dist: HashShard(customer.c_nationkey) } - │ └─StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_name, customer.c_address, customer.c_nationkey, customer.c_phone, customer.c_acctbal, customer.c_comment], stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } + │ └─StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_name, customer.c_address, customer.c_nationkey, customer.c_phone, customer.c_acctbal, customer.c_comment], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } 
└─StreamExchange { dist: HashShard(orders.o_custkey) } └─StreamHashJoin { type: Inner, predicate: lineitem.l_orderkey = orders.o_orderkey, output: [lineitem.l_extendedprice, lineitem.l_discount, orders.o_custkey, lineitem.l_orderkey, lineitem.l_linenumber, orders.o_orderkey] } ├─StreamExchange { dist: HashShard(lineitem.l_orderkey) } │ └─StreamProject { exprs: [lineitem.l_orderkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber] } │ └─StreamFilter { predicate: (lineitem.l_returnflag = 'R':Varchar) } - │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber, lineitem.l_returnflag], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber, lineitem.l_returnflag], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } └─StreamExchange { dist: HashShard(orders.o_orderkey) } └─StreamProject { exprs: [orders.o_orderkey, orders.o_custkey] } └─StreamFilter { predicate: (orders.o_orderdate >= '1994-01-01':Date) AND (orders.o_orderdate < '1994-04-01 00:00:00':Timestamp) } - └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } + └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [c_custkey, c_name, revenue, c_acctbal, n_name, c_address, c_phone, c_comment], stream_key: [c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment], pk_columns: [revenue, c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment], pk_conflict: NoCheck } @@ -2195,12 +2190,12 @@ └── StreamExchange Hash([3]) from 4 Fragment 3 - StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 11 ] } + StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 11 ] } ├── Upstream └── BatchPlanNode Fragment 4 - StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_name, customer.c_address, customer.c_nationkey, customer.c_phone, customer.c_acctbal, customer.c_comment], stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } { tables: [ StreamScan: 12 ] } + StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_name, customer.c_address, customer.c_nationkey, customer.c_phone, customer.c_acctbal, customer.c_comment], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } { tables: [ StreamScan: 12 ] } ├── Upstream └── BatchPlanNode @@ -2212,14 +2207,15 @@ Fragment 6 StreamProject { exprs: [lineitem.l_orderkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber] } └── 
StreamFilter { predicate: (lineitem.l_returnflag = 'R':Varchar) } - └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber, lineitem.l_returnflag], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } { tables: [ StreamScan: 17 ] } + └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_linenumber, lineitem.l_returnflag], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + ├── tables: [ StreamScan: 17 ] ├── Upstream └── BatchPlanNode Fragment 7 StreamProject { exprs: [orders.o_orderkey, orders.o_custkey] } └── StreamFilter { predicate: (orders.o_orderdate >= '1994-01-01':Date) AND (orders.o_orderdate < '1994-04-01 00:00:00':Timestamp) } - └── StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 18 ] } + └── StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_orderdate], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 18 ] } ├── Upstream └── BatchPlanNode @@ -2386,13 +2382,13 @@ │ ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } │ │ └─StreamHashJoin { type: Inner, predicate: partsupp.ps_suppkey = supplier.s_suppkey, output: [partsupp.ps_partkey, partsupp.ps_availqty, partsupp.ps_supplycost, supplier.s_nationkey, partsupp.ps_suppkey, supplier.s_suppkey] } │ │ ├─StreamExchange { dist: HashShard(partsupp.ps_suppkey) } - │ │ │ └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_availqty, partsupp.ps_supplycost], stream_scan_type: Backfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } + │ │ │ └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_availqty, partsupp.ps_supplycost], stream_scan_type: ArrangementBackfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } │ │ └─StreamExchange { dist: HashShard(supplier.s_suppkey) } - │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } │ └─StreamExchange { dist: HashShard(nation.n_nationkey) } │ └─StreamProject { exprs: [nation.n_nationkey] } │ └─StreamFilter { predicate: (nation.n_name = 'ARGENTINA':Varchar) } - │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } └─StreamExchange { dist: Broadcast } 
└─StreamProject { exprs: [(sum(sum($expr2)) * 0.0001000000:Decimal) as $expr3] } └─StreamSimpleAgg { aggs: [sum(sum($expr2)), count] } @@ -2404,13 +2400,13 @@ ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } │ └─StreamHashJoin { type: Inner, predicate: partsupp.ps_suppkey = supplier.s_suppkey, output: [partsupp.ps_partkey, partsupp.ps_availqty, partsupp.ps_supplycost, supplier.s_nationkey, partsupp.ps_suppkey, supplier.s_suppkey] } │ ├─StreamExchange { dist: HashShard(partsupp.ps_suppkey) } - │ │ └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_availqty, partsupp.ps_supplycost], stream_scan_type: Backfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } + │ │ └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_availqty, partsupp.ps_supplycost], stream_scan_type: ArrangementBackfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } │ └─StreamExchange { dist: HashShard(supplier.s_suppkey) } - │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } └─StreamExchange { dist: HashShard(nation.n_nationkey) } └─StreamProject { exprs: [nation.n_nationkey] } └─StreamFilter { predicate: (nation.n_name = 'ARGENTINA':Varchar) } - └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [ps_partkey, value], stream_key: [], pk_columns: [value], pk_conflict: NoCheck } @@ -2448,20 +2444,20 @@ └── StreamExchange Hash([0]) from 6 Fragment 5 - StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_availqty, partsupp.ps_supplycost], stream_scan_type: Backfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } + StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_availqty, partsupp.ps_supplycost], stream_scan_type: ArrangementBackfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } ├── tables: [ StreamScan: 13 ] ├── Upstream └── BatchPlanNode Fragment 6 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 14 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 14 ] } ├── Upstream └── BatchPlanNode Fragment 7 StreamProject { exprs: [nation.n_nationkey] } └── StreamFilter { predicate: (nation.n_name = 
'ARGENTINA':Varchar) } - └── StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 15 ] } + └── StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 15 ] } ├── Upstream └── BatchPlanNode @@ -2485,12 +2481,7 @@ Table 4 { columns: [ partsupp_ps_partkey, sum($expr1), count ], primary key: [ $0 ASC ], value indices: [ 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 5 - ├── columns: [ partsupp_ps_partkey, partsupp_ps_availqty, partsupp_ps_supplycost, supplier_s_nationkey, partsupp_ps_suppkey, supplier_s_suppkey ] - ├── primary key: [ $3 ASC, $0 ASC, $4 ASC ] - ├── value indices: [ 0, 1, 2, 3, 4, 5 ] - ├── distribution key: [ 3 ] - └── read pk prefix len hint: 1 + Table 5 { columns: [ partsupp_ps_partkey, partsupp_ps_availqty, partsupp_ps_supplycost, supplier_s_nationkey, partsupp_ps_suppkey, supplier_s_suppkey ], primary key: [ $3 ASC, $0 ASC, $4 ASC ], value indices: [ 0, 1, 2, 3, 4, 5 ], distribution key: [ 3 ], read pk prefix len hint: 1 } Table 6 { columns: [ supplier_s_nationkey, partsupp_ps_partkey, partsupp_ps_suppkey, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -2591,11 +2582,11 @@ └─StreamProject { exprs: [lineitem.l_shipmode, Case(((orders.o_orderpriority = '1-URGENT':Varchar) OR (orders.o_orderpriority = '2-HIGH':Varchar)), 1:Int32, 0:Int32) as $expr1, Case(((orders.o_orderpriority <> '1-URGENT':Varchar) AND (orders.o_orderpriority <> '2-HIGH':Varchar)), 1:Int32, 0:Int32) as $expr2, orders.o_orderkey, lineitem.l_linenumber] } └─StreamHashJoin { type: Inner, predicate: orders.o_orderkey = lineitem.l_orderkey, output: [orders.o_orderpriority, lineitem.l_shipmode, orders.o_orderkey, lineitem.l_orderkey, lineitem.l_linenumber] } ├─StreamExchange { dist: HashShard(orders.o_orderkey) } - │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderpriority], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } + │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderpriority], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } └─StreamExchange { dist: HashShard(lineitem.l_orderkey) } └─StreamProject { exprs: [lineitem.l_orderkey, lineitem.l_shipmode, lineitem.l_linenumber] } └─StreamFilter { predicate: In(lineitem.l_shipmode, 'FOB':Varchar, 'SHIP':Varchar) AND (lineitem.l_commitdate < lineitem.l_receiptdate) AND (lineitem.l_shipdate < lineitem.l_commitdate) AND (lineitem.l_receiptdate >= '1994-01-01':Date) AND (lineitem.l_receiptdate < '1995-01-01 00:00:00':Timestamp) } - └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_shipmode, lineitem.l_linenumber, lineitem.l_shipdate, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_shipmode, lineitem.l_linenumber, lineitem.l_shipdate, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: ArrangementBackfill, pk: 
[lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [l_shipmode, high_line_count, low_line_count], stream_key: [], pk_columns: [l_shipmode], pk_conflict: NoCheck } @@ -2617,14 +2608,14 @@ └── StreamExchange Hash([0]) from 4 Fragment 3 - StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderpriority], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 7 ] } + StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderpriority], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 7 ] } ├── Upstream └── BatchPlanNode Fragment 4 StreamProject { exprs: [lineitem.l_orderkey, lineitem.l_shipmode, lineitem.l_linenumber] } └── StreamFilter { predicate: In(lineitem.l_shipmode, 'FOB':Varchar, 'SHIP':Varchar) AND (lineitem.l_commitdate < lineitem.l_receiptdate) AND (lineitem.l_shipdate < lineitem.l_commitdate) AND (lineitem.l_receiptdate >= '1994-01-01':Date) AND (lineitem.l_receiptdate < '1995-01-01 00:00:00':Timestamp) } - └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_shipmode, lineitem.l_linenumber, lineitem.l_shipdate, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_shipmode, lineitem.l_linenumber, lineitem.l_shipdate, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 8 ] ├── Upstream └── BatchPlanNode @@ -2719,11 +2710,11 @@ └─StreamHashAgg { group_key: [customer.c_custkey], aggs: [count(orders.o_orderkey), count] } └─StreamHashJoin { type: LeftOuter, predicate: customer.c_custkey = orders.o_custkey, output: [customer.c_custkey, orders.o_orderkey] } ├─StreamExchange { dist: HashShard(customer.c_custkey) } - │ └─StreamTableScan { table: customer, columns: [customer.c_custkey], stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } + │ └─StreamTableScan { table: customer, columns: [customer.c_custkey], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } └─StreamExchange { dist: HashShard(orders.o_custkey) } └─StreamProject { exprs: [orders.o_orderkey, orders.o_custkey] } └─StreamFilter { predicate: Not(Like(orders.o_comment, '%:1%:2%':Varchar)) } - └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_comment], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } + └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_comment], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [c_count, custdist], stream_key: [], pk_columns: [custdist, c_count], pk_conflict: NoCheck } @@ -2748,7 +2739,7 @@ └── StreamExchange Hash([1]) from 4 Fragment 3 - StreamTableScan { table: customer, columns: [customer.c_custkey], 
stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } + StreamTableScan { table: customer, columns: [customer.c_custkey], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } ├── tables: [ StreamScan: 8 ] ├── Upstream └── BatchPlanNode @@ -2756,20 +2747,14 @@ Fragment 4 StreamProject { exprs: [orders.o_orderkey, orders.o_custkey] } └── StreamFilter { predicate: Not(Like(orders.o_comment, '%:1%:2%':Varchar)) } - └── StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_comment], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } + └── StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_comment], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } ├── tables: [ StreamScan: 9 ] ├── Upstream └── BatchPlanNode Table 0 { columns: [ count(orders_o_orderkey), count, $expr1 ], primary key: [ $1 DESC, $0 DESC, $2 ASC ], value indices: [ 0, 1, 2 ], distribution key: [], read pk prefix len hint: 0 } - Table 1 - ├── columns: [ count(orders_o_orderkey), count, $expr1 ] - ├── primary key: [ $2 ASC, $1 DESC, $0 DESC ] - ├── value indices: [ 0, 1, 2 ] - ├── distribution key: [ 0 ] - ├── read pk prefix len hint: 1 - └── vnode column idx: 2 + Table 1 { columns: [ count(orders_o_orderkey), count, $expr1 ], primary key: [ $2 ASC, $1 DESC, $0 DESC ], value indices: [ 0, 1, 2 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 2 } Table 2 { columns: [ count(orders_o_orderkey), count ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -2791,13 +2776,7 @@ ├── read pk prefix len hint: 1 └── vnode column idx: 0 - Table 9 - ├── columns: [ vnode, o_orderkey, orders_backfill_finished, orders_row_count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2, 3 ] - ├── distribution key: [ 0 ] - ├── read pk prefix len hint: 1 - └── vnode column idx: 0 + Table 9 { columns: [ vnode, o_orderkey, orders_backfill_finished, orders_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 4294967294 { columns: [ c_count, custdist ], primary key: [ $1 DESC, $0 DESC ], value indices: [ 0, 1 ], distribution key: [], read pk prefix len hint: 2 } @@ -2855,9 +2834,9 @@ ├─StreamExchange { dist: HashShard(lineitem.l_partkey) } │ └─StreamProject { exprs: [lineitem.l_partkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber] } │ └─StreamFilter { predicate: (lineitem.l_shipdate >= '1995-09-01':Date) AND (lineitem.l_shipdate < '1995-10-01 00:00:00':Timestamp) } - │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } └─StreamExchange { dist: 
HashShard(part.p_partkey) } - └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_type], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } + └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_type], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [promo_revenue], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -2878,13 +2857,13 @@ Fragment 2 StreamProject { exprs: [lineitem.l_partkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber] } └── StreamFilter { predicate: (lineitem.l_shipdate >= '1995-09-01':Date) AND (lineitem.l_shipdate < '1995-10-01 00:00:00':Timestamp) } - └── StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └── StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 5 ] ├── Upstream └── BatchPlanNode Fragment 3 - StreamTableScan { table: part, columns: [part.p_partkey, part.p_type], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 6 ] } + StreamTableScan { table: part, columns: [part.p_partkey, part.p_type], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 6 ] } ├── Upstream └── BatchPlanNode @@ -2985,14 +2964,14 @@ ├─StreamExchange { dist: HashShard(sum($expr1)) } │ └─StreamHashJoin { type: Inner, predicate: supplier.s_suppkey = lineitem.l_suppkey, output: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone, sum($expr1), lineitem.l_suppkey] } │ ├─StreamExchange { dist: HashShard(supplier.s_suppkey) } - │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } │ └─StreamShare { id: 7 } │ └─StreamProject { exprs: [lineitem.l_suppkey, sum($expr1)] } │ └─StreamHashAgg { group_key: [lineitem.l_suppkey], aggs: [sum($expr1), count] } │ └─StreamExchange { dist: HashShard(lineitem.l_suppkey) } │ └─StreamProject { exprs: [lineitem.l_suppkey, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr1, lineitem.l_orderkey, lineitem.l_linenumber] } │ └─StreamFilter { predicate: (lineitem.l_shipdate >= '1993-01-01':Date) AND (lineitem.l_shipdate < '1993-04-01 00:00:00':Timestamp) } - │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: Backfill, pk: 
[lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } └─StreamExchange { dist: HashShard(max(max(sum($expr1)))) } └─StreamProject { exprs: [max(max(sum($expr1)))] } └─StreamSimpleAgg { aggs: [max(max(sum($expr1))), count] } @@ -3005,7 +2984,7 @@ └─StreamExchange { dist: HashShard(lineitem.l_suppkey) } └─StreamProject { exprs: [lineitem.l_suppkey, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr1, lineitem.l_orderkey, lineitem.l_linenumber] } └─StreamFilter { predicate: (lineitem.l_shipdate >= '1993-01-01':Date) AND (lineitem.l_shipdate < '1993-04-01 00:00:00':Timestamp) } - └─StreamTableScan { table: lineitem, columns: [lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └─StreamTableScan { table: lineitem, columns: [lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [s_suppkey, s_name, s_address, s_phone, total_revenue], stream_key: [], pk_columns: [s_suppkey], pk_conflict: NoCheck } @@ -3029,7 +3008,7 @@ └── StreamExchange NoShuffle from 4 Fragment 3 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_phone], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } ├── tables: [ StreamScan: 10 ] ├── Upstream └── BatchPlanNode @@ -3042,7 +3021,7 @@ Fragment 5 StreamProject { exprs: [lineitem.l_suppkey, (lineitem.l_extendedprice * (1:Decimal - lineitem.l_discount)) as $expr1, lineitem.l_orderkey, lineitem.l_linenumber] } └── StreamFilter { predicate: (lineitem.l_shipdate >= '1993-01-01':Date) AND (lineitem.l_shipdate < '1993-04-01 00:00:00':Timestamp) } - └── StreamTableScan { table: lineitem, columns: [lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └── StreamTableScan { table: lineitem, columns: [lineitem.l_suppkey, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 12 ] ├── Upstream └── BatchPlanNode @@ -3175,14 +3154,14 @@ 
├─StreamExchange { dist: HashShard(partsupp.ps_suppkey) } │ └─StreamHashJoin { type: Inner, predicate: partsupp.ps_partkey = part.p_partkey, output: [partsupp.ps_suppkey, part.p_brand, part.p_type, part.p_size, partsupp.ps_partkey, part.p_partkey] } │ ├─StreamExchange { dist: HashShard(partsupp.ps_partkey) } - │ │ └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey], stream_scan_type: Backfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } + │ │ └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey], stream_scan_type: ArrangementBackfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } │ └─StreamExchange { dist: HashShard(part.p_partkey) } │ └─StreamFilter { predicate: (part.p_brand <> 'Brand#45':Varchar) AND Not(Like(part.p_type, 'SMALL PLATED%':Varchar)) AND In(part.p_size, 19:Int32, 17:Int32, 16:Int32, 23:Int32, 10:Int32, 4:Int32, 38:Int32, 11:Int32) } - │ └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_brand, part.p_type, part.p_size], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } + │ └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_brand, part.p_type, part.p_size], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } └─StreamExchange { dist: HashShard(supplier.s_suppkey) } └─StreamProject { exprs: [supplier.s_suppkey] } └─StreamFilter { predicate: Like(supplier.s_comment, '%Customer%Complaints%':Varchar) } - └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_comment], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_comment], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [p_brand, p_type, p_size, supplier_cnt], stream_key: [], pk_columns: [supplier_cnt, p_brand, p_type, p_size], pk_conflict: NoCheck } @@ -3213,21 +3192,21 @@ └── StreamExchange Hash([0]) from 5 Fragment 4 - StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey], stream_scan_type: Backfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } + StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey], stream_scan_type: ArrangementBackfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } ├── tables: [ StreamScan: 12 ] ├── Upstream └── BatchPlanNode Fragment 5 StreamFilter { predicate: (part.p_brand <> 'Brand#45':Varchar) AND Not(Like(part.p_type, 'SMALL PLATED%':Varchar)) AND In(part.p_size, 19:Int32, 17:Int32, 16:Int32, 23:Int32, 10:Int32, 4:Int32, 38:Int32, 11:Int32) } - └── StreamTableScan { table: part, columns: [part.p_partkey, part.p_brand, part.p_type, part.p_size], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 13 ] } + └── StreamTableScan { table: part, columns: [part.p_partkey, part.p_brand, part.p_type, part.p_size], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 
13 ] } ├── Upstream └── BatchPlanNode Fragment 6 StreamProject { exprs: [supplier.s_suppkey] } └── StreamFilter { predicate: Like(supplier.s_comment, '%Customer%Complaints%':Varchar) } - └── StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_comment], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 14 ] } + └── StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_comment], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 14 ] } ├── Upstream └── BatchPlanNode @@ -3246,19 +3225,9 @@ ├── read pk prefix len hint: 1 └── vnode column idx: 4 - Table 2 - ├── columns: [ part_p_brand, part_p_type, part_p_size, count(distinct partsupp_ps_suppkey), count ] - ├── primary key: [ $0 ASC, $1 ASC, $2 ASC ] - ├── value indices: [ 3, 4 ] - ├── distribution key: [ 0, 1, 2 ] - └── read pk prefix len hint: 3 + Table 2 { columns: [ part_p_brand, part_p_type, part_p_size, count(distinct partsupp_ps_suppkey), count ], primary key: [ $0 ASC, $1 ASC, $2 ASC ], value indices: [ 3, 4 ], distribution key: [ 0, 1, 2 ], read pk prefix len hint: 3 } - Table 3 - ├── columns: [ part_p_brand, part_p_type, part_p_size, partsupp_ps_suppkey, count_for_agg_call_0 ] - ├── primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ] - ├── value indices: [ 4 ] - ├── distribution key: [ 0, 1, 2 ] - └── read pk prefix len hint: 4 + Table 3 { columns: [ part_p_brand, part_p_type, part_p_size, partsupp_ps_suppkey, count_for_agg_call_0 ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC ], value indices: [ 4 ], distribution key: [ 0, 1, 2 ], read pk prefix len hint: 4 } Table 4 ├── columns: [ partsupp_ps_suppkey, part_p_brand, part_p_type, part_p_size, partsupp_ps_partkey, part_p_partkey ] @@ -3281,13 +3250,7 @@ Table 11 { columns: [ part_p_partkey, _degree ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 12 - ├── columns: [ vnode, ps_partkey, ps_suppkey, partsupp_backfill_finished, partsupp_row_count ] - ├── primary key: [ $0 ASC ] - ├── value indices: [ 1, 2, 3, 4 ] - ├── distribution key: [ 0 ] - ├── read pk prefix len hint: 1 - └── vnode column idx: 0 + Table 12 { columns: [ vnode, ps_partkey, ps_suppkey, partsupp_backfill_finished, partsupp_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3, 4 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } Table 13 { columns: [ vnode, p_partkey, part_backfill_finished, part_row_count ], primary key: [ $0 ASC ], value indices: [ 1, 2, 3 ], distribution key: [ 0 ], read pk prefix len hint: 1, vnode column idx: 0 } @@ -3366,15 +3329,15 @@ ├─StreamExchange { dist: HashShard(part.p_partkey) } │ └─StreamHashJoin { type: Inner, predicate: lineitem.l_partkey = part.p_partkey, output: [lineitem.l_quantity, lineitem.l_extendedprice, part.p_partkey, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_partkey] } │ ├─StreamExchange { dist: HashShard(lineitem.l_partkey) } - │ │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_orderkey, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + │ │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_quantity, lineitem.l_extendedprice, 
lineitem.l_orderkey, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } │ └─StreamExchange { dist: HashShard(part.p_partkey) } │ └─StreamProject { exprs: [part.p_partkey] } │ └─StreamFilter { predicate: (part.p_brand = 'Brand#13':Varchar) AND (part.p_container = 'JUMBO PKG':Varchar) } - │ └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_brand, part.p_container], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } + │ └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_brand, part.p_container], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } └─StreamProject { exprs: [(0.2:Decimal * (sum(lineitem.l_quantity) / count(lineitem.l_quantity)::Decimal)) as $expr1, lineitem.l_partkey] } └─StreamHashAgg { group_key: [lineitem.l_partkey], aggs: [sum(lineitem.l_quantity), count(lineitem.l_quantity), count] } └─StreamExchange { dist: HashShard(lineitem.l_partkey) } - └─StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_quantity, lineitem.l_orderkey, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └─StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_quantity, lineitem.l_orderkey, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [avg_yearly], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -3403,7 +3366,7 @@ └── StreamExchange Hash([0]) from 4 Fragment 3 - StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_orderkey, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_orderkey, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 9 ] ├── Upstream └── BatchPlanNode @@ -3411,12 +3374,12 @@ Fragment 4 StreamProject { exprs: [part.p_partkey] } └── StreamFilter { predicate: (part.p_brand = 'Brand#13':Varchar) AND (part.p_container = 'JUMBO PKG':Varchar) } - └── StreamTableScan { table: part, columns: [part.p_partkey, part.p_brand, part.p_container], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 10 ] } + └── StreamTableScan { table: part, columns: [part.p_partkey, part.p_brand, part.p_container], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 10 ] } ├── Upstream └── BatchPlanNode Fragment 5 - StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_quantity, lineitem.l_orderkey, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + 
StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_quantity, lineitem.l_orderkey, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 12 ] ├── Upstream └── BatchPlanNode @@ -3550,17 +3513,17 @@ │ ├─StreamExchange { dist: HashShard(orders.o_orderkey) } │ │ └─StreamHashJoin { type: Inner, predicate: customer.c_custkey = orders.o_custkey, output: [customer.c_custkey, customer.c_name, orders.o_orderkey, orders.o_totalprice, orders.o_orderdate] } │ │ ├─StreamExchange { dist: HashShard(customer.c_custkey) } - │ │ │ └─StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_name], stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } + │ │ │ └─StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_name], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } │ │ └─StreamExchange { dist: HashShard(orders.o_custkey) } - │ │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_totalprice, orders.o_orderdate], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } + │ │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_totalprice, orders.o_orderdate], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } │ └─StreamExchange { dist: HashShard(lineitem.l_orderkey) } - │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_quantity, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_quantity, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } └─StreamProject { exprs: [lineitem.l_orderkey] } └─StreamFilter { predicate: (sum(lineitem.l_quantity) > 1:Decimal) } └─StreamProject { exprs: [lineitem.l_orderkey, sum(lineitem.l_quantity)] } └─StreamHashAgg { group_key: [lineitem.l_orderkey], aggs: [sum(lineitem.l_quantity), count] } └─StreamExchange { dist: HashShard(lineitem.l_orderkey) } - └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_quantity, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_quantity, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, quantity], stream_key: [c_custkey, c_name, o_orderkey, o_totalprice, o_orderdate], pk_columns: [o_totalprice, o_orderdate, c_custkey, c_name, o_orderkey], pk_conflict: NoCheck } @@ -3592,23 +3555,23 @@ └── StreamExchange Hash([1]) from 4 Fragment 3 - StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_name], 
stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } { tables: [ StreamScan: 15 ] } + StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_name], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } { tables: [ StreamScan: 15 ] } ├── Upstream └── BatchPlanNode Fragment 4 - StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_totalprice, orders.o_orderdate], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 16 ] } + StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_custkey, orders.o_totalprice, orders.o_orderdate], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 16 ] } ├── Upstream └── BatchPlanNode Fragment 5 - StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_quantity, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_quantity, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 17 ] ├── Upstream └── BatchPlanNode Fragment 6 - StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_quantity, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_quantity, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 19 ] ├── Upstream └── BatchPlanNode @@ -3751,10 +3714,10 @@ ├─StreamExchange { dist: HashShard(lineitem.l_partkey) } │ └─StreamProject { exprs: [lineitem.l_partkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber] } │ └─StreamFilter { predicate: In(lineitem.l_shipmode, 'AIR':Varchar, 'AIR REG':Varchar) AND (lineitem.l_shipinstruct = 'DELIVER IN PERSON':Varchar) } - │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipinstruct, lineitem.l_shipmode], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipinstruct, lineitem.l_shipmode], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } └─StreamExchange { dist: HashShard(part.p_partkey) } └─StreamFilter { predicate: (part.p_size >= 1:Int32) } - └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_brand, part.p_size, part.p_container], stream_scan_type: 
Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } + └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_brand, part.p_size, part.p_container], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [revenue], stream_key: [], pk_columns: [], pk_conflict: NoCheck } @@ -3774,13 +3737,13 @@ Fragment 2 StreamProject { exprs: [lineitem.l_partkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber] } └── StreamFilter { predicate: In(lineitem.l_shipmode, 'AIR':Varchar, 'AIR REG':Varchar) AND (lineitem.l_shipinstruct = 'DELIVER IN PERSON':Varchar) } - └── StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipinstruct, lineitem.l_shipmode], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } { tables: [ StreamScan: 5 ] } + └── StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_quantity, lineitem.l_extendedprice, lineitem.l_discount, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipinstruct, lineitem.l_shipmode], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } { tables: [ StreamScan: 5 ] } ├── Upstream └── BatchPlanNode Fragment 3 StreamFilter { predicate: (part.p_size >= 1:Int32) } - └── StreamTableScan { table: part, columns: [part.p_partkey, part.p_brand, part.p_size, part.p_container], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 6 ] } + └── StreamTableScan { table: part, columns: [part.p_partkey, part.p_brand, part.p_size, part.p_container], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 6 ] } ├── Upstream └── BatchPlanNode @@ -3872,11 +3835,11 @@ ├─StreamExchange { dist: HashShard(supplier.s_suppkey) } │ └─StreamHashJoin { type: Inner, predicate: supplier.s_nationkey = nation.n_nationkey, output: all } │ ├─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } │ └─StreamExchange { dist: HashShard(nation.n_nationkey) } │ └─StreamProject { exprs: [nation.n_nationkey] } │ └─StreamFilter { predicate: (nation.n_name = 'KENYA':Varchar) } - │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } └─StreamExchange { dist: HashShard(partsupp.ps_suppkey) } └─StreamHashJoin { type: LeftSemi, 
predicate: partsupp.ps_partkey = part.p_partkey, output: [partsupp.ps_suppkey, partsupp.ps_partkey] } ├─StreamExchange { dist: HashShard(partsupp.ps_partkey) } @@ -3885,17 +3848,17 @@ │ └─StreamHashJoin { type: Inner, predicate: partsupp.ps_partkey = lineitem.l_partkey AND partsupp.ps_suppkey = lineitem.l_suppkey, output: all } │ ├─StreamExchange { dist: HashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } │ │ └─StreamProject { exprs: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_availqty::Decimal as $expr1] } - │ │ └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_availqty], stream_scan_type: Backfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } + │ │ └─StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_availqty], stream_scan_type: ArrangementBackfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } │ └─StreamProject { exprs: [lineitem.l_partkey, lineitem.l_suppkey, (0.5:Decimal * sum(lineitem.l_quantity)) as $expr2] } │ └─StreamHashAgg { group_key: [lineitem.l_partkey, lineitem.l_suppkey], aggs: [sum(lineitem.l_quantity), count] } │ └─StreamExchange { dist: HashShard(lineitem.l_partkey, lineitem.l_suppkey) } │ └─StreamProject { exprs: [lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_orderkey, lineitem.l_linenumber] } │ └─StreamFilter { predicate: (lineitem.l_shipdate >= '1994-01-01':Date) AND (lineitem.l_shipdate < '1995-01-01 00:00:00':Timestamp) } - │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } └─StreamExchange { dist: HashShard(part.p_partkey) } └─StreamProject { exprs: [part.p_partkey] } └─StreamFilter { predicate: Like(part.p_name, 'forest%':Varchar) } - └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_name], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } + └─StreamTableScan { table: part, columns: [part.p_partkey, part.p_name], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [s_name, s_address, supplier.s_suppkey(hidden), supplier.s_nationkey(hidden)], stream_key: [], pk_columns: [s_name], pk_conflict: NoCheck } @@ -3919,7 +3882,7 @@ └── StreamExchange Hash([0]) from 4 Fragment 3 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_address, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } 
├── tables: [ StreamScan: 10 ] ├── Upstream └── BatchPlanNode @@ -3927,7 +3890,7 @@ Fragment 4 StreamProject { exprs: [nation.n_nationkey] } └── StreamFilter { predicate: (nation.n_name = 'KENYA':Varchar) } - └── StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 11 ] } + └── StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 11 ] } ├── Upstream └── BatchPlanNode @@ -3949,7 +3912,7 @@ Fragment 7 StreamProject { exprs: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_availqty::Decimal as $expr1] } - └── StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_availqty], stream_scan_type: Backfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } + └── StreamTableScan { table: partsupp, columns: [partsupp.ps_partkey, partsupp.ps_suppkey, partsupp.ps_availqty], stream_scan_type: ArrangementBackfill, pk: [partsupp.ps_partkey, partsupp.ps_suppkey], dist: UpstreamHashShard(partsupp.ps_partkey, partsupp.ps_suppkey) } ├── tables: [ StreamScan: 20 ] ├── Upstream └── BatchPlanNode @@ -3957,7 +3920,7 @@ Fragment 8 StreamProject { exprs: [lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_orderkey, lineitem.l_linenumber] } └── StreamFilter { predicate: (lineitem.l_shipdate >= '1994-01-01':Date) AND (lineitem.l_shipdate < '1995-01-01 00:00:00':Timestamp) } - └── StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └── StreamTableScan { table: lineitem, columns: [lineitem.l_partkey, lineitem.l_suppkey, lineitem.l_quantity, lineitem.l_orderkey, lineitem.l_linenumber, lineitem.l_shipdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 22 ] ├── Upstream └── BatchPlanNode @@ -3965,7 +3928,7 @@ Fragment 9 StreamProject { exprs: [part.p_partkey] } └── StreamFilter { predicate: Like(part.p_name, 'forest%':Varchar) } - └── StreamTableScan { table: part, columns: [part.p_partkey, part.p_name], stream_scan_type: Backfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 23 ] } + └── StreamTableScan { table: part, columns: [part.p_partkey, part.p_name], stream_scan_type: ArrangementBackfill, pk: [part.p_partkey], dist: UpstreamHashShard(part.p_partkey) } { tables: [ StreamScan: 23 ] } ├── Upstream └── BatchPlanNode @@ -4142,25 +4105,25 @@ │ │ │ ├─StreamExchange { dist: HashShard(nation.n_nationkey) } │ │ │ │ └─StreamProject { exprs: [nation.n_nationkey] } │ │ │ │ └─StreamFilter { predicate: (nation.n_name = 'GERMANY':Varchar) } - │ │ │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } + │ │ │ │ └─StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: 
ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } │ │ │ └─StreamExchange { dist: HashShard(supplier.s_nationkey) } - │ │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } + │ │ │ └─StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } │ │ └─StreamExchange { dist: HashShard(lineitem.l_suppkey) } │ │ └─StreamHashJoin { type: Inner, predicate: orders.o_orderkey = lineitem.l_orderkey, output: [lineitem.l_orderkey, lineitem.l_suppkey, orders.o_orderkey, lineitem.l_linenumber] } │ │ ├─StreamExchange { dist: HashShard(orders.o_orderkey) } │ │ │ └─StreamProject { exprs: [orders.o_orderkey] } │ │ │ └─StreamFilter { predicate: (orders.o_orderstatus = 'F':Varchar) } - │ │ │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderstatus], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } + │ │ │ └─StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderstatus], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } │ │ └─StreamExchange { dist: HashShard(lineitem.l_orderkey) } │ │ └─StreamProject { exprs: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber] } │ │ └─StreamFilter { predicate: (lineitem.l_receiptdate > lineitem.l_commitdate) } - │ │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + │ │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } │ └─StreamExchange { dist: HashShard(lineitem.l_orderkey) } - │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + │ └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } └─StreamExchange { dist: HashShard(lineitem.l_orderkey) } └─StreamProject { exprs: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber] } └─StreamFilter { predicate: (lineitem.l_receiptdate > lineitem.l_commitdate) } - └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └─StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber, 
lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [s_name, numwait], stream_key: [s_name], pk_columns: [numwait, s_name], pk_conflict: NoCheck } @@ -4198,12 +4161,12 @@ Fragment 5 StreamProject { exprs: [nation.n_nationkey] } └── StreamFilter { predicate: (nation.n_name = 'GERMANY':Varchar) } - └── StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: Backfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 19 ] } + └── StreamTableScan { table: nation, columns: [nation.n_nationkey, nation.n_name], stream_scan_type: ArrangementBackfill, pk: [nation.n_nationkey], dist: UpstreamHashShard(nation.n_nationkey) } { tables: [ StreamScan: 19 ] } ├── Upstream └── BatchPlanNode Fragment 6 - StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_nationkey], stream_scan_type: Backfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 20 ] } + StreamTableScan { table: supplier, columns: [supplier.s_suppkey, supplier.s_name, supplier.s_nationkey], stream_scan_type: ArrangementBackfill, pk: [supplier.s_suppkey], dist: UpstreamHashShard(supplier.s_suppkey) } { tables: [ StreamScan: 20 ] } ├── Upstream └── BatchPlanNode @@ -4216,27 +4179,27 @@ Fragment 8 StreamProject { exprs: [orders.o_orderkey] } └── StreamFilter { predicate: (orders.o_orderstatus = 'F':Varchar) } - └── StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderstatus], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 25 ] } + └── StreamTableScan { table: orders, columns: [orders.o_orderkey, orders.o_orderstatus], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } { tables: [ StreamScan: 25 ] } ├── Upstream └── BatchPlanNode Fragment 9 StreamProject { exprs: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber] } └── StreamFilter { predicate: (lineitem.l_receiptdate > lineitem.l_commitdate) } - └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 26 ] ├── Upstream └── BatchPlanNode Fragment 10 - StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } { tables: [ StreamScan: 27 ] } + StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: 
UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } { tables: [ StreamScan: 27 ] } ├── Upstream └── BatchPlanNode Fragment 11 StreamProject { exprs: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber] } └── StreamFilter { predicate: (lineitem.l_receiptdate > lineitem.l_commitdate) } - └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: Backfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } + └── StreamTableScan { table: lineitem, columns: [lineitem.l_orderkey, lineitem.l_suppkey, lineitem.l_linenumber, lineitem.l_commitdate, lineitem.l_receiptdate], stream_scan_type: ArrangementBackfill, pk: [lineitem.l_orderkey, lineitem.l_linenumber], dist: UpstreamHashShard(lineitem.l_orderkey, lineitem.l_linenumber) } ├── tables: [ StreamScan: 28 ] ├── Upstream └── BatchPlanNode @@ -4247,12 +4210,7 @@ Table 2 { columns: [ supplier_s_name, count ], primary key: [ $0 ASC ], value indices: [ 1 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 3 - ├── columns: [ supplier_s_name, lineitem_l_orderkey, lineitem_l_suppkey, nation_n_nationkey, supplier_s_suppkey, orders_o_orderkey, lineitem_l_linenumber ] - ├── primary key: [ $1 ASC, $3 ASC, $4 ASC, $5 ASC, $6 ASC ] - ├── value indices: [ 0, 1, 2, 3, 4, 5, 6 ] - ├── distribution key: [ 1 ] - └── read pk prefix len hint: 1 + Table 3 { columns: [ supplier_s_name, lineitem_l_orderkey, lineitem_l_suppkey, nation_n_nationkey, supplier_s_suppkey, orders_o_orderkey, lineitem_l_linenumber ], primary key: [ $1 ASC, $3 ASC, $4 ASC, $5 ASC, $6 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6 ], distribution key: [ 1 ], read pk prefix len hint: 1 } Table 4 { columns: [ lineitem_l_orderkey, nation_n_nationkey, supplier_s_suppkey, orders_o_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC ], value indices: [ 5 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -4260,12 +4218,7 @@ Table 6 { columns: [ lineitem_l_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC ], value indices: [ 2 ], distribution key: [ 0 ], read pk prefix len hint: 1 } - Table 7 - ├── columns: [ supplier_s_name, lineitem_l_orderkey, lineitem_l_suppkey, nation_n_nationkey, supplier_s_suppkey, orders_o_orderkey, lineitem_l_linenumber ] - ├── primary key: [ $1 ASC, $3 ASC, $4 ASC, $5 ASC, $6 ASC ] - ├── value indices: [ 0, 1, 2, 3, 4, 5, 6 ] - ├── distribution key: [ 1 ] - └── read pk prefix len hint: 1 + Table 7 { columns: [ supplier_s_name, lineitem_l_orderkey, lineitem_l_suppkey, nation_n_nationkey, supplier_s_suppkey, orders_o_orderkey, lineitem_l_linenumber ], primary key: [ $1 ASC, $3 ASC, $4 ASC, $5 ASC, $6 ASC ], value indices: [ 0, 1, 2, 3, 4, 5, 6 ], distribution key: [ 1 ], read pk prefix len hint: 1 } Table 8 { columns: [ lineitem_l_orderkey, nation_n_nationkey, supplier_s_suppkey, orders_o_orderkey, lineitem_l_linenumber, _degree ], primary key: [ $0 ASC, $1 ASC, $2 ASC, $3 ASC, $4 ASC ], value indices: [ 5 ], distribution key: [ 0 ], read pk prefix len hint: 1 } @@ -4418,9 +4371,9 @@ ├─StreamHashJoin { type: LeftAnti, predicate: customer.c_custkey = orders.o_custkey, output: [customer.c_phone, customer.c_acctbal, customer.c_custkey] } │ ├─StreamExchange { dist: HashShard(customer.c_custkey) } │ │ └─StreamFilter { predicate: In(Substr(customer.c_phone, 1:Int32, 2:Int32), '30':Varchar, 
'24':Varchar, '31':Varchar, '38':Varchar, '25':Varchar, '34':Varchar, '37':Varchar) } - │ │ └─StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_phone, customer.c_acctbal], stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } + │ │ └─StreamTableScan { table: customer, columns: [customer.c_custkey, customer.c_phone, customer.c_acctbal], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } │ └─StreamExchange { dist: HashShard(orders.o_custkey) } - │ └─StreamTableScan { table: orders, columns: [orders.o_custkey, orders.o_orderkey], stream_scan_type: Backfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } + │ └─StreamTableScan { table: orders, columns: [orders.o_custkey, orders.o_orderkey], stream_scan_type: ArrangementBackfill, pk: [orders.o_orderkey], dist: UpstreamHashShard(orders.o_orderkey) } └─StreamExchange { dist: Broadcast } └─StreamProject { exprs: [(sum(sum(customer.c_acctbal)) / sum0(count(customer.c_acctbal))::Decimal) as $expr1] } └─StreamSimpleAgg { aggs: [sum(sum(customer.c_acctbal)), sum0(count(customer.c_acctbal)), count] } @@ -4428,4 +4381,4 @@ └─StreamStatelessSimpleAgg { aggs: [sum(customer.c_acctbal), count(customer.c_acctbal)] } └─StreamProject { exprs: [customer.c_acctbal, customer.c_custkey] } └─StreamFilter { predicate: (customer.c_acctbal > 0.00:Decimal) AND In(Substr(customer.c_phone, 1:Int32, 2:Int32), '30':Varchar, '24':Varchar, '31':Varchar, '38':Varchar, '25':Varchar, '34':Varchar, '37':Varchar) } - └─StreamTableScan { table: customer, columns: [customer.c_acctbal, customer.c_custkey, customer.c_phone], stream_scan_type: Backfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } + └─StreamTableScan { table: customer, columns: [customer.c_acctbal, customer.c_custkey, customer.c_phone], stream_scan_type: ArrangementBackfill, pk: [customer.c_custkey], dist: UpstreamHashShard(customer.c_custkey) } diff --git a/src/frontend/planner_test/tests/testdata/output/union.yaml b/src/frontend/planner_test/tests/testdata/output/union.yaml index 4bbb653ea7b81..4b642194476a1 100644 --- a/src/frontend/planner_test/tests/testdata/output/union.yaml +++ b/src/frontend/planner_test/tests/testdata/output/union.yaml @@ -14,10 +14,10 @@ └─StreamUnion { all: true } ├─StreamExchange { dist: HashShard(t1._row_id, 0:Int32) } │ └─StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, 0:Int32] } - │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2._row_id, 1:Int32) } └─StreamProject { exprs: [t2.a, t2.b, t2.c, t2._row_id, 1:Int32] } - └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a, b, c, t1._row_id(hidden), $src(hidden)], stream_key: [t1._row_id, $src], pk_columns: [t1._row_id, $src], pk_conflict: NoCheck } @@ -28,14 +28,14 @@ Fragment 1 StreamProject { exprs: [t1.a, t1.b, 
t1.c, t1._row_id, 0:Int32] } - └── StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └── StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } ├── tables: [ StreamScan: 0 ] ├── Upstream └── BatchPlanNode Fragment 2 StreamProject { exprs: [t2.a, t2.b, t2.c, t2._row_id, 1:Int32] } - └── StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └── StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode @@ -89,10 +89,10 @@ └─StreamUnion { all: true } ├─StreamExchange { dist: HashShard(t1._row_id, 0:Int32) } │ └─StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, 0:Int32] } - │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2._row_id, 1:Int32) } └─StreamProject { exprs: [t2.a, t2.b, t2.c, t2._row_id, 1:Int32] } - └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a, b, c], stream_key: [a, b, c], pk_columns: [a, b, c], pk_conflict: NoCheck } @@ -108,14 +108,14 @@ Fragment 2 StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, 0:Int32] } - └── StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └── StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode Fragment 3 StreamProject { exprs: [t2.a, t2.b, t2.c, t2._row_id, 1:Int32] } - └── StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └── StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -176,10 +176,10 @@ └─StreamUnion { all: true } ├─StreamExchange { dist: HashShard(t1.a, 0:Int32) } │ └─StreamProject { exprs: [t1.a, t1.b, t1.c, 0:Int32] } - │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: Backfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } + │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: ArrangementBackfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } └─StreamExchange { dist: HashShard(t2.a, 1:Int32) } └─StreamProject { exprs: [t2.a, t2.b, t2.c, 1:Int32] } - └─StreamTableScan { table: t2, columns: [t2.a, t2.b, 
t2.c], stream_scan_type: Backfill, pk: [t2.a], dist: UpstreamHashShard(t2.a) } + └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: ArrangementBackfill, pk: [t2.a], dist: UpstreamHashShard(t2.a) } stream_dist_plan: |+ Fragment 0 StreamMaterialize { columns: [a, b, c], stream_key: [a, b, c], pk_columns: [a, b, c], pk_conflict: NoCheck } @@ -195,14 +195,14 @@ Fragment 2 StreamProject { exprs: [t1.a, t1.b, t1.c, 0:Int32] } - └── StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: Backfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } + └── StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: ArrangementBackfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode Fragment 3 StreamProject { exprs: [t2.a, t2.b, t2.c, 1:Int32] } - └── StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: Backfill, pk: [t2.a], dist: UpstreamHashShard(t2.a) } + └── StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: ArrangementBackfill, pk: [t2.a], dist: UpstreamHashShard(t2.a) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode @@ -330,19 +330,19 @@ └─StreamUnion { all: true } ├─StreamExchange { dist: HashShard(t1._row_id, 0:Int32) } │ └─StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, 0:Int32] } - │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } ├─StreamExchange { dist: HashShard(t2._row_id, 1:Int32) } │ └─StreamProject { exprs: [t2.a, t2.b, t2.c, t2._row_id, 1:Int32] } - │ └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + │ └─StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } ├─StreamExchange { dist: HashShard(t3._row_id, 2:Int32) } │ └─StreamProject { exprs: [t3.a, t3.b, t3.c, t3._row_id, 2:Int32] } - │ └─StreamTableScan { table: t3, columns: [t3.a, t3.b, t3.c, t3._row_id], stream_scan_type: Backfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } + │ └─StreamTableScan { table: t3, columns: [t3.a, t3.b, t3.c, t3._row_id], stream_scan_type: ArrangementBackfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } ├─StreamExchange { dist: HashShard(t4._row_id, 3:Int32) } │ └─StreamProject { exprs: [t4.a, t4.b, t4.c, t4._row_id, 3:Int32] } - │ └─StreamTableScan { table: t4, columns: [t4.a, t4.b, t4.c, t4._row_id], stream_scan_type: Backfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } + │ └─StreamTableScan { table: t4, columns: [t4.a, t4.b, t4.c, t4._row_id], stream_scan_type: ArrangementBackfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } └─StreamExchange { dist: HashShard(t5._row_id, 4:Int32) } └─StreamProject { exprs: [t5.a, t5.b, t5.c, t5._row_id, 4:Int32] } - └─StreamTableScan { table: t5, columns: [t5.a, t5.b, t5.c, t5._row_id], stream_scan_type: Backfill, pk: [t5._row_id], dist: UpstreamHashShard(t5._row_id) } + └─StreamTableScan { table: t5, columns: [t5.a, t5.b, t5.c, t5._row_id], stream_scan_type: ArrangementBackfill, pk: [t5._row_id], dist: UpstreamHashShard(t5._row_id) } stream_dist_plan: 
|+ Fragment 0 StreamMaterialize { columns: [a, b, c, t1._row_id(hidden), $src(hidden)], stream_key: [t1._row_id, $src], pk_columns: [t1._row_id, $src], pk_conflict: NoCheck } @@ -356,35 +356,35 @@ Fragment 1 StreamProject { exprs: [t1.a, t1.b, t1.c, t1._row_id, 0:Int32] } - └── StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + └── StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } ├── tables: [ StreamScan: 0 ] ├── Upstream └── BatchPlanNode Fragment 2 StreamProject { exprs: [t2.a, t2.b, t2.c, t2._row_id, 1:Int32] } - └── StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └── StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode Fragment 3 StreamProject { exprs: [t3.a, t3.b, t3.c, t3._row_id, 2:Int32] } - └── StreamTableScan { table: t3, columns: [t3.a, t3.b, t3.c, t3._row_id], stream_scan_type: Backfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } + └── StreamTableScan { table: t3, columns: [t3.a, t3.b, t3.c, t3._row_id], stream_scan_type: ArrangementBackfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode Fragment 4 StreamProject { exprs: [t4.a, t4.b, t4.c, t4._row_id, 3:Int32] } - └── StreamTableScan { table: t4, columns: [t4.a, t4.b, t4.c, t4._row_id], stream_scan_type: Backfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } + └── StreamTableScan { table: t4, columns: [t4.a, t4.b, t4.c, t4._row_id], stream_scan_type: ArrangementBackfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } ├── tables: [ StreamScan: 3 ] ├── Upstream └── BatchPlanNode Fragment 5 StreamProject { exprs: [t5.a, t5.b, t5.c, t5._row_id, 4:Int32] } - └── StreamTableScan { table: t5, columns: [t5.a, t5.b, t5.c, t5._row_id], stream_scan_type: Backfill, pk: [t5._row_id], dist: UpstreamHashShard(t5._row_id) } + └── StreamTableScan { table: t5, columns: [t5.a, t5.b, t5.c, t5._row_id], stream_scan_type: ArrangementBackfill, pk: [t5._row_id], dist: UpstreamHashShard(t5._row_id) } ├── tables: [ StreamScan: 4 ] ├── Upstream └── BatchPlanNode @@ -457,31 +457,31 @@ Fragment 1 StreamProject { exprs: [t1.a, t1.b, t1.c, t1.a, null:Int64, null:Decimal, null:Serial, 0:Int32] } - └── StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: Backfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } { tables: [ StreamScan: 0 ] } + └── StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: ArrangementBackfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } { tables: [ StreamScan: 0 ] } ├── Upstream └── BatchPlanNode Fragment 2 StreamProject { exprs: [t2.a, t2.b, t2.c, null:Int32, null:Int64, t2.b, null:Serial, 1:Int32] } - └── StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: Backfill, pk: [t2.b], dist: UpstreamHashShard(t2.b) } { tables: [ StreamScan: 1 ] } + └── StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: ArrangementBackfill, pk: [t2.b], dist: UpstreamHashShard(t2.b) } { tables: [ StreamScan: 1 ] } ├── Upstream └── BatchPlanNode Fragment 3 StreamProject { 
exprs: [t3.a, t3.b, t3.c, null:Int32, t3.c, null:Decimal, null:Serial, 2:Int32] } - └── StreamTableScan { table: t3, columns: [t3.a, t3.b, t3.c], stream_scan_type: Backfill, pk: [t3.c], dist: UpstreamHashShard(t3.c) } { tables: [ StreamScan: 2 ] } + └── StreamTableScan { table: t3, columns: [t3.a, t3.b, t3.c], stream_scan_type: ArrangementBackfill, pk: [t3.c], dist: UpstreamHashShard(t3.c) } { tables: [ StreamScan: 2 ] } ├── Upstream └── BatchPlanNode Fragment 4 StreamProject { exprs: [t4.a, t4.b, t4.c, null:Int32, null:Int64, null:Decimal, t4._row_id, 3:Int32] } - └── StreamTableScan { table: t4, columns: [t4.a, t4.b, t4.c, t4._row_id], stream_scan_type: Backfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } { tables: [ StreamScan: 3 ] } + └── StreamTableScan { table: t4, columns: [t4.a, t4.b, t4.c, t4._row_id], stream_scan_type: ArrangementBackfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } { tables: [ StreamScan: 3 ] } ├── Upstream └── BatchPlanNode Fragment 5 StreamProject { exprs: [t5.a, t5.b, t5.c, t5.a, null:Int64, t5.b, null:Serial, 4:Int32] } - └── StreamTableScan { table: t5, columns: [t5.a, t5.b, t5.c], stream_scan_type: Backfill, pk: [t5.a, t5.b], dist: UpstreamHashShard(t5.a, t5.b) } { tables: [ StreamScan: 4 ] } + └── StreamTableScan { table: t5, columns: [t5.a, t5.b, t5.c], stream_scan_type: ArrangementBackfill, pk: [t5.a, t5.b], dist: UpstreamHashShard(t5.a, t5.b) } { tables: [ StreamScan: 4 ] } ├── Upstream └── BatchPlanNode @@ -518,31 +518,31 @@ Fragment 1 StreamProject { exprs: [t1.a, t1.b, t1.c, t1.a, null:Decimal, null:Serial, 0:Int32] } - └── StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: Backfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } { tables: [ StreamScan: 0 ] } + └── StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: ArrangementBackfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } { tables: [ StreamScan: 0 ] } ├── Upstream └── BatchPlanNode Fragment 2 StreamProject { exprs: [t2.a, t2.b, t2.c, null:Int32, t2.b, null:Serial, 1:Int32] } - └── StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: Backfill, pk: [t2.b], dist: UpstreamHashShard(t2.b) } { tables: [ StreamScan: 1 ] } + └── StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: ArrangementBackfill, pk: [t2.b], dist: UpstreamHashShard(t2.b) } { tables: [ StreamScan: 1 ] } ├── Upstream └── BatchPlanNode Fragment 3 StreamProject { exprs: [t3.a, t3.b, t3.c, null:Int32, null:Decimal, t3._row_id, 2:Int32] } - └── StreamTableScan { table: t3, columns: [t3.a, t3.b, t3.c, t3._row_id], stream_scan_type: Backfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } { tables: [ StreamScan: 2 ] } + └── StreamTableScan { table: t3, columns: [t3.a, t3.b, t3.c, t3._row_id], stream_scan_type: ArrangementBackfill, pk: [t3._row_id], dist: UpstreamHashShard(t3._row_id) } { tables: [ StreamScan: 2 ] } ├── Upstream └── BatchPlanNode Fragment 4 StreamProject { exprs: [t4.a, t4.b, t4.c, null:Int32, null:Decimal, t4._row_id, 3:Int32] } - └── StreamTableScan { table: t4, columns: [t4.a, t4.b, t4.c, t4._row_id], stream_scan_type: Backfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } { tables: [ StreamScan: 3 ] } + └── StreamTableScan { table: t4, columns: [t4.a, t4.b, t4.c, t4._row_id], stream_scan_type: ArrangementBackfill, pk: [t4._row_id], dist: UpstreamHashShard(t4._row_id) } { tables: [ StreamScan: 3 ] } ├── Upstream └── BatchPlanNode Fragment 5 StreamProject { exprs: [t5.a, t5.b, t5.c, 
t5.a, t5.b, null:Serial, 4:Int32] } - └── StreamTableScan { table: t5, columns: [t5.a, t5.b, t5.c], stream_scan_type: Backfill, pk: [t5.a, t5.b], dist: UpstreamHashShard(t5.a, t5.b) } { tables: [ StreamScan: 4 ] } + └── StreamTableScan { table: t5, columns: [t5.a, t5.b, t5.c], stream_scan_type: ArrangementBackfill, pk: [t5.a, t5.b], dist: UpstreamHashShard(t5.a, t5.b) } { tables: [ StreamScan: 4 ] } ├── Upstream └── BatchPlanNode @@ -579,35 +579,35 @@ Fragment 1 StreamProject { exprs: [t1.a, t1.b, t1.c, 0:Int32] } - └── StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: Backfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } + └── StreamTableScan { table: t1, columns: [t1.a, t1.b, t1.c], stream_scan_type: ArrangementBackfill, pk: [t1.a], dist: UpstreamHashShard(t1.a) } ├── tables: [ StreamScan: 0 ] ├── Upstream └── BatchPlanNode Fragment 2 StreamProject { exprs: [t2.a, t2.b, t2.c, 1:Int32] } - └── StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: Backfill, pk: [t2.b], dist: UpstreamHashShard(t2.b) } + └── StreamTableScan { table: t2, columns: [t2.a, t2.b, t2.c], stream_scan_type: ArrangementBackfill, pk: [t2.b], dist: UpstreamHashShard(t2.b) } ├── tables: [ StreamScan: 1 ] ├── Upstream └── BatchPlanNode Fragment 3 StreamProject { exprs: [t3.a, t3.b, t3.c, 2:Int32] } - └── StreamTableScan { table: t3, columns: [t3.a, t3.b, t3.c], stream_scan_type: Backfill, pk: [t3.b], dist: UpstreamHashShard(t3.b) } + └── StreamTableScan { table: t3, columns: [t3.a, t3.b, t3.c], stream_scan_type: ArrangementBackfill, pk: [t3.b], dist: UpstreamHashShard(t3.b) } ├── tables: [ StreamScan: 2 ] ├── Upstream └── BatchPlanNode Fragment 4 StreamProject { exprs: [t4.a, t4.b, t4.c, 3:Int32] } - └── StreamTableScan { table: t4, columns: [t4.a, t4.b, t4.c], stream_scan_type: Backfill, pk: [t4.b, t4.a], dist: UpstreamHashShard(t4.b, t4.a) } + └── StreamTableScan { table: t4, columns: [t4.a, t4.b, t4.c], stream_scan_type: ArrangementBackfill, pk: [t4.b, t4.a], dist: UpstreamHashShard(t4.b, t4.a) } ├── tables: [ StreamScan: 3 ] ├── Upstream └── BatchPlanNode Fragment 5 StreamProject { exprs: [t5.a, t5.b, t5.c, 4:Int32] } - └── StreamTableScan { table: t5, columns: [t5.a, t5.b, t5.c], stream_scan_type: Backfill, pk: [t5.a, t5.b], dist: UpstreamHashShard(t5.a, t5.b) } + └── StreamTableScan { table: t5, columns: [t5.a, t5.b, t5.c], stream_scan_type: ArrangementBackfill, pk: [t5.a, t5.b], dist: UpstreamHashShard(t5.a, t5.b) } ├── tables: [ StreamScan: 4 ] ├── Upstream └── BatchPlanNode diff --git a/src/frontend/planner_test/tests/testdata/output/watermark.yaml b/src/frontend/planner_test/tests/testdata/output/watermark.yaml index 77d290e21e9a2..501f7f686b37b 100644 --- a/src/frontend/planner_test/tests/testdata/output/watermark.yaml +++ b/src/frontend/planner_test/tests/testdata/output/watermark.yaml @@ -77,7 +77,7 @@ └─StreamProject { exprs: [count(t.v2), t.ts, t.v1], output_watermarks: [t.ts] } └─StreamHashAgg [append_only] { group_key: [t.ts, t.v1], aggs: [count(t.v2), count], output_watermarks: [t.ts] } └─StreamExchange { dist: HashShard(t.ts, t.v1) } - └─StreamTableScan { table: t, columns: [t.ts, t.v1, t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.ts, t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: inner window join sql: | create table t1 (ts timestamp with time zone, v1 int, v2 int, watermark for 
ts as ts - INTERVAL '1' SECOND) append only; @@ -88,9 +88,9 @@ └─StreamExchange { dist: HashShard(t1.ts, t1._row_id, t2._row_id) } └─StreamHashJoin [window, append_only] { type: Inner, predicate: t1.ts = t2.ts, output_watermarks: [t1.ts, t2.ts], output: [t1.ts, t2.ts, t1.v1, t1.v2, t2.v1, t2.v2, t1._row_id, t2._row_id] } ├─StreamExchange { dist: HashShard(t1.ts) } - │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2.ts) } - └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: left semi window join sql: | create table t1 (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; @@ -101,9 +101,9 @@ └─StreamExchange { dist: HashShard(t1.ts, t1._row_id) } └─StreamHashJoin [window] { type: LeftSemi, predicate: t1.ts = t2.ts, output_watermarks: [t1.ts], output: all } ├─StreamExchange { dist: HashShard(t1.ts) } - │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2.ts) } - └─StreamTableScan { table: t2, columns: [t2.ts, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.ts, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: interval join(left outer join) sql: | create table t1 (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; @@ -120,10 +120,10 @@ └─StreamHashJoin [interval] { type: LeftOuter, predicate: t1.v1 = t2.v1 AND (t1.ts >= $expr2) AND ($expr1 <= t2.ts), conditions_to_clean_left_state_table: (t1.ts >= $expr2), conditions_to_clean_right_state_table: ($expr1 <= t2.ts), output_watermarks: [t1.ts, t2.ts], output: [t1.ts, t1.v1, t1.v2, t2.ts, t2.v1, t2.v2, t1._row_id, t2._row_id] } ├─StreamExchange { dist: HashShard(t1.v1) } │ └─StreamProject { exprs: [t1.ts, t1.v1, t1.v2, AddWithTimeZone(t1.ts, '00:00:01':Interval, 'UTC':Varchar) as $expr1, t1._row_id], output_watermarks: [t1.ts, $expr1] } - │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2.v1) } └─StreamProject { exprs: [t2.ts, t2.v1, t2.v2, AddWithTimeZone(t2.ts, '00:00:01':Interval, 'UTC':Varchar) as $expr2, t2._row_id], output_watermarks: [t2.ts, $expr2] } - └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], stream_scan_type: 
Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: interval join (inner join) sql: | create table t1 (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; @@ -140,10 +140,10 @@ └─StreamHashJoin [interval, append_only] { type: Inner, predicate: t1.v1 = t2.v1 AND (t1.ts >= $expr2) AND ($expr1 <= t2.ts), conditions_to_clean_left_state_table: (t1.ts >= $expr2), conditions_to_clean_right_state_table: ($expr1 <= t2.ts), output_watermarks: [t1.ts, t2.ts], output: [t1.ts, t1.v1, t1.v2, t2.ts, t2.v1, t2.v2, t1._row_id, t2._row_id] } ├─StreamExchange { dist: HashShard(t1.v1) } │ └─StreamProject { exprs: [t1.ts, t1.v1, t1.v2, AddWithTimeZone(t1.ts, '00:00:01':Interval, 'UTC':Varchar) as $expr1, t1._row_id], output_watermarks: [t1.ts, $expr1] } - │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2.v1) } └─StreamProject { exprs: [t2.ts, t2.v1, t2.v2, AddWithTimeZone(t2.ts, '00:00:01':Interval, 'UTC':Varchar) as $expr2, t2._row_id], output_watermarks: [t2.ts, $expr2] } - └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: union all sql: | create table t1 (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; @@ -154,10 +154,10 @@ └─StreamUnion { all: true, output_watermarks: [t1.ts] } ├─StreamExchange { dist: HashShard(t1._row_id, 0:Int32) } │ └─StreamProject { exprs: [t1.ts, t1.v1, t1.v2, t1._row_id, 0:Int32], output_watermarks: [t1.ts] } - │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], stream_scan_type: Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2._row_id, 1:Int32) } └─StreamProject { exprs: [t2.ts, t2.v1, t2.v2, t2._row_id, 1:Int32], output_watermarks: [t2.ts] } - └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: union sql: | create table t1 (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; @@ -171,10 +171,10 @@ └─StreamUnion { all: true, output_watermarks: [t1.ts] } ├─StreamExchange { dist: HashShard(t1._row_id, 0:Int32) } │ └─StreamProject { exprs: [t1.ts, t1.v1, t1.v2, t1._row_id, 0:Int32], output_watermarks: [t1.ts] } - │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], stream_scan_type: 
Backfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } + │ └─StreamTableScan { table: t1, columns: [t1.ts, t1.v1, t1.v2, t1._row_id], stream_scan_type: ArrangementBackfill, pk: [t1._row_id], dist: UpstreamHashShard(t1._row_id) } └─StreamExchange { dist: HashShard(t2._row_id, 1:Int32) } └─StreamProject { exprs: [t2.ts, t2.v1, t2.v2, t2._row_id, 1:Int32], output_watermarks: [t2.ts] } - └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], stream_scan_type: Backfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } + └─StreamTableScan { table: t2, columns: [t2.ts, t2.v1, t2.v2, t2._row_id], stream_scan_type: ArrangementBackfill, pk: [t2._row_id], dist: UpstreamHashShard(t2._row_id) } - name: tumble sql: | create table t (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; @@ -183,7 +183,7 @@ StreamMaterialize { columns: [ts, v1, v2, window_start, window_end, t._row_id(hidden)], stream_key: [t._row_id], pk_columns: [t._row_id], pk_conflict: NoCheck, watermark_columns: [ts, window_start, window_end] } └─StreamProject { exprs: [t.ts, t.v1, t.v2, $expr1, AddWithTimeZone($expr1, '00:03:00':Interval, 'UTC':Varchar) as $expr2, t._row_id], output_watermarks: [t.ts, $expr1, $expr2] } └─StreamProject { exprs: [t.ts, t.v1, t.v2, TumbleStart(t.ts, '00:03:00':Interval) as $expr1, t._row_id], output_watermarks: [t.ts, $expr1] } - └─StreamTableScan { table: t, columns: [t.ts, t.v1, t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.ts, t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: hop all sql: | create table t (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; @@ -192,7 +192,7 @@ StreamMaterialize { columns: [ts, v1, v2, window_start, window_end, t._row_id(hidden)], stream_key: [t._row_id, window_start, window_end], pk_columns: [t._row_id, window_start, window_end], pk_conflict: NoCheck, watermark_columns: [ts, window_start, window_end] } └─StreamHopWindow { time_col: t.ts, slide: 00:01:00, size: 00:03:00, output: [t.ts, t.v1, t.v2, window_start, window_end, t._row_id], output_watermarks: [t.ts, window_start, window_end] } └─StreamFilter { predicate: IsNotNull(t.ts) } - └─StreamTableScan { table: t, columns: [t.ts, t.v1, t.v2, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.ts, t.v1, t.v2, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: hop ts sql: | create table t (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; @@ -201,7 +201,7 @@ StreamMaterialize { columns: [ts, window_start(hidden), t._row_id(hidden)], stream_key: [t._row_id, window_start], pk_columns: [t._row_id, window_start], pk_conflict: NoCheck, watermark_columns: [ts, window_start(hidden)] } └─StreamHopWindow { time_col: t.ts, slide: 00:01:00, size: 00:03:00, output: [t.ts, window_start, t._row_id], output_watermarks: [t.ts, window_start] } └─StreamFilter { predicate: IsNotNull(t.ts) } - └─StreamTableScan { table: t, columns: [t.ts, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.ts, t._row_id], stream_scan_type: ArrangementBackfill, pk: 
[t._row_id], dist: UpstreamHashShard(t._row_id) } - name: hop start sql: | create table t (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; @@ -210,7 +210,7 @@ StreamMaterialize { columns: [window_end, t._row_id(hidden)], stream_key: [t._row_id, window_end], pk_columns: [t._row_id, window_end], pk_conflict: NoCheck, watermark_columns: [window_end] } └─StreamHopWindow { time_col: t.ts, slide: 00:01:00, size: 00:03:00, output: [window_end, t._row_id], output_watermarks: [window_end] } └─StreamFilter { predicate: IsNotNull(t.ts) } - └─StreamTableScan { table: t, columns: [t.ts, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.ts, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: hop end sql: | create table t (ts timestamp with time zone, v1 int, v2 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; @@ -219,7 +219,7 @@ StreamMaterialize { columns: [window_start, t._row_id(hidden)], stream_key: [t._row_id, window_start], pk_columns: [t._row_id, window_start], pk_conflict: NoCheck, watermark_columns: [window_start] } └─StreamHopWindow { time_col: t.ts, slide: 00:01:00, size: 00:03:00, output: [window_start, t._row_id], output_watermarks: [window_start] } └─StreamFilter { predicate: IsNotNull(t.ts) } - └─StreamTableScan { table: t, columns: [t.ts, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.ts, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: unnest sql: | create table t (ts timestamp with time zone, v1 int, watermark for ts as ts - INTERVAL '1' SECOND) append only; diff --git a/src/frontend/planner_test/tests/testdata/output/with_ordinality.yaml b/src/frontend/planner_test/tests/testdata/output/with_ordinality.yaml index faae490250806..dbe3f6efdbefd 100644 --- a/src/frontend/planner_test/tests/testdata/output/with_ordinality.yaml +++ b/src/frontend/planner_test/tests/testdata/output/with_ordinality.yaml @@ -28,12 +28,12 @@ └─StreamProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, t._row_id, projected_row_id] } └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, projected_row_id, t.arr, Unnest($0), t._row_id] } ├─StreamExchange { dist: HashShard(t.arr) } - │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamProjectSet { select_list: [$0, Unnest($0)] } └─StreamProject { exprs: [t.arr] } └─StreamHashAgg { group_key: [t.arr], aggs: [count] } └─StreamExchange { dist: HashShard(t.arr) } - └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t(x int , arr int[]); select * from t cross join unnest(arr) WITH ORDINALITY as foo; @@ -52,12 +52,12 @@ └─StreamProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, 
t._row_id, projected_row_id] } └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, projected_row_id, t.arr, Unnest($0), t._row_id] } ├─StreamExchange { dist: HashShard(t.arr) } - │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamProjectSet { select_list: [$0, Unnest($0)] } └─StreamProject { exprs: [t.arr] } └─StreamHashAgg { group_key: [t.arr], aggs: [count] } └─StreamExchange { dist: HashShard(t.arr) } - └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t(x int , arr int[]); select * from t cross join unnest(arr) WITH ORDINALITY as foo(a); @@ -76,12 +76,12 @@ └─StreamProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, t._row_id, projected_row_id] } └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, projected_row_id, t.arr, Unnest($0), t._row_id] } ├─StreamExchange { dist: HashShard(t.arr) } - │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamProjectSet { select_list: [$0, Unnest($0)] } └─StreamProject { exprs: [t.arr] } └─StreamHashAgg { group_key: [t.arr], aggs: [count] } └─StreamExchange { dist: HashShard(t.arr) } - └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t(x int , arr int[]); select * from t cross join unnest(arr) WITH ORDINALITY as foo(a,ord); @@ -100,12 +100,12 @@ └─StreamProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, t._row_id, projected_row_id] } └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, projected_row_id, t.arr, Unnest($0), t._row_id] } ├─StreamExchange { dist: HashShard(t.arr) } - │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamProjectSet { select_list: [$0, Unnest($0)] } └─StreamProject { exprs: [t.arr] } └─StreamHashAgg { group_key: [t.arr], aggs: [count] } └─StreamExchange { dist: HashShard(t.arr) } - └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - name: use alias columns explicitlity sql: | create 
table t(x int , arr int[]); @@ -125,12 +125,12 @@ └─StreamProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, t._row_id, projected_row_id] } └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, projected_row_id, t.arr, Unnest($0), t._row_id] } ├─StreamExchange { dist: HashShard(t.arr) } - │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamProjectSet { select_list: [$0, Unnest($0)] } └─StreamProject { exprs: [t.arr] } └─StreamHashAgg { group_key: [t.arr], aggs: [count] } └─StreamExchange { dist: HashShard(t.arr) } - └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | create table t(x int , arr int[]); select * from t cross join unnest(arr) WITH ORDINALITY as foo(a,ord,bar); @@ -166,12 +166,12 @@ │ └─StreamProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, t._row_id, projected_row_id] } │ └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, projected_row_id, t.arr, Unnest($0), t._row_id] } │ ├─StreamExchange { dist: HashShard(t.arr) } - │ │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } │ └─StreamProjectSet { select_list: [$0, Unnest($0)] } │ └─StreamProject { exprs: [t.arr] } │ └─StreamHashAgg { group_key: [t.arr], aggs: [count] } │ └─StreamExchange { dist: HashShard(t.arr) } - │ └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamProjectSet { select_list: [$0, Unnest($0)] } └─StreamProject { exprs: [t.arr] } └─StreamHashAgg { group_key: [t.arr], aggs: [count] } @@ -179,12 +179,12 @@ └─StreamProject { exprs: [t.x, t.arr, Unnest($0), (projected_row_id + 1:Int64) as $expr1, t._row_id, projected_row_id] } └─StreamHashJoin { type: Inner, predicate: t.arr IS NOT DISTINCT FROM t.arr, output: [t.x, t.arr, projected_row_id, t.arr, Unnest($0), t._row_id] } ├─StreamExchange { dist: HashShard(t.arr) } - │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + │ └─StreamTableScan { table: t, columns: [t.x, t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } └─StreamProjectSet { select_list: [$0, Unnest($0)] } └─StreamProject { exprs: [t.arr] } └─StreamHashAgg { group_key: [t.arr], aggs: [count] } └─StreamExchange { dist: HashShard(t.arr) } - └─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: Backfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } + 
└─StreamTableScan { table: t, columns: [t.arr, t._row_id], stream_scan_type: ArrangementBackfill, pk: [t._row_id], dist: UpstreamHashShard(t._row_id) } - sql: | select * from abs(1) WITH ORDINALITY; batch_plan: 'BatchValues { rows: [[1:Int32, 1:Int64]] }'