Skip to content

Commit

Permalink
Merge branch 'main' into dependabot/cargo/regex-1.10.0
Browse files Browse the repository at this point in the history
  • Loading branch information
xxchan authored Oct 12, 2023
2 parents 7d021bf + f9e3d99 commit ad30775
Show file tree
Hide file tree
Showing 120 changed files with 2,764 additions and 666 deletions.
7 changes: 0 additions & 7 deletions ci/scripts/backfill-test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -30,13 +30,6 @@ git config --global --add safe.directory /risingwave

download_and_prepare_rw "$profile" common

echo "--- e2e, ci-backfill, build"
cargo make ci-start ci-backfill

################ TESTS

echo "--- e2e, ci-backfill, run backfill test"
./ci/scripts/run-backfill-tests.sh

echo "--- Kill cluster"
cargo make kill
60 changes: 52 additions & 8 deletions ci/scripts/run-backfill-tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -29,15 +29,59 @@ flush() {
run_sql "FLUSH;"
}

run_sql_file "$PARENT_PATH"/sql/backfill/create_base_table.sql
# Basic backfill e2e test.
# Starts a fresh cluster, creates the base table, seeds a snapshot,
# then runs inserts concurrently with materialized-view creation so the
# MV must backfill from the snapshot while upstream keeps changing.
# NOTE(review): the scraped diff showed the snapshot/insert section,
# `wait`, and the select duplicated (both diff columns interleaved);
# deduplicated here to the intended single sequence.
basic() {
  echo "--- e2e, test_backfill_basic"
  cargo make ci-start ci-backfill
  run_sql_file "$PARENT_PATH"/sql/backfill/create_base_table.sql

  # Provide snapshot
  run_sql_file "$PARENT_PATH"/sql/backfill/insert.sql
  # Concurrent upstream inserts while the MV backfills.
  run_sql_file "$PARENT_PATH"/sql/backfill/insert.sql &
  run_sql_file "$PARENT_PATH"/sql/backfill/create_mv.sql &

  # Block until both the background insert and the MV creation finish.
  wait

  # </dev/null keeps the SQL client from consuming this script's stdin.
  run_sql_file "$PARENT_PATH"/sql/backfill/select.sql </dev/null

  echo "Backfill tests complete"
  echo "--- Kill cluster"
  cargo make kill
}

# Lots of upstream tombstone, backfill should still proceed.
# Regression test: a datagen source generates rows at a very high rate
# while a background loop continuously DELETEs them (producing upstream
# tombstones); creating the materialized view must still complete.
test_backfill_tombstone() {
echo "--- e2e, test_backfill_tombstone"
cargo make ci-start ci-backfill
./risedev psql -c "
CREATE TABLE tomb (v1 int)
WITH (
connector = 'datagen',
fields.v1._.kind = 'sequence',
datagen.rows.per.second = '10000000'
)
FORMAT PLAIN
ENCODE JSON;
"

# Give datagen time to accumulate data before the delete churn starts.
sleep 30

# Background loop: DELETE + FLUSH once per second to pile up tombstones
# while the MV backfills below. `set -euo pipefail` applies only inside
# this child shell; its output is captured in deletes.log.
bash -c '
set -euo pipefail
for i in $(seq 1 1000)
do
./risedev psql -c "DELETE FROM tomb; FLUSH;"
sleep 1
done
' 1>deletes.log 2>&1 &

# The CREATE MATERIALIZED VIEW must finish despite the concurrent
# tombstone churn; if backfill stalls, this command hangs and the CI
# step times out.
./risedev psql -c "CREATE MATERIALIZED VIEW m1 as select * from tomb;"
echo "--- Kill cluster"
cargo make kill
}

# Entry point: run each backfill scenario in sequence. Each scenario
# starts and kills its own cluster, so ordering carries no shared state.
main() {
basic
test_backfill_tombstone
}

main
4 changes: 2 additions & 2 deletions docker/Dockerfile.hdfs
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ ENV JAVA_HOME ${JAVA_HOME_PATH}
ENV LD_LIBRARY_PATH ${JAVA_HOME_PATH}/lib/server:${LD_LIBRARY_PATH}

RUN cargo fetch && \
cargo build -p risingwave_cmd_all --release --features "rw-static-link" && \
cargo build -p risingwave_cmd_all --release -p risingwave_object_store --features hdfs-backend --features "rw-static-link" && \
mkdir -p /risingwave/bin && \
mv /risingwave/target/release/risingwave /risingwave/bin/ && \
mv /risingwave/target/release/risingwave.dwp /risingwave/bin/ && \
Expand Down Expand Up @@ -94,5 +94,5 @@ ENV RW_DASHBOARD_UI_PATH /risingwave/ui
# Set default connector libs path
ENV CONNECTOR_LIBS_PATH /risingwave/bin/connector-node/libs

ENTRYPOINT [ "/risingwave/hdfs_env.sh" ]
ENTRYPOINT [ "/risingwave/bin/risingwave" ]
CMD [ "playground" ]
2 changes: 1 addition & 1 deletion docker/dashboards/risingwave-dev-dashboard.json

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion docker/dashboards/risingwave-user-dashboard.json

Large diffs are not rendered by default.

21 changes: 21 additions & 0 deletions e2e_test/batch/basic/generate_series.slt.part
Original file line number Diff line number Diff line change
Expand Up @@ -127,3 +127,24 @@ SELECT * FROM generate_series(0.1::numeric, 2.1::numeric, 0.5::numeric)
1.1
1.6
2.1

statement error start value cannot be infinity
SELECT * FROM generate_series('infinity'::numeric,10::numeric);

statement error stop value cannot be infinity
SELECT * FROM generate_series(0::numeric,'-infinity'::numeric);

statement error stop value cannot be NaN
SELECT * FROM generate_series(0::numeric,'nan'::numeric);

statement error start value cannot be infinity
SELECT * FROM generate_series('infinity'::numeric,10::numeric,0::numeric);

statement error stop value cannot be infinity
SELECT * FROM generate_series(0::numeric,'-infinity'::numeric,0::numeric);

statement error step value cannot be NaN
SELECT * FROM generate_series(0::numeric,10::numeric,'nan'::numeric);

statement error start value cannot be infinity
SELECT * FROM generate_series('-infinity'::numeric,'infinity'::numeric,'nan'::numeric);
21 changes: 21 additions & 0 deletions e2e_test/batch/basic/range.slt.part
Original file line number Diff line number Diff line change
Expand Up @@ -114,6 +114,27 @@ SELECT * FROM range(0.1::numeric, 2.1::numeric, 0.5::numeric)
1.1
1.6

statement error start value cannot be infinity
SELECT * FROM range('infinity'::numeric,10::numeric);

statement error stop value cannot be infinity
SELECT * FROM range(0::numeric,'-infinity'::numeric);

statement error stop value cannot be NaN
SELECT * FROM range(0::numeric,'nan'::numeric);

statement error start value cannot be infinity
SELECT * FROM range('infinity'::numeric,10::numeric,0::numeric);

statement error stop value cannot be infinity
SELECT * FROM range(0::numeric,'-infinity'::numeric,0::numeric);

statement error step value cannot be NaN
SELECT * FROM range(0::numeric,10::numeric,'nan'::numeric);

statement error start value cannot be infinity
SELECT * FROM range('-infinity'::numeric,'infinity'::numeric,'nan'::numeric);

# test table function with aliases
query I
SELECT alias from range(1,2) alias;
Expand Down
42 changes: 38 additions & 4 deletions e2e_test/batch/basic/table_with_default_columns.slt.part
Original file line number Diff line number Diff line change
Expand Up @@ -72,11 +72,45 @@ select * from t2;
1 2
2 2

statement error db error: ERROR: QueryError: Bind error: impure default expr is not supported.
create table tt (v1 time default now());
# `now()` as default value
statement ok
alter table t2 add column v3 timestamp with time zone default now();

query IT
select v1, v3 >= date '2021-01-01' as later_than_2021 from t2;
----
1 t
2 t

# `now()` filled for historical data should be the same
query II
select max(v1), count(*) from t2 group by v3 order by v3;
----
2 2

statement error db error: ERROR: QueryError: Bind error: impure default expr is not supported.
alter table t2 add column v3 timestamptz default now();
statement ok
flush;

statement ok
insert into t2 values (3);

# Newly inserted record should have a later timestamp
query II
select max(v1), count(*) from t2 group by v3 order by v3;
----
2 2
3 1

# Historical data can be correctly updated
statement ok
update t2 set v3 = '2000-01-01 00:00:00+00:00' where v1 = 1;

query II
select max(v1), count(*) from t2 group by v3 order by v3;
----
1 1
2 1
3 1

statement ok
drop table t1;
Expand Down
5 changes: 5 additions & 0 deletions e2e_test/sink/remote/jdbc.check.pg.slt
Original file line number Diff line number Diff line change
Expand Up @@ -39,3 +39,8 @@ select * from biz.t2 order by "aBc";
----
1
2

query IT
select * from t1_uuid;
----
221 74605c5a-a7bb-4b3b-8742-2a12e9709dea hello world
16 changes: 16 additions & 0 deletions e2e_test/sink/remote/jdbc.load.slt
Original file line number Diff line number Diff line change
Expand Up @@ -144,6 +144,22 @@ CREATE SINK s_pg_t2 FROM tt2 WITH (
force_append_only=true
);


statement ok
create table t1_uuid (v1 int primary key, v2 varchar, v3 varchar);

statement ok
CREATE SINK s1_uuid FROM t1_uuid WITH (
connector='jdbc',
jdbc.url='jdbc:postgresql://db:5432/test?user=test&password=connector',
table.name='t1_uuid',
primary_key='v1',
type='upsert'
);

statement ok
INSERT INTO t1_uuid values (221, '74605c5a-a7bb-4b3b-8742-2a12e9709dea', 'hello world');

statement ok
INSERT INTO tt2 VALUES
(1),
Expand Down
2 changes: 2 additions & 0 deletions e2e_test/sink/remote/pg_create_table.sql
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,8 @@ CREATE TABLE t_types (
jsonb_column JSONB
);

CREATE TABLE t1_uuid (v1 int primary key, v2 uuid, v3 varchar);

CREATE SCHEMA biz;
CREATE TABLE biz.t_types (
id BIGINT PRIMARY KEY,
Expand Down
Loading

0 comments on commit ad30775

Please sign in to comment.