Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

DO NOT MERGE: Notes about the RW + TiDB presentation in London #16667

Closed
wants to merge 11 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
55 changes: 55 additions & 0 deletions docker-compose-something.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
---
# Minimal single-node TiDB cluster for the RW + TiDB demo:
# one Placement Driver (pd), one storage node (tikv), one SQL frontend (tidb).
# NOTE(review): indentation was lost in the page scrape; reconstructed here as
# standard 2-space compose nesting. Content values are unchanged.
version: "2.1"

services:
  # Placement Driver: holds cluster metadata and schedules TiKV; starts first.
  pd:
    image: pingcap/pd:latest
    ports:
      - "2379:2379"
    volumes:
      - ./config/pd.toml:/pd.toml
      - ./logs:/logs
    command:
      - --client-urls=http://0.0.0.0:2379
      - --peer-urls=http://0.0.0.0:2380
      - --advertise-client-urls=http://pd:2379
      - --advertise-peer-urls=http://pd:2380
      - --initial-cluster=pd=http://pd:2380
      - --data-dir=/data/pd
      - --config=/pd.toml
      - --log-file=/logs/pd.log
    restart: on-failure

  # TiKV: distributed key-value storage engine; registers itself with pd.
  # NOTE(review): --data-dir points inside the container with no volume, so
  # stored data does not survive container removal — confirm this is intended
  # for a throwaway demo.
  tikv:
    image: pingcap/tikv:latest
    ports:
      - "20160:20160"
    volumes:
      - ./config/tikv.toml:/tikv.toml
      - ./logs:/logs
    command:
      - --addr=0.0.0.0:20160
      - --advertise-addr=tikv:20160
      - --data-dir=/data/tikv
      - --pd=pd:2379
      - --config=/tikv.toml
      - --log-file=/logs/tikv.log
    depends_on:
      - "pd"
    restart: on-failure

  # TiDB: MySQL-compatible SQL layer; clients connect on host port 4000.
  tidb:
    image: pingcap/tidb:latest
    ports:
      - "4000:4000"
    volumes:
      - ./config/tidb.toml:/tidb.toml
      - ./logs:/logs
    command:
      - --store=tikv
      - --path=pd:2379
      - --config=/tidb.toml
      - --log-file=/logs/tidb.log
      - --advertise-address=tidb
    depends_on:
      - "tikv"
    restart: on-failure
1 change: 1 addition & 0 deletions env_vars.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Selects which integration-test case to run — presumably read by the CI /
# integration-test runner scripts (not visible here; confirm against caller).
export INTEGRATION_TEST_CASE=tidb-cdc-sink
19 changes: 9 additions & 10 deletions integration_tests/tidb-cdc-sink/create_mv.sql
Original file line number Diff line number Diff line change
Expand Up @@ -6,21 +6,20 @@ CREATE MATERIALIZED VIEW hot_hashtags AS WITH tags AS (
unnest(regexp_matches(tweet.text, '#\w+', 'g')) AS hashtag,
tweet.created_at AT TIME ZONE 'UTC' AS created_at
FROM
tweet JOIN user
ON
tweet.author_id = user.id
tweet
JOIN user ON tweet.author_id = user.id
)
SELECT
hashtag,
COUNT(*) AS hashtag_occurrences,
window_start
COUNT(*) AS hashtag_occurrences
FROM
TUMBLE(tags, created_at, INTERVAL '5 minute')
tags
GROUP BY
hashtag,
window_start
hashtag
ORDER BY
hashtag_occurrences;
hashtag_occurrences DESC
LIMIT
10;

CREATE MATERIALIZED VIEW datatype_c0_boolean AS
SELECT
Expand All @@ -29,4 +28,4 @@ SELECT
FROM
datatype
GROUP BY
c0_boolean;
c0_boolean;
34 changes: 18 additions & 16 deletions integration_tests/tidb-cdc-sink/create_sink.sql
Original file line number Diff line number Diff line change
@@ -1,17 +1,19 @@
CREATE SINK hot_hashtags_sink FROM hot_hashtags
WITH (
connector='jdbc',
jdbc.url='jdbc:mysql://tidb:4000/test?user=root&password=',
table.name='hot_hashtags',
type='upsert',
primary_key='window_start,hashtag'
);
CREATE SINK hot_hashtags_sink
FROM
hot_hashtags WITH (
connector = 'jdbc',
jdbc.url = 'jdbc:mysql://tidb:4000/test?user=root&password=',
table.name = 'hot_hashtags',
type = 'upsert',
primary_key = 'hashtag'
);

CREATE SINK tidb_sink_datatypes_sink FROM tidb_sink_datatypes
WITH (
connector='jdbc',
jdbc.url='jdbc:mysql://tidb:4000/test?user=root&password=',
table.name='tidb_sink_datatypes',
type='upsert',
primary_key='id'
);
CREATE SINK tidb_sink_datatypes_sink
FROM
tidb_sink_datatypes WITH (
connector = 'jdbc',
jdbc.url = 'jdbc:mysql://tidb:4000/test?user=root&password=',
table.name = 'tidb_sink_datatypes',
type = 'upsert',
primary_key = 'id'
);
198 changes: 146 additions & 52 deletions integration_tests/tidb-cdc-sink/create_source.sql

Large diffs are not rendered by default.

238 changes: 221 additions & 17 deletions integration_tests/tidb-cdc-sink/docker-compose.yml
Original file line number Diff line number Diff line change
@@ -1,26 +1,230 @@
---
version: '3'
version: "3"
x-image: &image
image: ${RW_IMAGE:-risingwavelabs/risingwave:v1.8.1}
services:
risingwave-standalone:
extends:
file: ../../docker/docker-compose.yml
service: risingwave-standalone
<<: *image
command: "standalone --meta-opts=\" \
--listen-addr 0.0.0.0:5690 \
--advertise-addr 0.0.0.0:5690 \
--dashboard-host 0.0.0.0:5691 \
--prometheus-host 0.0.0.0:1250 \
--prometheus-endpoint http://prometheus-0:9500 \
--backend etcd \
--etcd-endpoints etcd-0:2388 \
--state-store hummock+minio://hummockadmin:hummockadmin@minio-0:9301/hummock001 \
--data-directory hummock_001 \
--config-path /risingwave.toml\" \
--compute-opts=\" \
--config-path /risingwave.toml \
--listen-addr 0.0.0.0:5688 \
--prometheus-listener-addr 0.0.0.0:1250 \
--advertise-addr 0.0.0.0:5688 \
--async-stack-trace verbose \
#--parallelism 4 \
#--total-memory-bytes 8589934592 \
--role both \
--meta-address http://0.0.0.0:5690\" \
--frontend-opts=\" \
--config-path /risingwave.toml \
--listen-addr 0.0.0.0:4566 \
--advertise-addr 0.0.0.0:4566 \
--prometheus-listener-addr 0.0.0.0:1250 \
--health-check-listener-addr 0.0.0.0:6786 \
--meta-addr http://0.0.0.0:5690\" \
--compactor-opts=\" \
--listen-addr 0.0.0.0:6660 \
--prometheus-listener-addr 0.0.0.0:1250 \
--advertise-addr 0.0.0.0:6660 \
--meta-address http://0.0.0.0:5690\""
expose:
- "6660"
- "4566"
- "5688"
- "5690"
- "1250"
- "5691"
ports:
- "4566:4566"
- "5690:5690"
- "5691:5691"
- "1250:1250"
depends_on:
- etcd-0
- minio-0
volumes:
- "./risingwave.toml:/risingwave.toml"
environment:
RUST_BACKTRACE: "1"
# If ENABLE_TELEMETRY is not set, telemetry will start by default
ENABLE_TELEMETRY: ${ENABLE_TELEMETRY:-true}
container_name: risingwave-standalone
healthcheck:
test:
- CMD-SHELL
- bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/6660; exit $$?;'
- bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/5688; exit $$?;'
- bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/4566; exit $$?;'
- bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/5690; exit $$?;'
interval: 1s
timeout: 5s
restart: always
deploy:
resources:
limits:
memory: 28G
reservations:
memory: 28G

etcd-0:
extends:
file: ../../docker/docker-compose.yml
service: etcd-0
image: "quay.io/coreos/etcd:v3.5.10"
command:
- /usr/local/bin/etcd
- "--listen-client-urls"
- "http://0.0.0.0:2388"
- "--advertise-client-urls"
- "http://etcd-0:2388"
- "--listen-peer-urls"
- "http://0.0.0.0:2389"
- "--initial-advertise-peer-urls"
- "http://etcd-0:2389"
- "--listen-metrics-urls"
- "http://0.0.0.0:2379"
- "--name"
- risedev-meta
- "--max-txn-ops"
- "999999"
- "--max-request-bytes"
- "10485760"
- "--auto-compaction-mode"
- periodic
- "--auto-compaction-retention"
- 1m
- "--snapshot-count"
- "10000"
- "--data-dir"
- /etcd-data
expose:
- "2388"
ports:
- "2388:2388"
- "2389:2389"
depends_on: []
volumes:
- "etcd-0:/etcd-data"
environment: {}
container_name: etcd-0
healthcheck:
test:
- CMD
- etcdctl
- --endpoints=http://localhost:2388
- endpoint
- health
interval: 1s
timeout: 5s
retries: 5
restart: always

grafana-0:
extends:
file: ../../docker/docker-compose.yml
service: grafana-0
image: "grafana/grafana-oss:latest"
command: []
expose:
- "3001"
ports:
- "3001:3001"
depends_on: []
volumes:
- "grafana-0:/var/lib/grafana"
- "./grafana.ini:/etc/grafana/grafana.ini"
- "./grafana-risedev-datasource.yml:/etc/grafana/provisioning/datasources/grafana-risedev-datasource.yml"
- "./grafana-risedev-dashboard.yml:/etc/grafana/provisioning/dashboards/grafana-risedev-dashboard.yml"
- "./dashboards:/dashboards"
environment: {}
container_name: grafana-0
healthcheck:
test:
- CMD-SHELL
- bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/3001; exit $$?;'
interval: 1s
timeout: 5s
retries: 5
restart: always

minio-0:
extends:
file: ../../docker/docker-compose.yml
service: minio-0
image: "quay.io/minio/minio:latest"
command:
- server
- "--address"
- "0.0.0.0:9301"
- "--console-address"
- "0.0.0.0:9400"
- /data
expose:
- "9301"
- "9400"
ports:
- "9301:9301"
- "9400:9400"
depends_on: []
volumes:
- "minio-0:/data"
entrypoint: "

/bin/sh -c '

set -e

mkdir -p \"/data/hummock001\"

/usr/bin/docker-entrypoint.sh \"$$0\" \"$$@\"

'"
environment:
MINIO_CI_CD: "1"
MINIO_PROMETHEUS_AUTH_TYPE: public
MINIO_PROMETHEUS_URL: "http://prometheus-0:9500"
MINIO_ROOT_PASSWORD: hummockadmin
MINIO_ROOT_USER: hummockadmin
MINIO_DOMAIN: "minio-0"
container_name: minio-0
healthcheck:
test:
- CMD-SHELL
- bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/9301; exit $$?;'
interval: 1s
timeout: 5s
retries: 5
restart: always

prometheus-0:
extends:
file: ../../docker/docker-compose.yml
service: prometheus-0
image: "prom/prometheus:latest"
command:
- "--config.file=/etc/prometheus/prometheus.yml"
- "--storage.tsdb.path=/prometheus"
- "--web.console.libraries=/usr/share/prometheus/console_libraries"
- "--web.console.templates=/usr/share/prometheus/consoles"
- "--web.listen-address=0.0.0.0:9500"
- "--storage.tsdb.retention.time=30d"
expose:
- "9500"
ports:
- "9500:9500"
depends_on: []
volumes:
- "prometheus-0:/prometheus"
- "./prometheus.yaml:/etc/prometheus/prometheus.yml"
environment: {}
container_name: prometheus-0
healthcheck:
test:
- CMD-SHELL
- sh -c 'printf "GET /-/healthy HTTP/1.0\n\n" | nc localhost 9500; exit $$?;'
interval: 1s
timeout: 5s
retries: 5
restart: always

#=================== TiDB & TiCDC components ==================
ticdc-controller:
Expand Down Expand Up @@ -163,7 +367,7 @@ services:
# Exposes 9092 for external connections to the broker
# Use kafka:29092 for connections internal on the docker network
# See https://rmoff.net/2018/08/02/kafka-listeners-explained/ for details
- 9092:9092
- 29092:29092
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
Expand Down
Loading
Loading