chore: run fuzz tests with kafka remote wal (#4105)
* chore: add fuzz tests with kafka

* chore(ci): use minio

* chore: add empty line

* chore(ci): refactor

* chore: add empty line

* fix: update config

* fix: add default value for `MetaClientOptions`

* fix: remove redundant `debug_assert`

* chore: run fuzz tests with disk cache

* chore: remove redundant minio setup

* chore: cache targets

* Revert "chore: run fuzz tests with disk cache"

This reverts commit d817831.

* chore: fix typo

* chore: apply suggestions from CR

* Revert "fix: remove redundant `debug_assert`"

This reverts commit 09b899e.
WenyXu authored Jun 7, 2024
1 parent 9c42825 commit 09e0e1b
Showing 8 changed files with 178 additions and 49 deletions.
5 changes: 4 additions & 1 deletion .github/actions/setup-greptimedb-cluster/action.yml
@@ -22,6 +22,9 @@ inputs:
  etcd-endpoints:
    default: "etcd.etcd-cluster.svc.cluster.local:2379"
    description: "Etcd endpoints"
  values-filename:
    default: "with-minio.yaml"


runs:
  using: composite
@@ -57,7 +60,7 @@ runs:
          greptime/greptimedb-cluster \
          --create-namespace \
          -n my-greptimedb \
          --values ./.github/actions/setup-greptimedb-cluster/values.yaml \
          --values ./.github/actions/setup-greptimedb-cluster/${{ inputs.values-filename }} \
          --wait \
          --wait-for-jobs
    - name: Wait for GreptimeDB
@@ -15,4 +15,4 @@ frontend:
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    bg_rt_size = 8
34 changes: 34 additions & 0 deletions .github/actions/setup-greptimedb-cluster/with-minio.yaml
@@ -0,0 +1,34 @@
meta:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    [datanode]
    [datanode.client]
    timeout = "60s"
datanode:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
frontend:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    [meta_client]
    ddl_timeout = "60s"
objectStorage:
  s3:
    bucket: default
    region: us-west-2
    root: test-root
    endpoint: http://minio.minio.svc.cluster.local
  credentials:
    accessKeyId: rootuser
    secretAccessKey: rootpass123
45 changes: 45 additions & 0 deletions .github/actions/setup-greptimedb-cluster/with-remote-wal.yaml
@@ -0,0 +1,45 @@
meta:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    [wal]
    provider = "kafka"
    broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
    num_topics = 3
    [datanode]
    [datanode.client]
    timeout = "60s"
datanode:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    [wal]
    provider = "kafka"
    broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
    linger = "2ms"
frontend:
  config: |-
    [runtime]
    read_rt_size = 8
    write_rt_size = 8
    bg_rt_size = 8
    [meta_client]
    ddl_timeout = "60s"
objectStorage:
  s3:
    bucket: default
    region: us-west-2
    root: test-root
    endpoint: http://minio.minio.svc.cluster.local
  credentials:
    accessKeyId: rootuser
    secretAccessKey: rootpass123
24 changes: 24 additions & 0 deletions .github/actions/setup-kafka-cluster/action.yml
@@ -0,0 +1,24 @@
name: Setup Kafka cluster
description: Deploy Kafka cluster on Kubernetes
inputs:
  controller-replicas:
    default: 3
    description: "Kafka controller replicas"
  namespace:
    default: "kafka-cluster"

runs:
  using: composite
  steps:
    - name: Install Kafka cluster
      shell: bash
      run: |
        helm upgrade \
          --install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
          --set controller.replicaCount=${{ inputs.controller-replicas }} \
          --set controller.resources.requests.cpu=50m \
          --set controller.resources.requests.memory=128Mi \
          --set listeners.controller.protocol=PLAINTEXT \
          --set listeners.client.protocol=PLAINTEXT \
          --create-namespace \
          -n ${{ inputs.namespace }}
24 changes: 24 additions & 0 deletions .github/actions/setup-minio/action.yml
@@ -0,0 +1,24 @@
name: Setup Minio cluster
description: Deploy Minio cluster on Kubernetes
inputs:
  replicas:
    default: 1
    description: "replicas"

runs:
  using: composite
  steps:
    - name: Install Minio cluster
      shell: bash
      run: |
        helm repo add minio https://charts.min.io/
        helm upgrade --install minio \
          --set resources.requests.memory=128Mi \
          --set replicas=${{ inputs.replicas }} \
          --set mode=standalone \
          --set rootUser=rootuser,rootPassword=rootpass123 \
          --set buckets[0].name=default \
          --set service.port=80,service.targetPort=9000 \
          minio/minio \
          --create-namespace \
          -n minio
43 changes: 38 additions & 5 deletions .github/workflows/develop.yml
@@ -155,7 +155,6 @@ jobs:
        with:
          # Shares across multiple jobs
          shared-key: "fuzz-test-targets"
          cache-targets: "false"
      - name: Set Rust Fuzz
        shell: bash
        run: |
@@ -203,7 +202,6 @@ jobs:
        with:
          # Shares across multiple jobs
          shared-key: "fuzz-test-targets"
          cache-targets: "false"
      - name: Set Rust Fuzz
        shell: bash
        run: |
@@ -277,16 +275,35 @@ jobs:
          version: current

  distributed-fuzztest:
    name: Fuzz Test (Distributed, Disk)
    name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
    runs-on: ubuntu-latest
    needs: build-greptime-ci
    strategy:
      matrix:
        target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
        mode:
          - name: "Disk"
            minio: false
            kafka: false
            values: "with-disk.yaml"
          - name: "Minio"
            minio: true
            kafka: false
            values: "with-minio.yaml"
          - name: "Remote WAL"
            minio: true
            kafka: true
            values: "with-remote-wal.yaml"
    steps:
      - uses: actions/checkout@v4
      - name: Setup Kind
        uses: ./.github/actions/setup-kind
      - if: matrix.mode.minio
        name: Setup Minio
        uses: ./.github/actions/setup-minio
      - if: matrix.mode.kafka
        name: Setup Kafka cluster
        uses: ./.github/actions/setup-kafka-cluster
      - name: Setup Etcd cluster
        uses: ./.github/actions/setup-etcd-cluster
      # Prepares for fuzz tests
@@ -301,7 +318,6 @@ jobs:
        with:
          # Shares across multiple jobs
          shared-key: "fuzz-test-targets"
          cache-targets: "false"
      - name: Set Rust Fuzz
        shell: bash
        run: |
@@ -327,6 +343,22 @@ jobs:
            pod -l app.kubernetes.io/instance=etcd \
            --timeout=120s \
            -n etcd-cluster
      - if: matrix.mode.minio
        name: Wait for minio
        run: |
          kubectl wait \
            --for=condition=Ready \
            pod -l app=minio \
            --timeout=120s \
            -n minio
      - if: matrix.mode.kafka
        name: Wait for kafka
        run: |
          kubectl wait \
            --for=condition=Ready \
            pod -l app.kubernetes.io/instance=kafka \
            --timeout=120s \
            -n kafka-cluster
      - name: Print etcd info
        shell: bash
        run: kubectl get all --show-labels -n etcd-cluster
@@ -335,6 +367,7 @@ jobs:
        uses: ./.github/actions/setup-greptimedb-cluster
        with:
          image-registry: localhost:5001
          values-filename: ${{ matrix.mode.values }}
      - name: Port forward (mysql)
        run: |
          kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
@@ -360,7 +393,7 @@ jobs:
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: fuzz-tests-kind-logs-${{ matrix.target }}
          name: fuzz-tests-kind-logs-${{ matrix.mode.name }}-${{ matrix.target }}
          path: /tmp/kind
          retention-days: 3
      - name: Delete cluster
50 changes: 8 additions & 42 deletions src/meta-client/src/lib.rs
@@ -21,71 +21,37 @@ pub mod error;

// Options for meta client in datanode instance.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(default)]
pub struct MetaClientOptions {
    pub metasrv_addrs: Vec<String>,
    #[serde(default = "default_timeout")]
    #[serde(with = "humantime_serde")]
    pub timeout: Duration,
    #[serde(default = "default_heartbeat_timeout")]
    #[serde(with = "humantime_serde")]
    pub heartbeat_timeout: Duration,
    #[serde(default = "default_ddl_timeout")]
    #[serde(with = "humantime_serde")]
    pub ddl_timeout: Duration,
    #[serde(default = "default_connect_timeout")]
    #[serde(with = "humantime_serde")]
    pub connect_timeout: Duration,
    pub tcp_nodelay: bool,
    #[serde(default = "default_metadata_cache_max_capacity")]
    pub metadata_cache_max_capacity: u64,
    #[serde(default = "default_metadata_cache_ttl")]
    #[serde(with = "humantime_serde")]
    pub metadata_cache_ttl: Duration,
    #[serde(default = "default_metadata_cache_tti")]
    #[serde(with = "humantime_serde")]
    pub metadata_cache_tti: Duration,
}

fn default_heartbeat_timeout() -> Duration {
    Duration::from_millis(500u64)
}

fn default_ddl_timeout() -> Duration {
    Duration::from_millis(10_000u64)
}

fn default_connect_timeout() -> Duration {
    Duration::from_millis(1_000u64)
}

fn default_timeout() -> Duration {
    Duration::from_millis(3_000u64)
}

fn default_metadata_cache_max_capacity() -> u64 {
    100_000u64
}

fn default_metadata_cache_ttl() -> Duration {
    Duration::from_secs(600u64)
}

fn default_metadata_cache_tti() -> Duration {
    Duration::from_secs(300u64)
}

impl Default for MetaClientOptions {
    fn default() -> Self {
        Self {
            metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
            timeout: default_timeout(),
            heartbeat_timeout: default_heartbeat_timeout(),
            ddl_timeout: default_ddl_timeout(),
            connect_timeout: default_connect_timeout(),
            timeout: Duration::from_millis(3_000u64),
            heartbeat_timeout: Duration::from_millis(500u64),
            ddl_timeout: Duration::from_millis(10_000u64),
            connect_timeout: Duration::from_millis(1_000u64),
            tcp_nodelay: true,
            metadata_cache_max_capacity: default_metadata_cache_max_capacity(),
            metadata_cache_ttl: default_metadata_cache_ttl(),
            metadata_cache_tti: default_metadata_cache_tti(),
            metadata_cache_max_capacity: 100_000u64,
            metadata_cache_ttl: Duration::from_secs(600u64),
            metadata_cache_tti: Duration::from_secs(300u64),
        }
    }
}
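
Context for the `MetaClientOptions` change above: replacing the per-field `#[serde(default = "...")]` attributes with a container-level `#[serde(default)]` means any field omitted from a `[meta_client]` section falls back to the hand-written `Default` impl, which is what lets the new Helm values files override only `ddl_timeout = "60s"`. The following is a minimal, self-contained sketch (not code from this commit) of that behavior; the `ClientOptions` struct and its fields are hypothetical stand-ins, and it assumes the serde, toml, and humantime_serde crates are available.

// Illustrative sketch only: container-level #[serde(default)] plus a
// hand-written Default impl, mirroring the MetaClientOptions change.
// `ClientOptions` and its fields are hypothetical stand-ins.
use std::time::Duration;

use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(default)]
struct ClientOptions {
    #[serde(with = "humantime_serde")]
    timeout: Duration,
    #[serde(with = "humantime_serde")]
    ddl_timeout: Duration,
    tcp_nodelay: bool,
}

impl Default for ClientOptions {
    fn default() -> Self {
        Self {
            timeout: Duration::from_millis(3_000),
            ddl_timeout: Duration::from_millis(10_000),
            tcp_nodelay: true,
        }
    }
}

fn main() {
    // Only one key is present, as in the `[meta_client]` override used by
    // with-minio.yaml and with-remote-wal.yaml.
    let opts: ClientOptions = toml::from_str(r#"ddl_timeout = "60s""#).unwrap();
    assert_eq!(opts.ddl_timeout, Duration::from_secs(60));
    // Every omitted field is filled from Default::default().
    assert_eq!(opts.timeout, Duration::from_millis(3_000));
    assert!(opts.tcp_nodelay);
}

One side effect of this style is that the defaults live in a single place (the `Default` impl) instead of being scattered across `default_*` helper functions.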
