ci: only run BQ on pg16 & SF on pg17 #8689

Workflow file for this run

name: Flow build and test
on:
  pull_request:
    branches: [main]
  push:
    branches: [main]
jobs:
  flow_test:
    strategy:
      matrix:
        runner: [ubicloud-standard-16-ubuntu-2204-arm]
        postgres-version: [15, 16, 17]
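    # Each matrix leg runs the suite against one Postgres major version. Per
    # this PR's title, BigQuery tests should run only on the pg16 leg and
    # Snowflake only on the pg17 leg; that gating presumably keys off the
    # CI_PG_VERSION variable exported to the tests at the bottom of this file.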
    runs-on: ${{ matrix.runner }}
    timeout-minutes: 30
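    # Side services: a PostGIS-enabled Postgres catalog at the matrix version,
    # Redpanda for the Kafka API, Elasticsearch, and MinIO as the S3-compatible
    # object store that the ClickHouse and Snowflake env vars below point at.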
    services:
      catalog:
        image: imresamu/postgis:${{ matrix.postgres-version }}-3.5-alpine
        ports:
          - 5432:5432
        env:
          PGUSER: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: postgres
          POSTGRES_INITDB_ARGS: --locale=C.UTF-8
      redpanda:
        image: redpandadata/redpanda@sha256:f2f8bb89f1a0747cc6f86440cb3a0916e981e136e1d72392bab179f73492fb0f
        ports:
          - 9092:9092
          - 9644:9644
      elasticsearch:
        image: elasticsearch:8.16.1@sha256:e5ee5f8dacbf18fa3ab59a098cc7d4d69f73e61637eb45f1c029e74b1cb200a1
        ports:
          - 9200:9200
        env:
          discovery.type: single-node
          xpack.security.enabled: false
          xpack.security.enrollment.enabled: false
      minio:
        image: bitnami/minio:2024.12.13@sha256:2a258ab6876f6ed3cd5609836d065f20927955a2ae721fd9edde8ca388b52135
        ports:
          - 9999:9999
        env:
          MINIO_ROOT_USER: minio
          MINIO_ROOT_PASSWORD: miniosecret
          MINIO_API_PORT_NUMBER: 9999
          AWS_EC2_METADATA_DISABLED: true
          MINIO_DEFAULT_BUCKETS: peerdb
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
      - name: generate or hydrate protos
        uses: ./.github/actions/genprotos
      - uses: ubicloud/setup-go@6ea6c4fb36acc657571a53f4a7471e75f5fa80ad # v5
        with:
          go-version: '1.23.0'
          cache-dependency-path: flow/go.sum
      - name: install lib-geos
        run: |
          sudo apt-get update
          sudo apt-get install libgeos-dev
      - run: go mod download
        working-directory: ./flow
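      # The next five steps materialize connector credentials from repository
      # secrets as JSON files; their paths are handed to the tests through the
      # TEST_*_CREDS environment variables below.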
      - name: setup gcp service account
        id: gcp-service-account
        uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3
        with:
          name: "bq_service_account.json"
          json: ${{ secrets.GCP_GH_CI_PKEY }}
      - name: setup snowflake credentials
        id: sf-credentials
        uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3
        with:
          name: "snowflake_creds.json"
          json: ${{ secrets.SNOWFLAKE_GH_CI_PKEY }}
      - name: setup S3 credentials
        id: s3-credentials
        uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3
        with:
          name: "s3_creds.json"
          json: ${{ secrets.S3_CREDS }}
      - name: setup GCS credentials
        id: gcs-credentials
        uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3
        with:
          name: "gcs_creds.json"
          json: ${{ secrets.GCS_CREDS }}
      - name: setup Eventhubs credentials
        id: eventhubs-credentials
        uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3
        with:
          name: "eh_creds.json"
          json: ${{ secrets.EH_CREDS }}
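      # wal_level=logical only takes effect after a server restart, hence the
      # docker restart at the end. The V{?,??}__* brace glob feeds one-digit
      # migration versions before two-digit ones, keeping them in numeric order.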
      - name: create hstore extension, increase logical replication limits, and setup catalog database
        run: >
          docker exec "${{ job.services.catalog.id }}" psql -U postgres -c "CREATE EXTENSION hstore;"
          -c "ALTER SYSTEM SET wal_level=logical;"
          -c "ALTER SYSTEM SET max_replication_slots=192;"
          -c "ALTER SYSTEM SET max_wal_senders=256;"
          -c "ALTER SYSTEM SET max_connections=2048;" &&
          (cat ./nexus/catalog/migrations/V{?,??}__* | docker exec -i "${{ job.services.catalog.id }}" psql -U postgres) &&
          docker restart "${{ job.services.catalog.id }}"
        env:
          PGPASSWORD: postgres
      - uses: ubicloud/cache@92361f338d82d2c58a98875f1b5c95cd14cd6b2a # v4
        id: cache-clickhouse
        with:
          path: ./clickhouse
          key: ${{ runner.os }}-clickhouse
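      # Cache the self-contained ClickHouse binary so the install step below
      # only runs on a cache miss.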
      - name: Install ClickHouse
        if: steps.cache-clickhouse.outputs.cache-hit != 'true'
        run: |
          curl https://clickhouse.com | sh
      - name: Run ClickHouse
        run: |
          ./clickhouse server &
      - name: Install Temporal CLI
        uses: temporalio/setup-temporal@1059a504f87e7fa2f385e3fa40d1aa7e62f1c6ca # v0
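      # Start a headless Temporal dev server, register the MirrorName search
      # attribute, launch the flow and snapshot workers in the background, and
      # run the Go test suite with up to 32 packages in parallel.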
      - name: run tests
        run: |
          temporal server start-dev --namespace default --headless &
          go build -ldflags="-s -w" -o peer-flow
          temporal operator search-attribute create --name MirrorName --type Text --namespace default
          ./peer-flow worker &
          ./peer-flow snapshot-worker &
          go test -p 32 ./... -timeout 900s
        working-directory: ./flow
        env:
          AWS_ENDPOINT_URL_S3: http://localhost:9999
          AWS_ACCESS_KEY_ID: minio
          AWS_SECRET_ACCESS_KEY: miniosecret
          AWS_REGION: us-east-1
          PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ACCESS_KEY_ID: minio
          PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_SECRET_ACCESS_KEY: miniosecret
          PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_REGION: us-east-1
          PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ENDPOINT_URL_S3: http://localhost:9999
          PEERDB_CLICKHOUSE_AWS_S3_BUCKET_NAME: peerdb
          PEERDB_SNOWFLAKE_AWS_CREDENTIALS_AWS_ACCESS_KEY_ID: minio
          PEERDB_SNOWFLAKE_AWS_CREDENTIALS_AWS_SECRET_ACCESS_KEY: miniosecret
          PEERDB_SNOWFLAKE_AWS_CREDENTIALS_AWS_REGION: us-east-1
          PEERDB_SNOWFLAKE_AWS_CREDENTIALS_AWS_ENDPOINT_URL_S3: http://localhost:9999
          PEERDB_SNOWFLAKE_AWS_S3_BUCKET_NAME: peerdb
          TEST_BQ_CREDS: ${{ github.workspace }}/bq_service_account.json
          TEST_SF_CREDS: ${{ github.workspace }}/snowflake_creds.json
          TEST_S3_CREDS: ${{ github.workspace }}/s3_creds.json
          TEST_GCS_CREDS: ${{ github.workspace }}/gcs_creds.json
          TEST_EH_CREDS: ${{ github.workspace }}/eh_creds.json
          AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
          AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
          AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
          AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
          ENABLE_SQLSERVER_TESTS: "true"
          SQLSERVER_HOST: ${{ secrets.SQLSERVER_HOST }}
          SQLSERVER_PORT: ${{ secrets.SQLSERVER_PORT }}
          SQLSERVER_USER: ${{ secrets.SQLSERVER_USER }}
          SQLSERVER_PASSWORD: ${{ secrets.SQLSERVER_PASSWORD }}
          SQLSERVER_DB: ${{ secrets.SQLSERVER_DB }}
          PEERDB_CATALOG_HOST: localhost
          PEERDB_CATALOG_PORT: 5432
          PEERDB_CATALOG_USER: postgres
          PEERDB_CATALOG_PASSWORD: postgres
          PEERDB_CATALOG_DATABASE: postgres
          PEERDB_QUEUE_FORCE_TOPIC_CREATION: "true"
          ELASTICSEARCH_TEST_ADDRESS: http://localhost:9200
          CI_PG_VERSION: ${{ matrix.postgres-version }}
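
Note that the workflow runs the same go test invocation on every matrix leg; the BQ-on-pg16 / SF-on-pg17 split from the PR title presumably happens inside the test code, keyed off CI_PG_VERSION. A minimal sketch of how such gating could look, assuming a hypothetical skipUnlessPG helper (the repository's actual test helpers and test names are not shown in this workflow):

// Hypothetical sketch only: illustrates gating connector tests on the
// Postgres version that CI exposes via CI_PG_VERSION. The helper and test
// names below are illustrative, not PeerDB's actual API.
package e2e_test

import (
	"os"
	"testing"
)

// skipUnlessPG skips the calling test unless CI_PG_VERSION matches want.
// An empty CI_PG_VERSION (e.g. a local run outside CI) runs everything.
func skipUnlessPG(t *testing.T, want string) {
	t.Helper()
	if v := os.Getenv("CI_PG_VERSION"); v != "" && v != want {
		t.Skipf("skipping: CI_PG_VERSION=%s, test pinned to pg%s", v, want)
	}
}

func TestBigQueryCDC(t *testing.T) {
	skipUnlessPG(t, "16") // BigQuery tests only on the pg16 matrix leg
	// ... BigQuery end-to-end test body ...
}

func TestSnowflakeCDC(t *testing.T) {
	skipUnlessPG(t, "17") // Snowflake tests only on the pg17 matrix leg
	// ... Snowflake end-to-end test body ...
}

With this shape, the pg15 leg still exercises everything that is not pinned, while the expensive cloud-warehouse suites each run exactly once per workflow run.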