diff --git a/.github/actions/genprotos/action.yml b/.github/actions/genprotos/action.yml index 84bc29d00..84dfd540f 100644 --- a/.github/actions/genprotos/action.yml +++ b/.github/actions/genprotos/action.yml @@ -3,10 +3,10 @@ description: 'Install buf with local plugins, generate protos and cache' runs: using: "composite" steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: check cache id: cache - uses: ubicloud/cache@v4 + uses: ubicloud/cache@0a97811d53629b143a56b3c2b1f729fd11719ef7 # v4 with: path: | ./flow/generated/protos @@ -15,7 +15,7 @@ runs: key: ${{ runner.os }}-build-genprotos-${{ hashFiles('buf.gen.yaml', './protos/peers.proto', './protos/flow.proto', './protos/route.proto') }} - if: steps.cache.outputs.cache-hit != 'true' - uses: bufbuild/buf-action@v1 + uses: bufbuild/buf-action@3fb70352251376e958c4c2c92c3818de82a71c2b # v1 with: setup_only: true github_token: ${{ github.token }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4d98e8736..5c86636ae 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -5,18 +5,19 @@ on: branches: [main, release/*] pull_request: branches: [main, release/*] - paths: [nexus/**, protos/**] + paths: [nexus/**, protos/**, .github/workflows/ci.yml] jobs: build: strategy: matrix: runner: [ubicloud-standard-2-ubuntu-2204-arm] + postgres-version: [13, 14, 15, 16, 17] runs-on: ${{ matrix.runner }} timeout-minutes: 30 services: catalog_peer: - image: debezium/postgres:14-alpine + image: postgres:${{ matrix.postgres-version }}-alpine ports: - 7132:5432 env: @@ -29,7 +30,7 @@ jobs: --health-timeout 5s --health-retries 5 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos @@ -41,7 +42,7 @@ jobs: - name: setup gcp service account id: gcp-service-account - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "bq_service_account.json" json: ${{ secrets.GCP_GH_CI_PKEY }} @@ -49,13 +50,13 @@ jobs: - name: setup snowflake credentials id: sf-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "snowflake_creds.json" json: ${{ secrets.SNOWFLAKE_GH_CI_PKEY }} dir: "nexus/server/tests/assets/" - - uses: ubicloud/rust-cache@v2 + - uses: ubicloud/rust-cache@69587b2b3f26e8938580c44a643d265ed12f3119 # v2 with: workspaces: nexus diff --git a/.github/workflows/cleanup.yml b/.github/workflows/cleanup.yml index adf7e2403..9471872f6 100644 --- a/.github/workflows/cleanup.yml +++ b/.github/workflows/cleanup.yml @@ -10,9 +10,9 @@ jobs: timeout-minutes: 60 steps: - name: checkout sources - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - - uses: ubicloud/setup-go@v5 + - uses: ubicloud/setup-go@35680fe0723d4a9309d4b1ac1c67e0d46eac5f24 # v5 with: go-version: '1.23.0' cache-dependency-path: e2e_cleanup/go.sum @@ -24,28 +24,28 @@ jobs: - name: setup gcp service account id: gcp-service-account - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "bq_service_account.json" json: ${{ secrets.GCP_GH_CI_PKEY }} - name: setup snowflake credentials id: sf-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # 
v1.2.3 with: name: "snowflake_creds.json" json: ${{ secrets.SNOWFLAKE_GH_CI_PKEY }} - name: setup S3 credentials id: s3-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "s3_creds.json" json: ${{ secrets.S3_CREDS }} - name: setup GCS credentials id: gcs-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "gcs_creds.json" json: ${{ secrets.GCS_CREDS }} @@ -54,20 +54,5 @@ jobs: run: go run main.go working-directory: ./e2e_cleanup env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_REGION: ${{ secrets.AWS_REGION }} TEST_BQ_CREDS: ${{ github.workspace }}/bq_service_account.json TEST_SF_CREDS: ${{ github.workspace }}/snowflake_creds.json - TEST_S3_CREDS: ${{ github.workspace }}/s3_creds.json - TEST_GCS_CREDS: ${{ github.workspace }}/gcs_creds.json - AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }} - AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} - AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} - AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} - ENABLE_SQLSERVER_TESTS: true - SQLSERVER_HOST: ${{ secrets.SQLSERVER_HOST }} - SQLSERVER_PORT: ${{ secrets.SQLSERVER_PORT }} - SQLSERVER_USER: ${{ secrets.SQLSERVER_USER }} - SQLSERVER_PASSWORD: ${{ secrets.SQLSERVER_PASSWORD }} - SQLSERVER_DB: ${{ secrets.SQLSERVER_DB }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 5de1d92c4..303066f11 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -34,7 +34,7 @@ jobs: build-mode: none steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos @@ -47,12 +47,12 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@ea9e4e37992a54ee68a9622e985e60c8e8f12d9f # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@ea9e4e37992a54ee68a9622e985e60c8e8f12d9f # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/customer-docker.yml b/.github/workflows/customer-docker.yml index 8278ec3d2..67145512a 100644 --- a/.github/workflows/customer-docker.yml +++ b/.github/workflows/customer-docker.yml @@ -18,15 +18,15 @@ jobs: contents: read packages: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos - - uses: depot/setup-action@v1 + - uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1 - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3 with: registry: ghcr.io username: ${{github.actor}} @@ -42,7 +42,7 @@ jobs: echo "branch=$(echo $GITHUB_REF | sed -e 's/.*customer-//')" >> $GITHUB_OUTPUT - name: Build (optionally publish) PeerDB Images - uses: depot/bake-action@v1 + uses: depot/bake-action@143e50b965398f1f5dc8463be7dde6f62b9e9c21 # v1 with: token: ${{ secrets.DEPOT_TOKEN }} files: ./docker-bake.hcl diff --git a/.github/workflows/dev-docker.yml b/.github/workflows/dev-docker.yml index 6011ec4ab..275ad28b7 100644 --- a/.github/workflows/dev-docker.yml +++ b/.github/workflows/dev-docker.yml @@ -17,15 +17,15 @@ jobs: contents: read packages: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos - - uses: depot/setup-action@v1 + - uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1 - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3 with: registry: ghcr.io username: ${{github.actor}} @@ -36,7 +36,7 @@ jobs: run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT - name: Build (optionally publish) PeerDB Images - uses: depot/bake-action@v1 + uses: depot/bake-action@143e50b965398f1f5dc8463be7dde6f62b9e9c21 # v1 with: token: ${{ secrets.DEPOT_TOKEN }} files: ./docker-bake.hcl diff --git a/.github/workflows/flow-api-client.yml b/.github/workflows/flow-api-client.yml index 046b377db..5e373b2d6 100644 --- a/.github/workflows/flow-api-client.yml +++ b/.github/workflows/flow-api-client.yml @@ -9,7 +9,7 @@ jobs: permissions: contents: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos diff --git a/.github/workflows/flow.yml b/.github/workflows/flow.yml index ab9366d48..e794512a0 100644 --- a/.github/workflows/flow.yml +++ b/.github/workflows/flow.yml @@ -11,11 +11,12 @@ jobs: strategy: matrix: runner: [ubicloud-standard-16-ubuntu-2204-arm] + postgres-version: [15, 16, 17] runs-on: ${{ matrix.runner }} timeout-minutes: 30 services: catalog: - image: imresamu/postgis:15-3.4-alpine + image: imresamu/postgis:${{ matrix.postgres-version }}-3.5-alpine ports: - 5432:5432 env: @@ -24,21 +25,31 @@ jobs: POSTGRES_DB: postgres POSTGRES_INITDB_ARGS: 
--locale=C.UTF-8 elasticsearch: - image: elasticsearch:8.13.0 + image: elasticsearch:8.16.0@sha256:a411f7c17549209c5839b69f929de00bd91f1e2dcf08b65d5f41b122eae17f5e ports: - 9200:9200 env: discovery.type: single-node xpack.security.enabled: false xpack.security.enrollment.enabled: false + minio: + image: bitnami/minio:2024.11.7@sha256:9f2d9c45006a2ada1bc485e1393291ce7d54ae1a46260dd491381a4eb8b2fd47 + ports: + - 9999:9999 + env: + MINIO_ROOT_USER: minio + MINIO_ROOT_PASSWORD: miniosecret + MINIO_API_PORT_NUMBER: 9999 + AWS_EC2_METADATA_DISABLED: true + MINIO_DEFAULT_BUCKETS: peerdb steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos - - uses: ubicloud/setup-go@v5 + - uses: ubicloud/setup-go@35680fe0723d4a9309d4b1ac1c67e0d46eac5f24 # v5 with: go-version: '1.23.0' cache-dependency-path: flow/go.sum @@ -53,35 +64,35 @@ jobs: - name: setup gcp service account id: gcp-service-account - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "bq_service_account.json" json: ${{ secrets.GCP_GH_CI_PKEY }} - name: setup snowflake credentials id: sf-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "snowflake_creds.json" json: ${{ secrets.SNOWFLAKE_GH_CI_PKEY }} - name: setup S3 credentials id: s3-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "s3_creds.json" json: ${{ secrets.S3_CREDS }} - name: setup GCS credentials id: gcs-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "gcs_creds.json" json: ${{ secrets.GCS_CREDS }} - name: setup Eventhubs credentials id: eventhubs-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "eh_creds.json" json: ${{ secrets.EH_CREDS }} @@ -99,15 +110,27 @@ jobs: PGPASSWORD: postgres - name: start redpanda - uses: redpanda-data/github-action@v0.1.4 + uses: redpanda-data/github-action@c68af8edc420b987e871615ca40b3a5dd70eb5b1 # v0.1.4 with: version: "latest" - - name: start clickhouse - uses: getsentry/action-clickhouse-in-ci@v1 + - uses: ubicloud/cache@0a97811d53629b143a56b3c2b1f729fd11719ef7 # v4 + id: cache-clickhouse + with: + path: ./clickhouse + key: ${{ runner.os }}-clickhouse + + - name: Install ClickHouse + if: steps.cache-clickhouse.outputs.cache-hit != 'true' + run: | + curl https://clickhouse.com | sh + + - name: Run ClickHouse + run: | + ./clickhouse server & - name: Install Temporal CLI - uses: temporalio/setup-temporal@v0 + uses: temporalio/setup-temporal@1059a504f87e7fa2f385e3fa40d1aa7e62f1c6ca # v0 - name: run tests run: | @@ -119,9 +142,20 @@ jobs: go test -p 32 ./... 
-timeout 900s working-directory: ./flow env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_REGION: ${{ secrets.AWS_REGION }} + AWS_ENDPOINT_URL_S3: http://localhost:9999 + AWS_ACCESS_KEY_ID: minio + AWS_SECRET_ACCESS_KEY: miniosecret + AWS_REGION: us-east-1 + PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ACCESS_KEY_ID: minio + PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_SECRET_ACCESS_KEY: miniosecret + PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_REGION: us-east-1 + PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ENDPOINT_URL_S3: http://localhost:9999 + PEERDB_CLICKHOUSE_AWS_S3_BUCKET_NAME: peerdb + PEERDB_SNOWFLAKE_AWS_CREDENTIALS_AWS_ACCESS_KEY_ID: minio + PEERDB_SNOWFLAKE_AWS_CREDENTIALS_AWS_SECRET_ACCESS_KEY: miniosecret + PEERDB_SNOWFLAKE_AWS_CREDENTIALS_AWS_REGION: us-east-1 + PEERDB_SNOWFLAKE_AWS_CREDENTIALS_AWS_ENDPOINT_URL_S3: http://localhost:9999 + PEERDB_SNOWFLAKE_AWS_S3_BUCKET_NAME: peerdb TEST_BQ_CREDS: ${{ github.workspace }}/bq_service_account.json TEST_SF_CREDS: ${{ github.workspace }}/snowflake_creds.json TEST_S3_CREDS: ${{ github.workspace }}/s3_creds.json diff --git a/.github/workflows/golang-lint.yml b/.github/workflows/golang-lint.yml index aadcfa7a5..2289eeae1 100644 --- a/.github/workflows/golang-lint.yml +++ b/.github/workflows/golang-lint.yml @@ -13,7 +13,7 @@ jobs: name: lint runs-on: [ubicloud-standard-4-ubuntu-2204-arm] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos @@ -22,12 +22,12 @@ jobs: run: | sudo apt-get update sudo apt-get install libgeos-dev - - uses: ubicloud/setup-go@v5 + - uses: ubicloud/setup-go@35680fe0723d4a9309d4b1ac1c67e0d46eac5f24 # v5 with: go-version: '1.23.0' cache: false - name: golangci-lint - uses: golangci/golangci-lint-action@v6 + uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6 with: version: v1.61 working-directory: ./flow diff --git a/.github/workflows/rust-lint.yml b/.github/workflows/rust-lint.yml index b9e43c1a2..c4e2782f1 100644 --- a/.github/workflows/rust-lint.yml +++ b/.github/workflows/rust-lint.yml @@ -16,7 +16,7 @@ jobs: runner: [ubicloud-standard-4-ubuntu-2204-arm] runs-on: ${{ matrix.runner }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos diff --git a/.github/workflows/stable-docker.yml b/.github/workflows/stable-docker.yml index 9eabbcfb2..0056a7d9c 100644 --- a/.github/workflows/stable-docker.yml +++ b/.github/workflows/stable-docker.yml @@ -15,22 +15,22 @@ jobs: contents: read packages: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos - - uses: depot/setup-action@v1 + - uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1 - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3 with: registry: ghcr.io username: ${{github.actor}} password: ${{secrets.GITHUB_TOKEN}} - name: Build (optionally publish) PeerDB Images - uses: depot/bake-action@v1 + uses: depot/bake-action@143e50b965398f1f5dc8463be7dde6f62b9e9c21 # v1 with: token: ${{ secrets.DEPOT_TOKEN }} files: ./docker-bake.hcl diff --git a/.github/workflows/ui-build.yml 
b/.github/workflows/ui-build.yml index feea1ffda..7915445fe 100644 --- a/.github/workflows/ui-build.yml +++ b/.github/workflows/ui-build.yml @@ -16,7 +16,7 @@ jobs: runs-on: ${{ matrix.runner }} steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos diff --git a/.github/workflows/ui-lint.yml b/.github/workflows/ui-lint.yml index 31e2340ff..6fb1f2b82 100644 --- a/.github/workflows/ui-lint.yml +++ b/.github/workflows/ui-lint.yml @@ -20,7 +20,7 @@ jobs: runs-on: ${{ matrix.runner }} steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos @@ -30,7 +30,7 @@ jobs: run: npm ci - name: lint - uses: wearerequired/lint-action@v2 + uses: wearerequired/lint-action@548d8a7c4b04d3553d32ed5b6e91eb171e10e7bb # v2 with: eslint: true prettier: true diff --git a/docker-bake.hcl b/docker-bake.hcl index 6e6098ca1..4927cd550 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -16,6 +16,7 @@ group "default" { "flow-worker", "flow-api", "flow-snapshot-worker", + "flow-maintenance", "peerdb-ui" ] } @@ -45,6 +46,9 @@ target "flow-snapshot-worker" { "linux/amd64", "linux/arm64", ] + args = { + PEERDB_VERSION_SHA_SHORT = "${SHA_SHORT}" + } tags = [ "${REGISTRY}/flow-snapshot-worker:${TAG}", "${REGISTRY}/flow-snapshot-worker:${SHA_SHORT}", @@ -59,12 +63,32 @@ target "flow-worker" { "linux/amd64", "linux/arm64", ] + args = { + PEERDB_VERSION_SHA_SHORT = "${SHA_SHORT}" + } tags = [ "${REGISTRY}/flow-worker:${TAG}", "${REGISTRY}/flow-worker:${SHA_SHORT}", ] } +target "flow-maintenance" { + context = "." + dockerfile = "stacks/flow.Dockerfile" + target = "flow-maintenance" + platforms = [ + "linux/amd64", + "linux/arm64", + ] + args = { + PEERDB_VERSION_SHA_SHORT = "${SHA_SHORT}" + } + tags = [ + "${REGISTRY}/flow-maintenance:${TAG}", + "${REGISTRY}/flow-maintenance:${SHA_SHORT}", + ] +} + target "peerdb" { context = "." 
dockerfile = "stacks/peerdb-server.Dockerfile" @@ -72,6 +96,9 @@ target "peerdb" { "linux/amd64", "linux/arm64", ] + args = { + PEERDB_VERSION_SHA_SHORT = "${SHA_SHORT}" + } tags = [ "${REGISTRY}/peerdb-server:${TAG}", "${REGISTRY}/peerdb-server:${SHA_SHORT}", @@ -85,6 +112,9 @@ target "peerdb-ui" { "linux/amd64", "linux/arm64", ] + args = { + PEERDB_VERSION_SHA_SHORT = "${SHA_SHORT}" + } tags = [ "${REGISTRY}/peerdb-ui:${TAG}", "${REGISTRY}/peerdb-ui:${SHA_SHORT}", diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml index 2d492ca4c..98ee987b3 100644 --- a/docker-compose-dev.yml +++ b/docker-compose-dev.yml @@ -39,7 +39,7 @@ x-flow-worker-env: &flow-worker-env services: catalog: container_name: catalog - image: postgres:16-alpine + image: postgres:17-alpine@sha256:0d9624535618a135c5453258fd629f4963390338b11aaffb92292c12df3a6c17 command: -c config_file=/etc/postgresql.conf ports: - 9901:5432 @@ -73,7 +73,7 @@ services: - POSTGRES_PWD=postgres - POSTGRES_SEEDS=catalog - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-sql.yaml - image: temporalio/auto-setup:1.25 + image: temporalio/auto-setup:1.25@sha256:b1edc1e20002d958c8182f2ae08dee877a125083683a627a44917683419ba6a8 ports: - 7233:7233 volumes: @@ -83,7 +83,7 @@ services: pyroscope: container_name: pyroscope - image: grafana/pyroscope:latest + image: grafana/pyroscope:latest@sha256:319bf32ae06b67c1b9795c06ae6c3ba67e9b43382896df7a9df54cdb47a5c535 ports: - 4040:4040 @@ -95,7 +95,7 @@ services: - TEMPORAL_ADDRESS=temporal:7233 - TEMPORAL_CLI_ADDRESS=temporal:7233 - TEMPORAL_CLI_SHOW_STACKS=1 - image: temporalio/admin-tools:1.25 + image: temporalio/admin-tools:1.25.2-tctl-1.18.1-cli-1.1.1@sha256:da0c7a7982b571857173ab8f058e7f139b3054800abb4dcb100445d29a563ee8 stdin_open: true tty: true entrypoint: /etc/temporal/entrypoint.sh @@ -116,7 +116,7 @@ services: - TEMPORAL_ADDRESS=temporal:7233 - TEMPORAL_CORS_ORIGINS=http://localhost:3000 - TEMPORAL_CSRF_COOKIE_INSECURE=true - image: temporalio/ui:2.29.1 + image: temporalio/ui:2.32.0@sha256:82bf98dbe005a831b6bc5dc12ccd7bffd606af2032dae4821ae133caaa943d3d ports: - 8085:8080 @@ -209,7 +209,7 @@ services: - flow-api minio: - image: minio/minio:RELEASE.2024-07-16T23-46-41Z + image: minio/minio:RELEASE.2024-11-07T00-52-20Z@sha256:ac591851803a79aee64bc37f66d77c56b0a4b6e12d9e5356380f4105510f2332 volumes: - minio-data:/data ports: diff --git a/docker-compose.yml b/docker-compose.yml index 860281b99..5448632ac 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -32,7 +32,7 @@ x-flow-worker-env: &flow-worker-env services: catalog: container_name: catalog - image: postgres:16-alpine + image: postgres:17-alpine@sha256:0d9624535618a135c5453258fd629f4963390338b11aaffb92292c12df3a6c17 command: -c config_file=/etc/postgresql.conf restart: unless-stopped ports: @@ -68,7 +68,7 @@ services: - POSTGRES_PWD=postgres - POSTGRES_SEEDS=catalog - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-sql.yaml - image: temporalio/auto-setup:1.25 + image: temporalio/auto-setup:1.25@sha256:b1edc1e20002d958c8182f2ae08dee877a125083683a627a44917683419ba6a8 ports: - 7233:7233 volumes: @@ -85,7 +85,7 @@ services: - TEMPORAL_ADDRESS=temporal:7233 - TEMPORAL_CLI_ADDRESS=temporal:7233 - TEMPORAL_CLI_SHOW_STACKS=1 - image: temporalio/admin-tools:1.25 + image: temporalio/admin-tools:1.25.2-tctl-1.18.1-cli-1.1.1@sha256:da0c7a7982b571857173ab8f058e7f139b3054800abb4dcb100445d29a563ee8 stdin_open: true tty: true entrypoint: /etc/temporal/entrypoint.sh @@ -106,13 +106,13 @@ services: - 
TEMPORAL_ADDRESS=temporal:7233 - TEMPORAL_CORS_ORIGINS=http://localhost:3000 - TEMPORAL_CSRF_COOKIE_INSECURE=true - image: temporalio/ui:2.29.1 + image: temporalio/ui:2.32.0@sha256:82bf98dbe005a831b6bc5dc12ccd7bffd606af2032dae4821ae133caaa943d3d ports: - 8085:8080 flow-api: container_name: flow_api - image: ghcr.io/peerdb-io/flow-api:latest-dev + image: ghcr.io/peerdb-io/flow-api:stable-v0.19.1 restart: unless-stopped ports: - 8112:8112 @@ -128,7 +128,7 @@ services: flow-snapshot-worker: container_name: flow-snapshot-worker - image: ghcr.io/peerdb-io/flow-snapshot-worker:latest-dev + image: ghcr.io/peerdb-io/flow-snapshot-worker:stable-v0.19.1 restart: unless-stopped environment: <<: [*catalog-config, *flow-worker-env, *minio-config] @@ -138,7 +138,7 @@ services: flow-worker: container_name: flow-worker - image: ghcr.io/peerdb-io/flow-worker:latest-dev + image: ghcr.io/peerdb-io/flow-worker:stable-v0.19.1 restart: unless-stopped environment: <<: [*catalog-config, *flow-worker-env, *minio-config] @@ -151,7 +151,7 @@ services: peerdb: container_name: peerdb-server stop_signal: SIGINT - image: ghcr.io/peerdb-io/peerdb-server:latest-dev + image: ghcr.io/peerdb-io/peerdb-server:stable-v0.19.1 restart: unless-stopped environment: <<: *catalog-config @@ -167,7 +167,7 @@ services: peerdb-ui: container_name: peerdb-ui - image: ghcr.io/peerdb-io/peerdb-ui:latest-dev + image: ghcr.io/peerdb-io/peerdb-ui:stable-v0.19.1 restart: unless-stopped ports: - 3000:3000 @@ -184,7 +184,7 @@ services: - flow-api minio: - image: minio/minio:RELEASE.2024-07-16T23-46-41Z + image: minio/minio:RELEASE.2024-11-07T00-52-20Z@sha256:ac591851803a79aee64bc37f66d77c56b0a4b6e12d9e5356380f4105510f2332 restart: unless-stopped volumes: - minio-data:/data diff --git a/e2e_cleanup/go.mod b/e2e_cleanup/go.mod index 4779d8c87..64dbd19f3 100644 --- a/e2e_cleanup/go.mod +++ b/e2e_cleanup/go.mod @@ -3,17 +3,17 @@ module github.com/PeerDB-io/peer-flow-cleanup go 1.23.0 require ( - cloud.google.com/go/bigquery v1.63.1 + cloud.google.com/go/bigquery v1.64.0 cloud.google.com/go/pubsub v1.45.1 - github.com/snowflakedb/gosnowflake v1.11.2 + github.com/snowflakedb/gosnowflake v1.12.0 github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 - google.golang.org/api v0.203.0 + google.golang.org/api v0.204.0 ) require ( cloud.google.com/go v0.116.0 // indirect - cloud.google.com/go/auth v0.9.9 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/auth v0.10.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect cloud.google.com/go/iam v1.2.2 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect @@ -21,20 +21,21 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/apache/arrow/go/v15 v15.0.2 // indirect - github.com/aws/aws-sdk-go-v2 v1.32.2 // indirect + github.com/aws/aws-sdk-go-v2 v1.32.3 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.41 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.34 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect - 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.42 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.66.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 // indirect github.com/aws/smithy-go v1.22.0 // indirect github.com/danieljoos/wincred v1.2.2 // indirect github.com/dvsekhvalnov/jose2go v1.7.0 // indirect diff --git a/e2e_cleanup/go.sum b/e2e_cleanup/go.sum index 42b74df80..198f6b4ec 100644 --- a/e2e_cleanup/go.sum +++ b/e2e_cleanup/go.sum @@ -1,12 +1,12 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= -cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ= -cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= -cloud.google.com/go/bigquery v1.63.1 h1:/6syiWrSpardKNxdvldS5CUTRJX1iIkSPXCjLjiGL+g= -cloud.google.com/go/bigquery v1.63.1/go.mod h1:ufaITfroCk17WTqBhMpi8CRjsfHjMX07pDrQaRKKX2o= +cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= +cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/bigquery v1.64.0 h1:vSSZisNyhr2ioJE1OuYBQrnrpB7pIhRQm4jfjc7E/js= +cloud.google.com/go/bigquery v1.64.0/go.mod h1:gy8Ooz6HF7QmA+TRtX8tZmXBKH5mCFBwUApGAb3zI7Y= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/datacatalog v1.22.1 h1:i0DyKb/o7j+0vgaFtimcRFjYsD6wFw1jpnODYUyiYRs= @@ -38,46 +38,48 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1/go.mod h1:ap1dmS6vQK github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.4.0 
h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/apache/arrow/go/v15 v15.0.2 h1:60IliRbiyTWCWjERBCkO1W4Qun9svcYoZrSLcyOsMLE= github.com/apache/arrow/go/v15 v15.0.2/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA= -github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI= -github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2 v1.32.3 h1:T0dRlFBKcdaUPGNtkBSwHZxrtis8CQU17UpNBZYd0wk= +github.com/aws/aws-sdk-go-v2 v1.32.3/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6/go.mod h1:j/I2++U0xX+cr44QjHay4Cvxj6FUbnxrgmqN3H1jTZA= -github.com/aws/aws-sdk-go-v2/config v1.28.0 h1:FosVYWcqEtWNxHn8gB/Vs6jOlNwSoyOCA/g/sxyySOQ= -github.com/aws/aws-sdk-go-v2/config v1.28.0/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc= -github.com/aws/aws-sdk-go-v2/credentials v1.17.41 h1:7gXo+Axmp+R4Z+AK8YFQO0ZV3L0gizGINCOWxSLY9W8= -github.com/aws/aws-sdk-go-v2/credentials v1.17.41/go.mod h1:u4Eb8d3394YLubphT4jLEwN1rLNq2wFOlT6OuxFwPzU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 h1:TMH3f/SCAWdNtXXVPPu5D6wrr4G5hI1rAxbcocKfC7Q= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17/go.mod h1:1ZRXLdTpzdJb9fwTMXiLipENRxkGMTn1sfKexGllQCw= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.34 h1:os83HS/WfOwi1LsZWLCSHTyj+whvPGaxUsq/D1Ol2Q0= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.34/go.mod h1:tG0BaDCAweumHRsOHm72tuPgAfRLASQThgthWYeTyV8= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60= +github.com/aws/aws-sdk-go-v2/config v1.28.1 h1:oxIvOUXy8x0U3fR//0eq+RdCKimWI900+SV+10xsCBw= +github.com/aws/aws-sdk-go-v2/config v1.28.1/go.mod h1:bRQcttQJiARbd5JZxw6wG0yIK3eLeSCPdg6uqmmlIiI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.42 h1:sBP0RPjBU4neGpIYyx8mkU2QqLPl5u9cmdTWVzIpHkM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.42/go.mod h1:FwZBfU530dJ26rv9saAbxa9Ej3eF/AK0OAY86k13n4M= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18 h1:68jFVtt3NulEzojFesM/WVarlFpCaXLKaBxDpzkQ9OQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18/go.mod h1:Fjnn5jQVIo6VyedMc0/EhPpfNlPl7dHV916O6B+49aE= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35 h1:ihPPdcCVSN0IvBByXwqVp28/l4VosBZ6sDulcvU2J7w= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35/go.mod h1:JkgEhs3SVF51Dj3m1Bj+yL8IznpxzkwlA3jLg3x7Kls= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 h1:Jw50LwEkVjuVzE1NzkhNKkBf9cRN7MtE1F/b2cOKTUM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22/go.mod h1:Y/SmAyPcOTmpeVaWSzSKiILfXTVJwrGmYZhcRbhWuEY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 
h1:981MHwBaRZM7+9QSR6XamDzF/o7ouUGxFzr+nVSIhrs= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22/go.mod h1:1RA1+aBEfn+CAB/Mh0MB6LsdCYCnjZm7tKXtnk499ZQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 h1:7edmS3VOBDhK00b/MwGtGglCm7hhwNYnjJs/PgFdMQE= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21/go.mod h1:Q9o5h4HoIWG8XfzxqiuK/CGUbepCJ8uTlaE3bAbxytQ= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 h1:yV+hCAHZZYJQcwAaszoBNwLbPItHvApxT0kVIw6jRgs= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22/go.mod h1:kbR1TL8llqB1eGnVbybcA4/wgScxdylOdyAd51yxPdw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2 h1:4FMHqLfk0efmTqhXVRL5xYRqlEBNBiRI7N6w4jsEdd4= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2/go.mod h1:LWoqeWlK9OZeJxsROW2RqrSPvQHKTpp69r/iDjwsSaw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 h1:s7NA1SOw8q/5c0wr8477yOPp0z+uBaXBnLE0XYb0POA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2/go.mod h1:fnjjWyAW/Pj5HYOxl9LJqWtEwS7W2qgcRLWP+uWbss0= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2 h1:t7iUP9+4wdc5lt3E41huP+GvQZJD38WLsgVp4iOtAjg= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2/go.mod h1:/niFCtmuQNxqx9v8WAPq5qh7EH25U4BF6tjoyq9bObM= -github.com/aws/aws-sdk-go-v2/service/s3 v1.66.1 h1:MkQ4unegQEStiQYmfFj+Aq5uTp265ncSmm0XTQwDwi0= -github.com/aws/aws-sdk-go-v2/service/s3 v1.66.1/go.mod h1:cB6oAuus7YXRZhWCc1wIwPywwZ1XwweNp2TVAEGYeB8= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 h1:bSYXVyUzoTHoKalBmwaZxs97HU9DWWI3ehHSAMa7xOk= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.2/go.mod h1:skMqY7JElusiOUjMJMOv1jJsP7YUg7DrhgqZZWuzu1U= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 h1:AhmO1fHINP9vFYUE0LHzCWg/LfUWUF+zFPEcY9QXb7o= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2/go.mod h1:o8aQygT2+MVP0NaV6kbdE1YnnIM8RRVQzoeUH45GOdI= -github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 h1:CiS7i0+FUe+/YY1GvIBLLrR/XNGZ4CtM1Ll0XavNuVo= -github.com/aws/aws-sdk-go-v2/service/sts v1.32.2/go.mod h1:HtaiBI8CjYoNVde8arShXb94UbQQi9L4EMr6D+xGBwo= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 h1:kT6BcZsmMtNkP/iYMcRG+mIEA/IbeiUimXtGmqF39y0= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3/go.mod h1:Z8uGua2k4PPaGOYn66pK02rhMrot3Xk3tpBuUFPomZU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 h1:qcxX0JYlgWH3hpPUnd6U0ikcl6LLA9sLkXE2w1fpMvY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3/go.mod h1:cLSNEmI45soc+Ef8K/L+8sEA3A3pYFEYf5B5UI+6bH4= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 h1:ZC7Y/XgKUxwqcdhO5LE8P6oGP1eh6xlQReWNKfhvJno= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3/go.mod h1:WqfO7M9l9yUAw0HcHaikwRd/H6gzYdz7vjejCA5e2oY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 h1:p9TNFL8bFUMd+38YIpTAXpoxyz0MxC7FlbFEH4P4E1U= +github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2/go.mod h1:fNjyo0Coen9QTwQLWeV6WO2Nytwiu+cCcWaTdKCAqqE= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 h1:UTpsIf0loCIWEbrqdLb+0RxnTXfWh2vhw4nQmFi4nPc= 
+github.com/aws/aws-sdk-go-v2/service/sso v1.24.3/go.mod h1:FZ9j3PFHHAR+w0BSEjK955w5YD2UwB/l/H0yAK3MJvI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3 h1:2YCmIXv3tmiItw0LlYf6v7gEHebLY45kBEnPezbUKyU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3/go.mod h1:u19stRyNPxGhj6dRm+Cdgu6N75qnbW7+QN0q0dsAk58= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 h1:wVnQ6tigGsRqSWDEEyH6lSAJ9OyFUsSnbaUWChuSGzs= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.3/go.mod h1:VZa9yTFyj4o10YGsmDO4gbQJUvvhY72fhumT8W4LqsE= github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -174,8 +176,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/snowflakedb/gosnowflake v1.11.2 h1:eAMsxrCiC6ij5wX3dHx1TQCBOdDmCK062Ir8rndUkRg= -github.com/snowflakedb/gosnowflake v1.11.2/go.mod h1:WFe+8mpsapDaQjHX6BqJBKtfQCGlGD3lHKeDsKfpx2A= +github.com/snowflakedb/gosnowflake v1.12.0 h1:Saez8egtn5xAoVMBxFaMu9MYfAG9SS9dpAEXD1/ECIo= +github.com/snowflakedb/gosnowflake v1.12.0/go.mod h1:wHfYmZi3zvtWItojesAhWWXBN7+niex2R1h/S7QCZYg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -266,8 +268,8 @@ golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhS golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= -google.golang.org/api v0.203.0 h1:SrEeuwU3S11Wlscsn+LA1kb/Y5xT8uggJSkIhD08NAU= -google.golang.org/api v0.203.0/go.mod h1:BuOVyCSYEPwJb3npWvDnNmFI92f3GeRnHNkETneT3SI= +google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4= +google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= diff --git a/flow/activities/flowable.go b/flow/activities/flowable.go index cc09bae0d..8001b5344 100644 --- a/flow/activities/flowable.go +++ b/flow/activities/flowable.go @@ -287,11 +287,13 @@ func (a *FlowableActivity) MaintainPull( ctx = context.WithValue(ctx, shared.FlowNameKey, config.FlowJobName) srcConn, err := connectors.GetByNameAs[connectors.CDCPullConnector](ctx, config.Env, a.CatalogPool, config.SourceName) if err != nil { + a.Alerter.LogFlowError(ctx, config.FlowJobName, err) return err } defer connectors.CloseConnector(ctx, srcConn) if err := srcConn.SetupReplConn(ctx); err != nil { + a.Alerter.LogFlowError(ctx, config.FlowJobName, err) return err } @@ -407,7 +409,7 @@ func (a *FlowableActivity) StartNormalize( if 
errors.Is(err, errors.ErrUnsupported) { return nil, monitoring.UpdateEndTimeForCDCBatch(ctx, a.CatalogPool, input.FlowConnectionConfigs.FlowJobName, input.SyncBatchID) } else if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get normalize connector: %w", err) } defer connectors.CloseConnector(ctx, dstConn) @@ -418,7 +420,7 @@ func (a *FlowableActivity) StartNormalize( tableNameSchemaMapping, err := a.getTableNameSchemaMapping(ctx, input.FlowConnectionConfigs.FlowJobName) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get table name schema mapping: %w", err) } res, err := dstConn.NormalizeRecords(ctx, &model.NormalizeRecordsRequest{ @@ -436,13 +438,13 @@ func (a *FlowableActivity) StartNormalize( } dstType, err := connectors.LoadPeerType(ctx, a.CatalogPool, input.FlowConnectionConfigs.DestinationName) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get peer type: %w", err) } if dstType == protos.DBType_POSTGRES { err = monitoring.UpdateEndTimeForCDCBatch(ctx, a.CatalogPool, input.FlowConnectionConfigs.FlowJobName, input.SyncBatchID) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to update end time for cdc batch: %w", err) } } diff --git a/flow/activities/flowable_core.go b/flow/activities/flowable_core.go index db04efea3..d583044b0 100644 --- a/flow/activities/flowable_core.go +++ b/flow/activities/flowable_core.go @@ -139,7 +139,7 @@ func syncCore[TPull connectors.CDCPullConnectorCore, TSync connectors.CDCSyncCon batchSize := options.BatchSize if batchSize == 0 { - batchSize = 1_000_000 + batchSize = 250_000 } lastOffset, err := func() (int64, error) { diff --git a/flow/activities/maintenance_activity.go b/flow/activities/maintenance_activity.go new file mode 100644 index 000000000..be42cc8e5 --- /dev/null +++ b/flow/activities/maintenance_activity.go @@ -0,0 +1,284 @@ +package activities + +import ( + "context" + "fmt" + "log/slog" + "time" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "go.temporal.io/sdk/activity" + "go.temporal.io/sdk/client" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/PeerDB-io/peer-flow/alerting" + "github.com/PeerDB-io/peer-flow/generated/protos" + "github.com/PeerDB-io/peer-flow/model" + "github.com/PeerDB-io/peer-flow/peerdbenv" + "github.com/PeerDB-io/peer-flow/shared" + "github.com/PeerDB-io/peer-flow/shared/telemetry" +) + +const ( + mirrorStateBackup = "backup" + mirrorStateRestored = "restore" +) + +type MaintenanceActivity struct { + CatalogPool *pgxpool.Pool + Alerter *alerting.Alerter + TemporalClient client.Client +} + +func (a *MaintenanceActivity) GetAllMirrors(ctx context.Context) (*protos.MaintenanceMirrors, error) { + rows, err := a.CatalogPool.Query(ctx, ` + select distinct on(name) + id, name, workflow_id, + created_at, coalesce(query_string, '')='' is_cdc + from flows + `) + if err != nil { + return &protos.MaintenanceMirrors{}, err + } + + maintenanceMirrorItems, err := pgx.CollectRows(rows, func(row pgx.CollectableRow) (*protos.MaintenanceMirror, error) { + var info protos.MaintenanceMirror + var createdAt time.Time + err := row.Scan(&info.MirrorId, &info.MirrorName, &info.WorkflowId, &createdAt, &info.IsCdc) + info.MirrorCreatedAt = timestamppb.New(createdAt) + return &info, err + }) + return &protos.MaintenanceMirrors{ + Mirrors: maintenanceMirrorItems, + }, err +} + +func (a *MaintenanceActivity) getMirrorStatus(ctx context.Context, mirror *protos.MaintenanceMirror) (protos.FlowStatus, error) { + return 
shared.GetWorkflowStatus(ctx, a.TemporalClient, mirror.WorkflowId) +} + +func (a *MaintenanceActivity) WaitForRunningSnapshots(ctx context.Context) (*protos.MaintenanceMirrors, error) { + mirrors, err := a.GetAllMirrors(ctx) + if err != nil { + return &protos.MaintenanceMirrors{}, err + } + + slog.Info("Found mirrors for snapshot check", "mirrors", mirrors, "len", len(mirrors.Mirrors)) + + for _, mirror := range mirrors.Mirrors { + lastStatus, err := a.checkAndWaitIfSnapshot(ctx, mirror, 2*time.Minute) + if err != nil { + return &protos.MaintenanceMirrors{}, err + } + slog.Info("Finished checking and waiting for snapshot", + "mirror", mirror.MirrorName, "workflowId", mirror.WorkflowId, "lastStatus", lastStatus.String()) + } + slog.Info("Finished checking and waiting for all mirrors to finish snapshot") + return mirrors, nil +} + +func (a *MaintenanceActivity) checkAndWaitIfSnapshot( + ctx context.Context, + mirror *protos.MaintenanceMirror, + logEvery time.Duration, +) (protos.FlowStatus, error) { + // In case a mirror was just kicked off, it shows up in the running state; wait a bit before checking for a snapshot + if mirror.MirrorCreatedAt.AsTime().After(time.Now().Add(-30 * time.Second)) { + slog.Info("Mirror was created less than 30 seconds ago, waiting for it to be ready before checking for snapshot", + "mirror", mirror.MirrorName, "workflowId", mirror.WorkflowId) + time.Sleep(30 * time.Second) + } + + flowStatus, err := RunEveryIntervalUntilFinish(ctx, func() (bool, protos.FlowStatus, error) { + activity.RecordHeartbeat(ctx, fmt.Sprintf("Waiting for mirror %s to finish snapshot", mirror.MirrorName)) + mirrorStatus, err := a.getMirrorStatus(ctx, mirror) + if err != nil { + return false, mirrorStatus, err + } + if mirrorStatus == protos.FlowStatus_STATUS_SNAPSHOT || mirrorStatus == protos.FlowStatus_STATUS_SETUP { + return false, mirrorStatus, nil + } + return true, mirrorStatus, nil + }, 10*time.Second, fmt.Sprintf("Waiting for mirror %s to finish snapshot", mirror.MirrorName), logEvery) + return flowStatus, err +} + +func (a *MaintenanceActivity) EnableMaintenanceMode(ctx context.Context) error { + slog.Info("Enabling maintenance mode") + return peerdbenv.UpdatePeerDBMaintenanceModeEnabled(ctx, a.CatalogPool, true) +} + +func (a *MaintenanceActivity) BackupAllPreviouslyRunningFlows(ctx context.Context, mirrors *protos.MaintenanceMirrors) error { + tx, err := a.CatalogPool.Begin(ctx) + if err != nil { + return err + } + defer shared.RollbackTx(tx, slog.Default()) + + for _, mirror := range mirrors.Mirrors { + _, err := tx.Exec(ctx, ` + insert into maintenance.maintenance_flows + (flow_id, flow_name, workflow_id, flow_created_at, is_cdc, state, from_version) + values + ($1, $2, $3, $4, $5, $6, $7) + `, mirror.MirrorId, mirror.MirrorName, mirror.WorkflowId, mirror.MirrorCreatedAt.AsTime(), mirror.IsCdc, mirrorStateBackup, + peerdbenv.PeerDBVersionShaShort()) + if err != nil { + return err + } + } + return tx.Commit(ctx) +} + +func (a *MaintenanceActivity) PauseMirrorIfRunning(ctx context.Context, mirror *protos.MaintenanceMirror) (bool, error) { + mirrorStatus, err := a.getMirrorStatus(ctx, mirror) + if err != nil { + return false, err + } + + slog.Info("Checking if mirror is running", "mirror", mirror.MirrorName, "workflowId", mirror.WorkflowId, "status", mirrorStatus.String()) + + if mirrorStatus != protos.FlowStatus_STATUS_RUNNING { + return false, nil + } + + slog.Info("Pausing mirror for maintenance", "mirror", mirror.MirrorName, "workflowId", mirror.WorkflowId) + + if err := 
model.FlowSignal.SignalClientWorkflow(ctx, a.TemporalClient, mirror.WorkflowId, "", model.PauseSignal); err != nil { + slog.Error("Error signaling mirror running to pause for maintenance", + "mirror", mirror.MirrorName, "workflowId", mirror.WorkflowId, "error", err) + return false, err + } + + return RunEveryIntervalUntilFinish(ctx, func() (bool, bool, error) { + updatedMirrorStatus, statusErr := a.getMirrorStatus(ctx, mirror) + if statusErr != nil { + return false, false, statusErr + } + activity.RecordHeartbeat(ctx, "Waiting for mirror to pause with current status "+updatedMirrorStatus.String()) + if statusErr := model.FlowSignal.SignalClientWorkflow(ctx, a.TemporalClient, mirror.WorkflowId, "", + model.PauseSignal); statusErr != nil { + return false, false, statusErr + } + if updatedMirrorStatus == protos.FlowStatus_STATUS_PAUSED { + return true, true, nil + } + return false, false, nil + }, 10*time.Second, "Waiting for mirror to pause", 30*time.Second) +} + +func (a *MaintenanceActivity) CleanBackedUpFlows(ctx context.Context) error { + _, err := a.CatalogPool.Exec(ctx, ` + update maintenance.maintenance_flows + set state = $1, + restored_at = now(), + to_version = $2 + where state = $3 + `, mirrorStateRestored, peerdbenv.PeerDBVersionShaShort(), mirrorStateBackup) + return err +} + +func (a *MaintenanceActivity) GetBackedUpFlows(ctx context.Context) (*protos.MaintenanceMirrors, error) { + rows, err := a.CatalogPool.Query(ctx, ` + select flow_id, flow_name, workflow_id, flow_created_at, is_cdc + from maintenance.maintenance_flows + where state = $1 + `, mirrorStateBackup) + if err != nil { + return nil, err + } + + maintenanceMirrorItems, err := pgx.CollectRows(rows, func(row pgx.CollectableRow) (*protos.MaintenanceMirror, error) { + var info protos.MaintenanceMirror + var createdAt time.Time + err := row.Scan(&info.MirrorId, &info.MirrorName, &info.WorkflowId, &createdAt, &info.IsCdc) + info.MirrorCreatedAt = timestamppb.New(createdAt) + return &info, err + }) + if err != nil { + return nil, err + } + + return &protos.MaintenanceMirrors{ + Mirrors: maintenanceMirrorItems, + }, nil +} + +func (a *MaintenanceActivity) ResumeMirror(ctx context.Context, mirror *protos.MaintenanceMirror) error { + mirrorStatus, err := a.getMirrorStatus(ctx, mirror) + if err != nil { + return err + } + + if mirrorStatus != protos.FlowStatus_STATUS_PAUSED { + slog.Error("Cannot resume mirror that is not paused", + "mirror", mirror.MirrorName, "workflowId", mirror.WorkflowId, "status", mirrorStatus.String()) + return nil + } + + // There can also be "workflow already completed" errors, what should we do in that case? 
+ if err := model.FlowSignal.SignalClientWorkflow(ctx, a.TemporalClient, mirror.WorkflowId, "", model.NoopSignal); err != nil { + slog.Error("Error signaling mirror to resume for maintenance", + "mirror", mirror.MirrorName, "workflowId", mirror.WorkflowId, "error", err) + return err + } + return nil +} + +func (a *MaintenanceActivity) DisableMaintenanceMode(ctx context.Context) error { + slog.Info("Disabling maintenance mode") + return peerdbenv.UpdatePeerDBMaintenanceModeEnabled(ctx, a.CatalogPool, false) +} + +func (a *MaintenanceActivity) BackgroundAlerter(ctx context.Context) error { + heartbeatTicker := time.NewTicker(30 * time.Second) + defer heartbeatTicker.Stop() + + alertTicker := time.NewTicker(time.Duration(peerdbenv.PeerDBMaintenanceModeWaitAlertSeconds()) * time.Second) + defer alertTicker.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-heartbeatTicker.C: + activity.RecordHeartbeat(ctx, "Maintenance Workflow is still running") + case <-alertTicker.C: + slog.Warn("Maintenance Workflow is still running") + a.Alerter.LogNonFlowWarning(ctx, telemetry.MaintenanceWait, "Waiting", "Maintenance mode is still running") + } + } +} + +func RunEveryIntervalUntilFinish[T any]( + ctx context.Context, + runFunc func() (finished bool, result T, err error), + runInterval time.Duration, + logMessage string, + logInterval time.Duration, +) (T, error) { + runTicker := time.NewTicker(runInterval) + defer runTicker.Stop() + + logTicker := time.NewTicker(logInterval) + defer logTicker.Stop() + var lastResult T + for { + select { + case <-ctx.Done(): + return lastResult, ctx.Err() + case <-runTicker.C: + finished, result, err := runFunc() + lastResult = result + if err != nil { + return lastResult, err + } + if finished { + return lastResult, nil + } + case <-logTicker.C: + slog.Info(logMessage, "lastResult", lastResult) + } + } +} diff --git a/flow/alerting/alerting.go b/flow/alerting/alerting.go index e9df410f9..4413b2efb 100644 --- a/flow/alerting/alerting.go +++ b/flow/alerting/alerting.go @@ -356,7 +356,7 @@ func (a *Alerter) checkAndAddAlertToCatalog(ctx context.Context, alertConfigId i return true } - logger.Info(fmt.Sprintf("Skipped sending alerts: last alert was sent at %s, which was >=%s ago", createdTimestamp.String(), dur.String())) + logger.Info(fmt.Sprintf("Skipped sending alerts: last alert was sent at %s, which was <=%s ago", createdTimestamp.String(), dur.String())) return false } @@ -377,10 +377,10 @@ func (a *Alerter) sendTelemetryMessage( } if a.snsTelemetrySender != nil { - if status, err := a.snsTelemetrySender.SendMessage(ctx, details, details, attributes); err != nil { + if response, err := a.snsTelemetrySender.SendMessage(ctx, details, details, attributes); err != nil { logger.Warn("failed to send message to snsTelemetrySender", slog.Any("error", err)) } else { - logger.Info("received status from snsTelemetrySender", slog.String("status", status)) + logger.Info("received response from snsTelemetrySender", slog.String("response", response)) } } @@ -388,7 +388,7 @@ if status, err := a.incidentIoTelemetrySender.SendMessage(ctx, details, details, attributes); err != nil { logger.Warn("failed to send message to incidentIoTelemetrySender", slog.Any("error", err)) } else { - logger.Info("received status from incident.io", slog.String("status", status)) + logger.Info("received response from incident.io", slog.String("response", status)) } } } @@ -440,6 +440,10 @@ func (a *Alerter) LogFlowError(ctx context.Context, 
flowName string, err error) if errors.As(err, &pgErr) { tags = append(tags, "pgcode:"+pgErr.Code) } + var netErr *net.OpError + if errors.As(err, &netErr) { + tags = append(tags, "err:Net") + } a.sendTelemetryMessage(ctx, logger, flowName, errorWithStack, telemetry.ERROR, tags...) } diff --git a/flow/cmd/api.go b/flow/cmd/api.go index ca225e429..f81f9d923 100644 --- a/flow/cmd/api.go +++ b/flow/cmd/api.go @@ -191,24 +191,7 @@ func APIMain(ctx context.Context, args *APIServerParams) error { Logger: slog.New(shared.NewSlogHandler(slog.NewJSONHandler(os.Stdout, nil))), } - if peerdbenv.PeerDBTemporalEnableCertAuth() { - slog.Info("Using temporal certificate/key for authentication") - - certs, err := parseTemporalCertAndKey(ctx) - if err != nil { - return fmt.Errorf("unable to base64 decode certificate and key: %w", err) - } - - connOptions := client.ConnectionOptions{ - TLS: &tls.Config{ - Certificates: certs, - MinVersion: tls.VersionTLS13, - }, - } - clientOptions.ConnectionOptions = connOptions - } - - tc, err := client.Dial(clientOptions) + tc, err := setupTemporalClient(ctx, clientOptions) if err != nil { return fmt.Errorf("unable to create Temporal client: %w", err) } @@ -309,3 +292,25 @@ func APIMain(ctx context.Context, args *APIServerParams) error { return nil } + +func setupTemporalClient(ctx context.Context, clientOptions client.Options) (client.Client, error) { + if peerdbenv.PeerDBTemporalEnableCertAuth() { + slog.Info("Using temporal certificate/key for authentication") + + certs, err := parseTemporalCertAndKey(ctx) + if err != nil { + return nil, fmt.Errorf("unable to base64 decode certificate and key: %w", err) + } + + connOptions := client.ConnectionOptions{ + TLS: &tls.Config{ + Certificates: certs, + MinVersion: tls.VersionTLS13, + }, + } + clientOptions.ConnectionOptions = connOptions + } + + tc, err := client.Dial(clientOptions) + return tc, err +} diff --git a/flow/cmd/handler.go b/flow/cmd/handler.go index 8b30331ae..6caefaf47 100644 --- a/flow/cmd/handler.go +++ b/flow/cmd/handler.go @@ -19,6 +19,7 @@ import ( "github.com/PeerDB-io/peer-flow/connectors/utils" "github.com/PeerDB-io/peer-flow/generated/protos" "github.com/PeerDB-io/peer-flow/model" + "github.com/PeerDB-io/peer-flow/peerdbenv" "github.com/PeerDB-io/peer-flow/shared" peerflow "github.com/PeerDB-io/peer-flow/workflows" ) @@ -327,6 +328,17 @@ func (h *FlowRequestHandler) FlowStateChange( ) (*protos.FlowStateChangeResponse, error) { logs := slog.String("flowJobName", req.FlowJobName) slog.Info("FlowStateChange called", logs, slog.Any("req", req)) + underMaintenance, err := peerdbenv.PeerDBMaintenanceModeEnabled(ctx, nil) + if err != nil { + slog.Error("unable to check maintenance mode", logs, slog.Any("error", err)) + return nil, fmt.Errorf("unable to load dynamic config: %w", err) + } + + if underMaintenance { + slog.Warn("Flow state change request denied due to maintenance", logs) + return nil, errors.New("PeerDB is under maintenance") + } + workflowID, err := h.getWorkflowID(ctx, req.FlowJobName) if err != nil { slog.Error("[flow-state-change] unable to get workflowID", logs, slog.Any("error", err)) @@ -408,7 +420,7 @@ func (h *FlowRequestHandler) handleCancelWorkflow(ctx context.Context, workflowI if err != nil { slog.Error(fmt.Sprintf("unable to cancel PeerFlow workflow: %s. 
Attempting to terminate.", err.Error())) terminationReason := fmt.Sprintf("workflow %s did not cancel in time.", workflowID) - if err = h.temporalClient.TerminateWorkflow(ctx, workflowID, runID, terminationReason); err != nil { + if err := h.temporalClient.TerminateWorkflow(ctx, workflowID, runID, terminationReason); err != nil { return fmt.Errorf("unable to terminate PeerFlow workflow: %w", err) } } @@ -456,10 +468,9 @@ func (h *FlowRequestHandler) DropPeer( } var inMirror pgtype.Int8 - queryErr := h.pool.QueryRow(ctx, - "SELECT COUNT(*) FROM flows WHERE source_peer=$1 or destination_peer=$2", - peerID, peerID).Scan(&inMirror) - if queryErr != nil { + if queryErr := h.pool.QueryRow(ctx, + "SELECT COUNT(*) FROM flows WHERE source_peer=$1 or destination_peer=$1", peerID, + ).Scan(&inMirror); queryErr != nil { return nil, fmt.Errorf("failed to check for existing mirrors with peer %s: %w", req.PeerName, queryErr) } @@ -467,8 +478,7 @@ func (h *FlowRequestHandler) DropPeer( return nil, fmt.Errorf("peer %s is currently involved in an ongoing mirror", req.PeerName) } - _, delErr := h.pool.Exec(ctx, "DELETE FROM peers WHERE name = $1", req.PeerName) - if delErr != nil { + if _, delErr := h.pool.Exec(ctx, "DELETE FROM peers WHERE name = $1", req.PeerName); delErr != nil { return nil, fmt.Errorf("failed to delete peer %s from metadata table: %w", req.PeerName, delErr) } @@ -477,9 +487,8 @@ func (h *FlowRequestHandler) DropPeer( func (h *FlowRequestHandler) getWorkflowID(ctx context.Context, flowJobName string) (string, error) { q := "SELECT workflow_id FROM flows WHERE name = $1" - row := h.pool.QueryRow(ctx, q, flowJobName) var workflowID string - if err := row.Scan(&workflowID); err != nil { + if err := h.pool.QueryRow(ctx, q, flowJobName).Scan(&workflowID); err != nil { return "", fmt.Errorf("unable to get workflowID for flow job %s: %w", flowJobName, err) } @@ -491,6 +500,14 @@ func (h *FlowRequestHandler) ResyncMirror( ctx context.Context, req *protos.ResyncMirrorRequest, ) (*protos.ResyncMirrorResponse, error) { + underMaintenance, err := peerdbenv.PeerDBMaintenanceModeEnabled(ctx, nil) + if err != nil { + return nil, fmt.Errorf("unable to get maintenance mode status: %w", err) + } + if underMaintenance { + return nil, errors.New("PeerDB is under maintenance") + } + isCDC, err := h.isCDCFlow(ctx, req.FlowJobName) if err != nil { return nil, err @@ -507,23 +524,66 @@ func (h *FlowRequestHandler) ResyncMirror( config.Resync = true config.DoInitialSnapshot = true // validate mirror first because once the mirror is dropped, there's no going back - _, err = h.ValidateCDCMirror(ctx, &protos.CreateCDCFlowRequest{ + if _, err := h.ValidateCDCMirror(ctx, &protos.CreateCDCFlowRequest{ ConnectionConfigs: config, - }) - if err != nil { + }); err != nil { return nil, err } - err = h.shutdownFlow(ctx, req.FlowJobName, req.DropStats) - if err != nil { + if err := h.shutdownFlow(ctx, req.FlowJobName, req.DropStats); err != nil { return nil, err } - _, err = h.CreateCDCFlow(ctx, &protos.CreateCDCFlowRequest{ + if _, err := h.CreateCDCFlow(ctx, &protos.CreateCDCFlowRequest{ ConnectionConfigs: config, - }) - if err != nil { + }); err != nil { return nil, err } return &protos.ResyncMirrorResponse{}, nil } + +func (h *FlowRequestHandler) GetInstanceInfo(ctx context.Context, in *protos.InstanceInfoRequest) (*protos.InstanceInfoResponse, error) { + enabled, err := peerdbenv.PeerDBMaintenanceModeEnabled(ctx, nil) + if err != nil { + slog.Error("unable to get maintenance mode status", slog.Any("error", err)) + return 
&protos.InstanceInfoResponse{ + Status: protos.InstanceStatus_INSTANCE_STATUS_UNKNOWN, + }, fmt.Errorf("unable to get maintenance mode status: %w", err) + } + if enabled { + return &protos.InstanceInfoResponse{ + Status: protos.InstanceStatus_INSTANCE_STATUS_MAINTENANCE, + }, nil + } + return &protos.InstanceInfoResponse{ + Status: protos.InstanceStatus_INSTANCE_STATUS_READY, + }, nil +} + +func (h *FlowRequestHandler) Maintenance(ctx context.Context, in *protos.MaintenanceRequest) (*protos.MaintenanceResponse, error) { + taskQueueId := shared.MaintenanceFlowTaskQueue + if in.UsePeerflowTaskQueue { + taskQueueId = shared.PeerFlowTaskQueue + } + switch in.Status { + case protos.MaintenanceStatus_MAINTENANCE_STATUS_START: + workflowRun, err := peerflow.RunStartMaintenanceWorkflow(ctx, h.temporalClient, &protos.StartMaintenanceFlowInput{}, taskQueueId) + if err != nil { + return nil, err + } + return &protos.MaintenanceResponse{ + WorkflowId: workflowRun.GetID(), + RunId: workflowRun.GetRunID(), + }, nil + case protos.MaintenanceStatus_MAINTENANCE_STATUS_END: + workflowRun, err := peerflow.RunEndMaintenanceWorkflow(ctx, h.temporalClient, &protos.EndMaintenanceFlowInput{}, taskQueueId) + if err != nil { + return nil, err + } + return &protos.MaintenanceResponse{ + WorkflowId: workflowRun.GetID(), + RunId: workflowRun.GetRunID(), + }, nil + } + return nil, errors.New("invalid maintenance status") +} diff --git a/flow/cmd/maintenance.go b/flow/cmd/maintenance.go new file mode 100644 index 000000000..474a67db3 --- /dev/null +++ b/flow/cmd/maintenance.go @@ -0,0 +1,246 @@ +package cmd + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "log/slog" + "os" + + "github.com/aws/smithy-go/ptr" + "go.temporal.io/sdk/client" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "github.com/PeerDB-io/peer-flow/generated/protos" + "github.com/PeerDB-io/peer-flow/peerdbenv" + "github.com/PeerDB-io/peer-flow/shared" + peerflow "github.com/PeerDB-io/peer-flow/workflows" +) + +type MaintenanceCLIParams struct { + TemporalHostPort string + TemporalNamespace string + Mode string + FlowGrpcAddress string + SkipIfK8sServiceMissing string + FlowTlsEnabled bool + SkipOnApiVersionMatch bool + SkipOnNoMirrors bool + UseMaintenanceTaskQueue bool + AssumeSkippedMaintenanceWorkflows bool +} + +type StartMaintenanceResult struct { + SkippedReason *string `json:"skippedReason,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` + CLIVersion string `json:"cliVersion,omitempty"` + Skipped bool `json:"skipped,omitempty"` +} + +// MaintenanceMain is the entry point for the maintenance command. It requires access to a Temporal client and exits +// after running the requested maintenance workflow. +func MaintenanceMain(ctx context.Context, args *MaintenanceCLIParams) error { + slog.Info("Starting Maintenance Mode CLI") + clientOptions := client.Options{ + HostPort: args.TemporalHostPort, + Namespace: args.TemporalNamespace, + Logger: slog.New(shared.NewSlogHandler(slog.NewJSONHandler(os.Stdout, nil))), + } + tc, err := setupTemporalClient(ctx, clientOptions) + if err != nil { + return fmt.Errorf("unable to create Temporal client: %w", err) + } + + taskQueueId := shared.MaintenanceFlowTaskQueue + if !args.UseMaintenanceTaskQueue { + taskQueueId = shared.PeerFlowTaskQueue + } +
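+ // "start" runs the start-maintenance workflow and records the outcome in the catalog; "end" reads + // that record back and, unless start was skipped, runs the end-maintenance workflow. + if args.Mode 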
== "start" { + if args.AssumeSkippedMaintenanceWorkflows { + slog.Info("Assuming maintenance workflows were skipped") + return WriteMaintenanceOutputToCatalog(ctx, StartMaintenanceResult{ + Skipped: true, + SkippedReason: ptr.String("Assumed skipped by CLI Flag"), + CLIVersion: peerdbenv.PeerDBVersionShaShort(), + }) + } + skipped, err := skipStartMaintenanceIfNeeded(ctx, args) + if err != nil { + return err + } + if skipped { + return nil + } + slog.Info("Running start maintenance workflow") + workflowRun, err := peerflow.RunStartMaintenanceWorkflow(ctx, tc, &protos.StartMaintenanceFlowInput{}, taskQueueId) + if err != nil { + slog.Error("Error running start maintenance workflow", "error", err) + return err + } + var output *protos.StartMaintenanceFlowOutput + if err := workflowRun.Get(ctx, &output); err != nil { + slog.Error("Error in start maintenance workflow", "error", err) + return err + } + slog.Info("Start maintenance workflow completed", "output", output) + return WriteMaintenanceOutputToCatalog(ctx, StartMaintenanceResult{ + Skipped: false, + CLIVersion: peerdbenv.PeerDBVersionShaShort(), + }) + } else if args.Mode == "end" { + if input, err := ReadLastMaintenanceOutput(ctx); input != nil || err != nil { + if err != nil { + return err + } + slog.Info("Checking if end maintenance workflow should be skipped", "input", input) + if input.Skipped { + slog.Info("Skipping end maintenance workflow as start maintenance was skipped", "reason", input.SkippedReason) + return nil + } + } + workflowRun, err := peerflow.RunEndMaintenanceWorkflow(ctx, tc, &protos.EndMaintenanceFlowInput{}, taskQueueId) + if err != nil { + slog.Error("Error running end maintenance workflow", "error", err) + return err + } + var output *protos.EndMaintenanceFlowOutput + if err := workflowRun.Get(ctx, &output); err != nil { + slog.Error("Error in end maintenance workflow", "error", err) + return err + } + slog.Info("End maintenance workflow completed", "output", output) + } else { + return fmt.Errorf("unknown flow type %s", args.Mode) + } + slog.Info("Maintenance workflow completed with type", "type", args.Mode) + return nil +} + +func skipStartMaintenanceIfNeeded(ctx context.Context, args *MaintenanceCLIParams) (bool, error) { + if args.SkipIfK8sServiceMissing != "" { + slog.Info("Checking if k8s service exists", "service", args.SkipIfK8sServiceMissing) + exists, err := CheckK8sServiceExistence(ctx, args.SkipIfK8sServiceMissing) + if err != nil { + return false, err + } + if !exists { + slog.Info("Skipping maintenance workflow due to missing k8s service", "service", args.SkipIfK8sServiceMissing) + return true, WriteMaintenanceOutputToCatalog(ctx, StartMaintenanceResult{ + Skipped: true, + SkippedReason: ptr.String(fmt.Sprintf("K8s service %s missing", args.SkipIfK8sServiceMissing)), + CLIVersion: peerdbenv.PeerDBVersionShaShort(), + }) + } + } + if args.SkipOnApiVersionMatch || args.SkipOnNoMirrors { + if args.FlowGrpcAddress == "" { + return false, errors.New("flow address is required when skipping based on API") + } + slog.Info("Constructing flow client") + transportCredentials := credentials.NewTLS(&tls.Config{ + MinVersion: tls.VersionTLS12, + }) + if !args.FlowTlsEnabled { + transportCredentials = insecure.NewCredentials() + } + conn, err := grpc.NewClient(args.FlowGrpcAddress, + grpc.WithTransportCredentials(transportCredentials), + ) + if err != nil { + return false, fmt.Errorf("unable to dial grpc flow server: %w", err) + } + peerFlowClient := protos.NewFlowServiceClient(conn) + if 
args.SkipOnApiVersionMatch { + slog.Info("Checking if CLI version matches API version", "cliVersion", peerdbenv.PeerDBVersionShaShort()) + version, err := peerFlowClient.GetVersion(ctx, &protos.PeerDBVersionRequest{}) + if err != nil { + return false, err + } + slog.Info("Got version from flow", "version", version.Version) + if version.Version == peerdbenv.PeerDBVersionShaShort() { + slog.Info("Skipping maintenance workflow due to matching versions") + return true, WriteMaintenanceOutputToCatalog(ctx, StartMaintenanceResult{ + Skipped: true, + SkippedReason: ptr.String(fmt.Sprintf("CLI version %s matches API version %s", peerdbenv.PeerDBVersionShaShort(), + version.Version)), + APIVersion: version.Version, + CLIVersion: peerdbenv.PeerDBVersionShaShort(), + }) + } + } + if args.SkipOnNoMirrors { + slog.Info("Checking if there are any mirrors") + mirrors, err := peerFlowClient.ListMirrors(ctx, &protos.ListMirrorsRequest{}) + if err != nil { + return false, err + } + slog.Info("Got mirrors from flow", "mirrors", mirrors.Mirrors) + if len(mirrors.Mirrors) == 0 { + slog.Info("Skipping maintenance workflow due to no mirrors") + return true, WriteMaintenanceOutputToCatalog(ctx, StartMaintenanceResult{ + Skipped: true, + SkippedReason: ptr.String("No mirrors found"), + }) + } + } + } + return false, nil +} + +func WriteMaintenanceOutputToCatalog(ctx context.Context, result StartMaintenanceResult) error { + pool, err := peerdbenv.GetCatalogConnectionPoolFromEnv(ctx) + if err != nil { + return err + } + _, err = pool.Exec(ctx, ` + insert into maintenance.start_maintenance_outputs + (cli_version, api_version, skipped, skipped_reason) + values + ($1, $2, $3, $4) + `, result.CLIVersion, result.APIVersion, result.Skipped, result.SkippedReason) + return err +} + +func ReadLastMaintenanceOutput(ctx context.Context) (*StartMaintenanceResult, error) { + pool, err := peerdbenv.GetCatalogConnectionPoolFromEnv(ctx) + if err != nil { + return nil, err + } + var result StartMaintenanceResult + if err := pool.QueryRow(ctx, ` + select cli_version, api_version, skipped, skipped_reason + from maintenance.start_maintenance_outputs + order by created_at desc + limit 1 + `).Scan(&result.CLIVersion, &result.APIVersion, &result.Skipped, &result.SkippedReason); err != nil { + return nil, err + } + return &result, nil +} + +func CheckK8sServiceExistence(ctx context.Context, serviceName string) (bool, error) { + config, err := rest.InClusterConfig() + if err != nil { + return false, err + } + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return false, err + } + _, err = clientset.CoreV1().Services(peerdbenv.GetEnvString("POD_NAMESPACE", "")).Get(ctx, serviceName, v1.GetOptions{}) + if err != nil { + if k8sErrors.IsNotFound(err) { + return false, nil + } + return false, err + } + return true, nil +} diff --git a/flow/cmd/mirror_status.go b/flow/cmd/mirror_status.go index ffd6eba45..156185054 100644 --- a/flow/cmd/mirror_status.go +++ b/flow/cmd/mirror_status.go @@ -215,8 +215,8 @@ func (h *FlowRequestHandler) CDCGraph(ctx context.Context, req *protos.GraphRequ } rows, err := h.pool.Query(ctx, `select tm, coalesce(sum(rows_in_batch), 0) from generate_series(date_trunc($2, now() - $1::INTERVAL * 30), now(), $1::INTERVAL) tm - left join peerdb_stats.cdc_batches on start_time >= tm and start_time < tm + $1::INTERVAL - group by 1 order by 1`, req.AggregateType, truncField) + left join peerdb_stats.cdc_batches on start_time >= tm and start_time < tm + $1::INTERVAL and flow_name = $3 + group by 1 order by 
1`, req.AggregateType, truncField, req.FlowJobName) if err != nil { return nil, err } @@ -447,20 +447,7 @@ func (h *FlowRequestHandler) isCDCFlow(ctx context.Context, flowJobName string) } func (h *FlowRequestHandler) getWorkflowStatus(ctx context.Context, workflowID string) (protos.FlowStatus, error) { - res, err := h.temporalClient.QueryWorkflow(ctx, workflowID, "", shared.FlowStatusQuery) - if err != nil { - slog.Error(fmt.Sprintf("failed to get status in workflow with ID %s: %s", workflowID, err.Error())) - return protos.FlowStatus_STATUS_UNKNOWN, - fmt.Errorf("failed to get status in workflow with ID %s: %w", workflowID, err) - } - var state protos.FlowStatus - err = res.Get(&state) - if err != nil { - slog.Error(fmt.Sprintf("failed to get status in workflow with ID %s: %s", workflowID, err.Error())) - return protos.FlowStatus_STATUS_UNKNOWN, - fmt.Errorf("failed to get status in workflow with ID %s: %w", workflowID, err) - } - return state, nil + return shared.GetWorkflowStatus(ctx, h.temporalClient, workflowID) } func (h *FlowRequestHandler) getCDCWorkflowState(ctx context.Context, @@ -598,10 +585,15 @@ func (h *FlowRequestHandler) CDCBatches(ctx context.Context, req *protos.GetCDCB return nil, err } + var page int32 + if req.Limit != 0 { + page = rowsBehind/int32(req.Limit) + 1 + } + return &protos.GetCDCBatchesResponse{ CdcBatches: batches, Total: total, - Page: rowsBehind/int32(req.Limit) + 1, + Page: page, }, nil } @@ -755,7 +747,7 @@ func (h *FlowRequestHandler) ListMirrorLogs( } page := req.Page - if page == 0 { + if page == 0 && req.NumPerPage != 0 { page = rowsBehind/req.NumPerPage + 1 } diff --git a/flow/cmd/settings.go b/flow/cmd/settings.go index 12e072859..dd4755f4a 100644 --- a/flow/cmd/settings.go +++ b/flow/cmd/settings.go @@ -55,8 +55,7 @@ func (h *FlowRequestHandler) PostDynamicSetting( ctx context.Context, req *protos.PostDynamicSettingRequest, ) (*protos.PostDynamicSettingResponse, error) { - _, err := h.pool.Exec(ctx, `insert into dynamic_settings (config_name, config_value) values ($1, $2) - on conflict (config_name) do update set config_value = $2`, req.Name, req.Value) + err := peerdbenv.UpdateDynamicSetting(ctx, h.pool, req.Name, req.Value) if err != nil { slog.Error("[PostDynamicConfig] failed to execute update setting", slog.Any("error", err)) return nil, err diff --git a/flow/cmd/validate_mirror.go b/flow/cmd/validate_mirror.go index 3e870aa66..83c9d2a07 100644 --- a/flow/cmd/validate_mirror.go +++ b/flow/cmd/validate_mirror.go @@ -14,6 +14,7 @@ import ( connpostgres "github.com/PeerDB-io/peer-flow/connectors/postgres" "github.com/PeerDB-io/peer-flow/connectors/utils" "github.com/PeerDB-io/peer-flow/generated/protos" + "github.com/PeerDB-io/peer-flow/peerdbenv" "github.com/PeerDB-io/peer-flow/shared/telemetry" ) @@ -25,6 +26,17 @@ var ( func (h *FlowRequestHandler) ValidateCDCMirror( ctx context.Context, req *protos.CreateCDCFlowRequest, ) (*protos.ValidateCDCMirrorResponse, error) { + underMaintenance, err := peerdbenv.PeerDBMaintenanceModeEnabled(ctx, nil) + if err != nil { + slog.Error("unable to check maintenance mode", slog.Any("error", err)) + return nil, fmt.Errorf("unable to load dynamic config: %w", err) + } + + if underMaintenance { + slog.Warn("Validate request denied due to maintenance", "flowName", req.ConnectionConfigs.FlowJobName) + return nil, errors.New("PeerDB is under maintenance") + } + if !req.ConnectionConfigs.Resync { mirrorExists, existCheckErr := h.CheckIfMirrorNameExists(ctx, req.ConnectionConfigs.FlowJobName) if existCheckErr != 
nil { diff --git a/flow/cmd/worker.go b/flow/cmd/worker.go index 9db97288c..cca0202ec 100644 --- a/flow/cmd/worker.go +++ b/flow/cmd/worker.go @@ -10,6 +10,7 @@ import ( "runtime" "github.com/grafana/pyroscope-go" + "go.opentelemetry.io/otel/metric" "go.temporal.io/sdk/client" temporalotel "go.temporal.io/sdk/contrib/opentelemetry" "go.temporal.io/sdk/worker" @@ -30,6 +31,7 @@ type WorkerSetupOptions struct { TemporalMaxConcurrentWorkflowTasks int EnableProfiling bool EnableOtelMetrics bool + UseMaintenanceTaskQueue bool } type workerSetupResponse struct { @@ -124,8 +126,11 @@ func WorkerSetup(opts *WorkerSetupOptions) (*workerSetupResponse, error) { return nil, fmt.Errorf("unable to create Temporal client: %w", err) } slog.Info("Created temporal client") - - taskQueue := peerdbenv.PeerFlowTaskQueueName(shared.PeerFlowTaskQueue) + queueId := shared.PeerFlowTaskQueue + if opts.UseMaintenanceTaskQueue { + queueId = shared.MaintenanceFlowTaskQueue + } + taskQueue := peerdbenv.PeerFlowTaskQueueName(queueId) slog.Info( fmt.Sprintf("Creating temporal worker for queue %v: %v workflow workers %v activity workers", taskQueue, @@ -153,8 +158,8 @@ func WorkerSetup(opts *WorkerSetupOptions) (*workerSetupResponse, error) { otelManager = &otel_metrics.OtelManager{ MetricsProvider: metricsProvider, Meter: metricsProvider.Meter("io.peerdb.flow-worker"), - Float64GaugesCache: make(map[string]*otel_metrics.Float64SyncGauge), - Int64GaugesCache: make(map[string]*otel_metrics.Int64SyncGauge), + Float64GaugesCache: make(map[string]metric.Float64Gauge), + Int64GaugesCache: make(map[string]metric.Int64Gauge), } cleanupOtelManagerFunc = func() { shutDownErr := otelManager.MetricsProvider.Shutdown(context.Background()) @@ -170,6 +175,12 @@ func WorkerSetup(opts *WorkerSetupOptions) (*workerSetupResponse, error) { OtelManager: otelManager, }) + w.RegisterActivity(&activities.MaintenanceActivity{ + CatalogPool: conn, + Alerter: alerting.NewAlerter(context.Background(), conn), + TemporalClient: c, + }) + return &workerSetupResponse{ Client: c, Worker: w, diff --git a/flow/connectors/bigquery/merge_stmt_generator.go b/flow/connectors/bigquery/merge_stmt_generator.go index e903ef586..5ee4f883c 100644 --- a/flow/connectors/bigquery/merge_stmt_generator.go +++ b/flow/connectors/bigquery/merge_stmt_generator.go @@ -34,7 +34,7 @@ func (m *mergeStmtGenerator) generateFlattenedCTE(dstTable string, normalizedTab var castStmt string shortCol := m.shortColumn[column.Name] switch qvalue.QValueKind(colType) { - case qvalue.QValueKindJSON, qvalue.QValueKindHStore: + case qvalue.QValueKindJSON, qvalue.QValueKindJSONB, qvalue.QValueKindHStore: // if the type is JSON, then just extract JSON castStmt = fmt.Sprintf("CAST(PARSE_JSON(JSON_VALUE(_peerdb_data, '$.%s'),wide_number_mode=>'round') AS %s) AS `%s`", column.Name, bqTypeString, shortCol) diff --git a/flow/connectors/bigquery/qvalue_convert.go b/flow/connectors/bigquery/qvalue_convert.go index d2d9d9f0c..aa798641a 100644 --- a/flow/connectors/bigquery/qvalue_convert.go +++ b/flow/connectors/bigquery/qvalue_convert.go @@ -34,7 +34,7 @@ func qValueKindToBigQueryType(columnDescription *protos.FieldDescription, nullab case qvalue.QValueKindString: bqField.Type = bigquery.StringFieldType // json also is stored as string for now - case qvalue.QValueKindJSON, qvalue.QValueKindHStore: + case qvalue.QValueKindJSON, qvalue.QValueKindJSONB, qvalue.QValueKindHStore: bqField.Type = bigquery.JSONFieldType // time related case qvalue.QValueKindTimestamp, qvalue.QValueKindTimestampTZ: diff --git 
a/flow/connectors/clickhouse/cdc.go b/flow/connectors/clickhouse/cdc.go index 1c0b651ad..d3eb883b4 100644 --- a/flow/connectors/clickhouse/cdc.go +++ b/flow/connectors/clickhouse/cdc.go @@ -8,7 +8,7 @@ import ( "log/slog" "strings" - _ "github.com/ClickHouse/clickhouse-go/v2" + "github.com/ClickHouse/clickhouse-go/v2" _ "github.com/ClickHouse/clickhouse-go/v2/lib/driver" "github.com/PeerDB-io/peer-flow/connectors/utils" @@ -20,7 +20,7 @@ import ( const ( checkIfTableExistsSQL = `SELECT exists(SELECT 1 FROM system.tables WHERE database = ? AND name = ?) AS table_exists;` - dropTableIfExistsSQL = `DROP TABLE IF EXISTS %s;` + dropTableIfExistsSQL = "DROP TABLE IF EXISTS `%s`;" ) // getRawTableName returns the raw table name for the given table identifier. @@ -46,13 +46,13 @@ func (c *ClickHouseConnector) CreateRawTable(ctx context.Context, req *protos.Cr rawTableName := c.getRawTableName(req.FlowJobName) createRawTableSQL := `CREATE TABLE IF NOT EXISTS %s ( - _peerdb_uid UUID NOT NULL, - _peerdb_timestamp Int64 NOT NULL, - _peerdb_destination_table_name String NOT NULL, - _peerdb_data String NOT NULL, - _peerdb_record_type Int NOT NULL, + _peerdb_uid UUID, + _peerdb_timestamp Int64, + _peerdb_destination_table_name String, + _peerdb_data String, + _peerdb_record_type Int, _peerdb_match_data String, - _peerdb_batch_id Int, + _peerdb_batch_id Int64, _peerdb_unchanged_toast_columns String ) ENGINE = MergeTree() ORDER BY (_peerdb_batch_id, _peerdb_destination_table_name);` @@ -88,13 +88,12 @@ func (c *ClickHouseConnector) syncRecordsViaAvro( } avroSyncer := c.avroSyncMethod(req.FlowJobName) - numRecords, err := avroSyncer.SyncRecords(ctx, stream, req.FlowJobName, syncBatchID) + numRecords, err := avroSyncer.SyncRecords(ctx, req.Env, stream, req.FlowJobName, syncBatchID) if err != nil { return nil, err } - err = c.ReplayTableSchemaDeltas(ctx, req.FlowJobName, req.Records.SchemaDeltas) - if err != nil { + if err := c.ReplayTableSchemaDeltas(ctx, req.FlowJobName, req.Records.SchemaDeltas); err != nil { return nil, fmt.Errorf("failed to sync schema changes: %w", err) } @@ -113,8 +112,7 @@ func (c *ClickHouseConnector) SyncRecords(ctx context.Context, req *model.SyncRe return nil, err } - err = c.FinishBatch(ctx, req.FlowJobName, req.SyncBatchID, res.LastSyncedCheckpointID) - if err != nil { + if err := c.FinishBatch(ctx, req.FlowJobName, req.SyncBatchID, res.LastSyncedCheckpointID); err != nil { c.logger.Error("failed to increment id", slog.Any("error", err)) return nil, err } @@ -137,15 +135,13 @@ func (c *ClickHouseConnector) ReplayTableSchemaDeltas(ctx context.Context, flowJ for _, addedColumn := range schemaDelta.AddedColumns { clickHouseColType, err := qvalue.QValueKind(addedColumn.Type).ToDWHColumnType(protos.DBType_CLICKHOUSE) if err != nil { - return fmt.Errorf("failed to convert column type %s to ClickHouse type: %w", - addedColumn.Type, err) + return fmt.Errorf("failed to convert column type %s to ClickHouse type: %w", addedColumn.Type, err) } err = c.execWithLogging(ctx, - fmt.Sprintf("ALTER TABLE %s ADD COLUMN IF NOT EXISTS \"%s\" %s", + fmt.Sprintf("ALTER TABLE `%s` ADD COLUMN IF NOT EXISTS `%s` %s", schemaDelta.DstTableName, addedColumn.Name, clickHouseColType)) if err != nil { - return fmt.Errorf("failed to add column %s for table %s: %w", addedColumn.Name, - schemaDelta.DstTableName, err) + return fmt.Errorf("failed to add column %s for table %s: %w", addedColumn.Name, schemaDelta.DstTableName, err) } c.logger.Info(fmt.Sprintf("[schema delta replay] added column %s with data type %s", 
addedColumn.Name, addedColumn.Type), @@ -186,34 +182,47 @@ func (c *ClickHouseConnector) RenameTables( } allCols := strings.Join(columnNames, ",") - c.logger.Info(fmt.Sprintf("handling soft-deletes for table '%s'...", renameRequest.NewName)) - err = c.execWithLogging(ctx, - fmt.Sprintf("INSERT INTO %s(%s,%s) SELECT %s,true FROM %s WHERE %s = 1", - renameRequest.CurrentName, allCols, signColName, allCols, renameRequest.NewName, signColName)) - if err != nil { + c.logger.Info("handling soft-deletes for table before rename", slog.String("NewName", renameRequest.NewName)) + if err := c.execWithLogging(ctx, + fmt.Sprintf("INSERT INTO `%s`(%s,%s) SELECT %s,true FROM `%s` WHERE %s = 1", + renameRequest.CurrentName, allCols, signColName, allCols, renameRequest.NewName, signColName), + ); err != nil { return nil, fmt.Errorf("unable to handle soft-deletes for table %s: %w", renameRequest.NewName, err) } - } else { - c.logger.Info(fmt.Sprintf("table '%s' does not exist, skipping soft-deletes transfer for it", renameRequest.NewName)) - } - // drop the dst table if exists - err = c.execWithLogging(ctx, "DROP TABLE IF EXISTS "+renameRequest.NewName) - if err != nil { - return nil, fmt.Errorf("unable to drop table %s: %w", renameRequest.NewName, err) + // the target table exists, so we can attempt a swap. In most cases we will have the Atomic engine, + // which supports a special query that exchanges two tables, preserving dependent (materialized) views and dictionaries on them + c.logger.Info("attempting atomic exchange", + slog.String("OldName", renameRequest.CurrentName), slog.String("NewName", renameRequest.NewName)) + if err = c.execWithLogging(ctx, + fmt.Sprintf("EXCHANGE TABLES `%s` and `%s`", renameRequest.NewName, renameRequest.CurrentName), + ); err == nil { + if err := c.execWithLogging(ctx, fmt.Sprintf(dropTableIfExistsSQL, renameRequest.CurrentName)); err != nil { + return nil, fmt.Errorf("unable to drop exchanged table %s: %w", renameRequest.CurrentName, err) + } + } else if ex, ok := err.(*clickhouse.Exception); !ok || ex.Code != 48 { + // code 48 == not implemented: this engine cannot EXCHANGE, so fall through to the drop-and-rename fallback below; + // on any other error code or type, return, since we know the exchange was the sensible action and the failure is real + return nil, fmt.Errorf("unable to exchange tables %s and %s: %w", renameRequest.NewName, renameRequest.CurrentName, err) + } } - // rename the src table to dst - err = c.execWithLogging(ctx, fmt.Sprintf("RENAME TABLE %s TO %s", - renameRequest.CurrentName, - renameRequest.NewName)) - if err != nil { - return nil, fmt.Errorf("unable to rename table %s to %s: %w", - renameRequest.CurrentName, renameRequest.NewName, err) + // either the original table doesn't exist, in which case it is safe to just rename, + // or err is set from the failed EXCHANGE TABLES attempt above (code 48) + if !originalTableExists || err != nil { + if err := c.execWithLogging(ctx, fmt.Sprintf(dropTableIfExistsSQL, renameRequest.NewName)); err != nil { + return nil, fmt.Errorf("unable to drop table %s: %w", renameRequest.NewName, err) + } + + if err := c.execWithLogging(ctx, + fmt.Sprintf("RENAME TABLE `%s` TO `%s`", renameRequest.CurrentName, renameRequest.NewName), + ); err != nil { + return nil, fmt.Errorf("unable to rename table %s to %s: %w", renameRequest.CurrentName, renameRequest.NewName, err) + } } - c.logger.Info(fmt.Sprintf("successfully renamed table '%s' to '%s'", - renameRequest.CurrentName, renameRequest.NewName)) + c.logger.Info("successfully renamed table", + slog.String("OldName", 
renameRequest.CurrentName), slog.String("NewName", renameRequest.NewName)) } return &protos.RenameTablesOutput{ diff --git a/flow/connectors/clickhouse/clickhouse.go b/flow/connectors/clickhouse/clickhouse.go index 1c7d110e3..63ccea693 100644 --- a/flow/connectors/clickhouse/clickhouse.go +++ b/flow/connectors/clickhouse/clickhouse.go @@ -33,7 +33,6 @@ type ClickHouseConnector struct { logger log.Logger config *protos.ClickhouseConfig credsProvider *utils.ClickHouseS3Credentials - s3Stage *ClickHouseS3Stage } func ValidateS3(ctx context.Context, creds *utils.ClickHouseS3Credentials) error { @@ -92,21 +91,21 @@ func (c *ClickHouseConnector) ValidateCheck(ctx context.Context) error { // add a column if err := c.exec(ctx, - fmt.Sprintf("ALTER TABLE %s ADD COLUMN updated_at DateTime64(9) DEFAULT now64()", validateDummyTableName), + fmt.Sprintf("ALTER TABLE `%s` ADD COLUMN updated_at DateTime64(9) DEFAULT now64()", validateDummyTableName), ); err != nil { return fmt.Errorf("failed to add column to validation table %s: %w", validateDummyTableName, err) } // rename the table if err := c.exec(ctx, - fmt.Sprintf("RENAME TABLE %s TO %s", validateDummyTableName, validateDummyTableName+"_renamed"), + fmt.Sprintf("RENAME TABLE `%s` TO `%s`", validateDummyTableName, validateDummyTableName+"_renamed"), ); err != nil { return fmt.Errorf("failed to rename validation table %s: %w", validateDummyTableName, err) } validateDummyTableName += "_renamed" // insert a row - if err := c.exec(ctx, fmt.Sprintf("INSERT INTO %s VALUES (1, now64())", validateDummyTableName)); err != nil { + if err := c.exec(ctx, fmt.Sprintf("INSERT INTO `%s` VALUES (1, now64())", validateDummyTableName)); err != nil { return fmt.Errorf("failed to insert into validation table %s: %w", validateDummyTableName, err) } @@ -129,7 +128,7 @@ func NewClickHouseConnector( config *protos.ClickhouseConfig, ) (*ClickHouseConnector, error) { logger := shared.LoggerFromCtx(ctx) - database, err := Connect(ctx, config) + database, err := Connect(ctx, env, config) if err != nil { return nil, fmt.Errorf("failed to open connection to ClickHouse peer: %w", err) } @@ -153,12 +152,10 @@ func NewClickHouseConnector( } awsBucketPath := config.S3Path - if awsBucketPath == "" { deploymentUID := peerdbenv.PeerDBDeploymentUID() flowName, _ := ctx.Value(shared.FlowNameKey).(string) - bucketPathSuffix := fmt.Sprintf("%s/%s", - url.PathEscape(deploymentUID), url.PathEscape(flowName)) + bucketPathSuffix := fmt.Sprintf("%s/%s", url.PathEscape(deploymentUID), url.PathEscape(flowName)) // Fallback: Get S3 credentials from environment awsBucketName, err := peerdbenv.PeerDBClickHouseAWSS3BucketName(ctx, env) if err != nil { @@ -170,10 +167,7 @@ func NewClickHouseConnector( awsBucketPath = fmt.Sprintf("s3://%s/%s", awsBucketName, bucketPathSuffix) } - clickHouseS3CredentialsNew := utils.ClickHouseS3Credentials{ - Provider: credentialsProvider, - BucketPath: awsBucketPath, - } + credentials, err := credentialsProvider.Retrieve(ctx) if err != nil { return nil, err @@ -184,8 +178,10 @@ func NewClickHouseConnector( PostgresMetadata: pgMetadata, config: config, logger: logger, - credsProvider: &clickHouseS3CredentialsNew, - s3Stage: NewClickHouseS3Stage(), + credsProvider: &utils.ClickHouseS3Credentials{ + Provider: credentialsProvider, + BucketPath: awsBucketPath, + }, } if credentials.AWS.SessionToken != "" { @@ -209,7 +205,7 @@ func NewClickHouseConnector( return connector, nil } -func Connect(ctx context.Context, config *protos.ClickhouseConfig) (clickhouse.Conn, error) { 
+func Connect(ctx context.Context, env map[string]string, config *protos.ClickhouseConfig) (clickhouse.Conn, error) { var tlsSetting *tls.Config if !config.DisableTls { tlsSetting = &tls.Config{MinVersion: tls.VersionTLS13} @@ -232,6 +228,13 @@ func Connect(ctx context.Context, config *protos.ClickhouseConfig) (clickhouse.C tlsSetting.RootCAs = caPool } + var settings clickhouse.Settings + if maxInsertThreads, err := peerdbenv.PeerDBClickHouseMaxInsertThreads(ctx, env); err != nil { + return nil, fmt.Errorf("failed to load max_insert_threads config: %w", err) + } else if maxInsertThreads != 0 { + settings = clickhouse.Settings{"max_insert_threads": maxInsertThreads} + } + conn, err := clickhouse.Open(&clickhouse.Options{ Addr: []string{fmt.Sprintf("%s:%d", config.Host, config.Port)}, Auth: clickhouse.Auth{ @@ -249,6 +252,7 @@ func Connect(ctx context.Context, config *protos.ClickhouseConfig) (clickhouse.C {Name: "peerdb"}, }, }, + Settings: settings, DialTimeout: 3600 * time.Second, ReadTimeout: 3600 * time.Second, }) @@ -381,8 +385,7 @@ func (c *ClickHouseConnector) checkTablesEmptyAndEngine(ctx context.Context, tab for rows.Next() { var tableName, engine string var totalRows uint64 - err = rows.Scan(&tableName, &engine, &totalRows) - if err != nil { + if err := rows.Scan(&tableName, &engine, &totalRows); err != nil { return fmt.Errorf("failed to scan information for tables: %w", err) } if totalRows != 0 && optedForInitialLoad { @@ -393,8 +396,8 @@ func (c *ClickHouseConnector) checkTablesEmptyAndEngine(ctx context.Context, tab slog.String("table", tableName), slog.String("engine", engine)) } } - if rows.Err() != nil { - return fmt.Errorf("failed to read rows: %w", rows.Err()) + if err := rows.Err(); err != nil { + return fmt.Errorf("failed to read rows: %w", err) } return nil } @@ -418,14 +421,13 @@ func (c *ClickHouseConnector) getTableColumnsMapping(ctx context.Context, for rows.Next() { var tableName string var fieldDescription protos.FieldDescription - err = rows.Scan(&fieldDescription.Name, &fieldDescription.Type, &tableName) - if err != nil { + if err := rows.Scan(&fieldDescription.Name, &fieldDescription.Type, &tableName); err != nil { return nil, fmt.Errorf("failed to scan columns for tables: %w", err) } tableColumnsMapping[tableName] = append(tableColumnsMapping[tableName], &fieldDescription) } - if rows.Err() != nil { - return nil, fmt.Errorf("failed to read rows: %w", rows.Err()) + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("failed to read rows: %w", err) } return tableColumnsMapping, nil } diff --git a/flow/connectors/clickhouse/normalize.go b/flow/connectors/clickhouse/normalize.go index d65d61e9d..2debe0f4d 100644 --- a/flow/connectors/clickhouse/normalize.go +++ b/flow/connectors/clickhouse/normalize.go @@ -6,11 +6,15 @@ import ( "database/sql" "errors" "fmt" + "log/slog" "slices" "strconv" "strings" "time" + "github.com/ClickHouse/clickhouse-go/v2" + "golang.org/x/sync/errgroup" + "github.com/PeerDB-io/peer-flow/datatypes" "github.com/PeerDB-io/peer-flow/generated/protos" "github.com/PeerDB-io/peer-flow/model" @@ -261,8 +265,7 @@ func (c *ClickHouseConnector) NormalizeRecords( }, nil } - err = c.copyAvroStagesToDestination(ctx, req.FlowJobName, normBatchID, req.SyncBatchID) - if err != nil { + if err := c.copyAvroStagesToDestination(ctx, req.FlowJobName, normBatchID, req.SyncBatchID); err != nil { return nil, fmt.Errorf("failed to copy avro stages to destination: %w", err) } @@ -277,9 +280,48 @@ func (c *ClickHouseConnector) NormalizeRecords( return nil, 
err } + enablePrimaryUpdate, err := peerdbenv.PeerDBEnableClickHousePrimaryUpdate(ctx, req.Env) + if err != nil { + return nil, err + } + + parallelNormalize, err := peerdbenv.PeerDBClickHouseParallelNormalize(ctx, req.Env) + if err != nil { + return nil, err + } + parallelNormalize = min(max(parallelNormalize, 1), len(destinationTableNames)) + if parallelNormalize > 1 { + c.logger.Info("normalizing in parallel", slog.Int("connections", parallelNormalize)) + } + + queries := make(chan string) rawTbl := c.getRawTableName(req.FlowJobName) - // model the raw table data as inserts. + group, errCtx := errgroup.WithContext(ctx) + for i := range parallelNormalize { + group.Go(func() error { + var chConn clickhouse.Conn + if i == 0 { + chConn = c.database + } else { + var err error + chConn, err = Connect(errCtx, req.Env, c.config) + if err != nil { + return err + } + defer chConn.Close() + } + + for query := range queries { + c.logger.Info("normalizing batch", slog.String("query", query)) + if err := chConn.Exec(errCtx, query); err != nil { + return fmt.Errorf("error while inserting into normalized table: %w", err) + } + } + return nil + }) + } + for _, tbl := range destinationTableNames { // SELECT projection FROM raw_table WHERE _peerdb_batch_id > normalize_batch_id AND _peerdb_batch_id <= sync_batch_id selectQuery := strings.Builder{} @@ -298,11 +340,6 @@ func (c *ClickHouseConnector) NormalizeRecords( } } - enablePrimaryUpdate, err := peerdbenv.PeerDBEnableClickHousePrimaryUpdate(ctx, req.Env) - if err != nil { - return nil, err - } - projection := strings.Builder{} projectionUpdate := strings.Builder{} @@ -337,6 +374,7 @@ func (c *ClickHouseConnector) NormalizeRecords( var err error clickHouseType, err = colType.ToDWHColumnType(protos.DBType_CLICKHOUSE) if err != nil { + close(queries) return nil, fmt.Errorf("error while converting column type to clickhouse type: %w", err) } } @@ -432,16 +470,23 @@ func (c *ClickHouseConnector) NormalizeRecords( insertIntoSelectQuery.WriteString(colSelector.String()) insertIntoSelectQuery.WriteString(selectQuery.String()) - q := insertIntoSelectQuery.String() - - if err := c.execWithLogging(ctx, q); err != nil { - return nil, fmt.Errorf("error while inserting into normalized table: %w", err) + select { + case queries <- insertIntoSelectQuery.String(): + case <-errCtx.Done(): + close(queries) + c.logger.Error("[clickhouse] context canceled while normalizing", + slog.Any("error", errCtx.Err()), + slog.Any("cause", context.Cause(errCtx))) + return nil, context.Cause(errCtx) } } + close(queries) + if err := group.Wait(); err != nil { + return nil, err + } - err = c.UpdateNormalizeBatchID(ctx, req.FlowJobName, req.SyncBatchID) - if err != nil { - c.logger.Error("[clickhouse] error while updating normalize batch id", "error", err) + if err := c.UpdateNormalizeBatchID(ctx, req.FlowJobName, req.SyncBatchID); err != nil { + c.logger.Error("[clickhouse] error while updating normalize batch id", slog.Int64("BatchID", req.SyncBatchID), slog.Any("error", err)) return nil, err } @@ -461,7 +506,7 @@ func (c *ClickHouseConnector) getDistinctTableNamesInBatch( rawTbl := c.getRawTableName(flowJobName) q := fmt.Sprintf( - `SELECT DISTINCT _peerdb_destination_table_name FROM %s WHERE _peerdb_batch_id > %d AND _peerdb_batch_id <= %d`, + `SELECT DISTINCT _peerdb_destination_table_name FROM %s WHERE _peerdb_batch_id>%d AND _peerdb_batch_id<=%d`, rawTbl, normalizeBatchID, syncBatchID) rows, err := c.query(ctx, q) @@ -472,8 +517,7 @@ func (c *ClickHouseConnector) 
getDistinctTableNamesInBatch( var tableNames []string for rows.Next() { var tableName sql.NullString - err = rows.Scan(&tableName) - if err != nil { + if err := rows.Scan(&tableName); err != nil { return nil, fmt.Errorf("error while scanning table name: %w", err) } @@ -484,7 +528,7 @@ func (c *ClickHouseConnector) getDistinctTableNamesInBatch( tableNames = append(tableNames, tableName.String) } - if rows.Err() != nil { + if err := rows.Err(); err != nil { return nil, fmt.Errorf("failed to read rows: %w", err) } @@ -493,7 +537,7 @@ func (c *ClickHouseConnector) getDistinctTableNamesInBatch( func (c *ClickHouseConnector) copyAvroStageToDestination(ctx context.Context, flowJobName string, syncBatchID int64) error { avroSyncMethod := c.avroSyncMethod(flowJobName) - avroFile, err := c.s3Stage.GetAvroStage(ctx, flowJobName, syncBatchID) + avroFile, err := GetAvroStage(ctx, flowJobName, syncBatchID) if err != nil { return fmt.Errorf("failed to get avro stage: %w", err) } @@ -510,8 +554,7 @@ func (c *ClickHouseConnector) copyAvroStagesToDestination( ctx context.Context, flowJobName string, normBatchID, syncBatchID int64, ) error { for s := normBatchID + 1; s <= syncBatchID; s++ { - err := c.copyAvroStageToDestination(ctx, flowJobName, s) - if err != nil { + if err := c.copyAvroStageToDestination(ctx, flowJobName, s); err != nil { return fmt.Errorf("failed to copy avro stage to destination: %w", err) } } diff --git a/flow/connectors/clickhouse/qrep_avro_sync.go b/flow/connectors/clickhouse/qrep_avro_sync.go index edbd0392c..fa2cfe103 100644 --- a/flow/connectors/clickhouse/qrep_avro_sync.go +++ b/flow/connectors/clickhouse/qrep_avro_sync.go @@ -18,8 +18,8 @@ import ( ) type ClickHouseAvroSyncMethod struct { - config *protos.QRepConfig - connector *ClickHouseConnector + *ClickHouseConnector + config *protos.QRepConfig } func NewClickHouseAvroSyncMethod( @@ -27,22 +27,22 @@ func NewClickHouseAvroSyncMethod( connector *ClickHouseConnector, ) *ClickHouseAvroSyncMethod { return &ClickHouseAvroSyncMethod{ - config: config, - connector: connector, + ClickHouseConnector: connector, + config: config, } } func (s *ClickHouseAvroSyncMethod) CopyStageToDestination(ctx context.Context, avroFile *avro.AvroFile) error { - stagingPath := s.connector.credsProvider.BucketPath + stagingPath := s.credsProvider.BucketPath s3o, err := utils.NewS3BucketAndPrefix(stagingPath) if err != nil { return err } - endpoint := s.connector.credsProvider.Provider.GetEndpointURL() - region := s.connector.credsProvider.Provider.GetRegion() + endpoint := s.credsProvider.Provider.GetEndpointURL() + region := s.credsProvider.Provider.GetRegion() avroFileUrl := utils.FileURLForS3Service(endpoint, region, s3o.Bucket, avroFile.FilePath) - creds, err := s.connector.credsProvider.Provider.Retrieve(ctx) + creds, err := s.credsProvider.Provider.Retrieve(ctx) if err != nil { return err } @@ -55,11 +55,12 @@ func (s *ClickHouseAvroSyncMethod) CopyStageToDestination(ctx context.Context, a s.config.DestinationTableIdentifier, avroFileUrl, creds.AWS.AccessKeyID, creds.AWS.SecretAccessKey, sessionTokenPart) - return s.connector.database.Exec(ctx, query) + return s.database.Exec(ctx, query) } func (s *ClickHouseAvroSyncMethod) SyncRecords( ctx context.Context, + env map[string]string, stream *model.QRecordStream, flowJobName string, syncBatchID int64, @@ -67,7 +68,7 @@ func (s *ClickHouseAvroSyncMethod) SyncRecords( dstTableName := s.config.DestinationTableIdentifier schema := stream.Schema() - s.connector.logger.Info("sync function called and schema 
acquired", + s.logger.Info("sync function called and schema acquired", slog.String("dstTable", dstTableName)) avroSchema, err := s.getAvroSchema(dstTableName, schema) @@ -76,19 +77,18 @@ func (s *ClickHouseAvroSyncMethod) SyncRecords( } batchIdentifierForFile := fmt.Sprintf("%s_%d", shared.RandomString(16), syncBatchID) - avroFile, err := s.writeToAvroFile(ctx, stream, avroSchema, batchIdentifierForFile, flowJobName) + avroFile, err := s.writeToAvroFile(ctx, env, stream, avroSchema, batchIdentifierForFile, flowJobName) if err != nil { return 0, err } - s.connector.logger.Info("[SyncRecords] written records to Avro file", + s.logger.Info("[SyncRecords] written records to Avro file", slog.String("dstTable", dstTableName), slog.String("avroFile", avroFile.FilePath), slog.Int("numRecords", avroFile.NumRecords), slog.Int64("syncBatchID", syncBatchID)) - err = s.connector.s3Stage.SetAvroStage(ctx, flowJobName, syncBatchID, avroFile) - if err != nil { + if err := SetAvroStage(ctx, flowJobName, syncBatchID, avroFile); err != nil { return 0, fmt.Errorf("failed to set avro stage: %w", err) } @@ -103,7 +103,7 @@ func (s *ClickHouseAvroSyncMethod) SyncQRepRecords( stream *model.QRecordStream, ) (int, error) { dstTableName := config.DestinationTableIdentifier - stagingPath := s.connector.credsProvider.BucketPath + stagingPath := s.credsProvider.BucketPath startTime := time.Now() avroSchema, err := s.getAvroSchema(dstTableName, stream.Schema()) @@ -111,7 +111,7 @@ func (s *ClickHouseAvroSyncMethod) SyncQRepRecords( return 0, err } - avroFile, err := s.writeToAvroFile(ctx, stream, avroSchema, partition.PartitionId, config.FlowJobName) + avroFile, err := s.writeToAvroFile(ctx, config.Env, stream, avroSchema, partition.PartitionId, config.FlowJobName) if err != nil { return 0, err } @@ -121,13 +121,13 @@ func (s *ClickHouseAvroSyncMethod) SyncQRepRecords( return 0, err } - creds, err := s.connector.credsProvider.Provider.Retrieve(ctx) + creds, err := s.credsProvider.Provider.Retrieve(ctx) if err != nil { return 0, err } - endpoint := s.connector.credsProvider.Provider.GetEndpointURL() - region := s.connector.credsProvider.Provider.GetRegion() + endpoint := s.credsProvider.Provider.GetEndpointURL() + region := s.credsProvider.Provider.GetRegion() avroFileUrl := utils.FileURLForS3Service(endpoint, region, s3o.Bucket, avroFile.FilePath) selector := make([]string, 0, len(dstTableSchema)) for _, col := range dstTableSchema { @@ -151,13 +151,13 @@ func (s *ClickHouseAvroSyncMethod) SyncQRepRecords( config.DestinationTableIdentifier, selectorStr, selectorStr, avroFileUrl, creds.AWS.AccessKeyID, creds.AWS.SecretAccessKey, sessionTokenPart) - if err := s.connector.database.Exec(ctx, query); err != nil { - s.connector.logger.Error("Failed to insert into select for ClickHouse", slog.Any("error", err)) + if err := s.database.Exec(ctx, query); err != nil { + s.logger.Error("Failed to insert into select for ClickHouse", slog.Any("error", err)) return 0, err } - if err := s.connector.FinishQRepPartition(ctx, partition, config.FlowJobName, startTime); err != nil { - s.connector.logger.Error("Failed to finish QRep partition", slog.Any("error", err)) + if err := s.FinishQRepPartition(ctx, partition, config.FlowJobName, startTime); err != nil { + s.logger.Error("Failed to finish QRep partition", slog.Any("error", err)) return 0, err } @@ -177,12 +177,13 @@ func (s *ClickHouseAvroSyncMethod) getAvroSchema( func (s *ClickHouseAvroSyncMethod) writeToAvroFile( ctx context.Context, + env map[string]string, stream 
*model.QRecordStream, avroSchema *model.QRecordAvroSchemaDefinition, identifierForFile string, flowJobName string, ) (*avro.AvroFile, error) { - stagingPath := s.connector.credsProvider.BucketPath + stagingPath := s.credsProvider.BucketPath ocfWriter := avro.NewPeerDBOCFWriter(stream, avroSchema, avro.CompressZstd, protos.DBType_CLICKHOUSE) s3o, err := utils.NewS3BucketAndPrefix(stagingPath) if err != nil { @@ -191,7 +192,7 @@ func (s *ClickHouseAvroSyncMethod) writeToAvroFile( s3AvroFileKey := fmt.Sprintf("%s/%s/%s.avro.zst", s3o.Prefix, flowJobName, identifierForFile) s3AvroFileKey = strings.Trim(s3AvroFileKey, "/") - avroFile, err := ocfWriter.WriteRecordsToS3(ctx, s3o.Bucket, s3AvroFileKey, s.connector.credsProvider.Provider) + avroFile, err := ocfWriter.WriteRecordsToS3(ctx, env, s3o.Bucket, s3AvroFileKey, s.credsProvider.Provider) if err != nil { return nil, fmt.Errorf("failed to write records to S3: %w", err) } diff --git a/flow/connectors/clickhouse/s3_stage.go b/flow/connectors/clickhouse/s3_stage.go index b4ca7d71c..5f5eb899a 100644 --- a/flow/connectors/clickhouse/s3_stage.go +++ b/flow/connectors/clickhouse/s3_stage.go @@ -6,19 +6,12 @@ import ( "fmt" "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" utils "github.com/PeerDB-io/peer-flow/connectors/utils/avro" "github.com/PeerDB-io/peer-flow/peerdbenv" ) -type ClickHouseS3Stage struct{} - -func NewClickHouseS3Stage() *ClickHouseS3Stage { - return &ClickHouseS3Stage{} -} - -func (c *ClickHouseS3Stage) SetAvroStage( +func SetAvroStage( ctx context.Context, flowJobName string, syncBatchID int64, @@ -29,36 +22,36 @@ func (c *ClickHouseS3Stage) SetAvroStage( return fmt.Errorf("failed to marshal avro file: %w", err) } - conn, err := c.getConn(ctx) + conn, err := peerdbenv.GetCatalogConnectionPoolFromEnv(ctx) if err != nil { return fmt.Errorf("failed to get connection: %w", err) } - _, err = conn.Exec(ctx, ` + if _, err := conn.Exec(ctx, ` INSERT INTO ch_s3_stage (flow_job_name, sync_batch_id, avro_file) VALUES ($1, $2, $3) ON CONFLICT (flow_job_name, sync_batch_id) - DO UPDATE SET avro_file = $3, created_at = CURRENT_TIMESTAMP - `, flowJobName, syncBatchID, avroFileJSON) - if err != nil { + DO UPDATE SET avro_file = $3, created_at = CURRENT_TIMESTAMP`, + flowJobName, syncBatchID, avroFileJSON, + ); err != nil { return fmt.Errorf("failed to set avro stage: %w", err) } return nil } -func (c *ClickHouseS3Stage) GetAvroStage(ctx context.Context, flowJobName string, syncBatchID int64) (*utils.AvroFile, error) { - conn, err := c.getConn(ctx) +func GetAvroStage(ctx context.Context, flowJobName string, syncBatchID int64) (*utils.AvroFile, error) { + conn, err := peerdbenv.GetCatalogConnectionPoolFromEnv(ctx) if err != nil { return nil, fmt.Errorf("failed to get connection: %w", err) } var avroFileJSON []byte - err = conn.QueryRow(ctx, ` + if err := conn.QueryRow(ctx, ` SELECT avro_file FROM ch_s3_stage - WHERE flow_job_name = $1 AND sync_batch_id = $2 - `, flowJobName, syncBatchID).Scan(&avroFileJSON) - if err != nil { + WHERE flow_job_name = $1 AND sync_batch_id = $2`, + flowJobName, syncBatchID, + ).Scan(&avroFileJSON); err != nil { if err == pgx.ErrNoRows { return nil, fmt.Errorf("no avro stage found for flow job %s and sync batch %d", flowJobName, syncBatchID) } @@ -72,12 +65,3 @@ func (c *ClickHouseS3Stage) GetAvroStage(ctx context.Context, flowJobName string return &avroFile, nil } - -func (c *ClickHouseS3Stage) getConn(ctx context.Context) (*pgxpool.Pool, error) { - conn, err := 
peerdbenv.GetCatalogConnectionPoolFromEnv(ctx) - if err != nil { - return nil, fmt.Errorf("unable to create catalog connection pool: %w", err) - } - - return conn, nil -} diff --git a/flow/connectors/external_metadata/store.go b/flow/connectors/external_metadata/store.go index 515b622ee..f253bf228 100644 --- a/flow/connectors/external_metadata/store.go +++ b/flow/connectors/external_metadata/store.go @@ -172,11 +172,10 @@ func (p *PostgresMetadata) FinishBatch(ctx context.Context, jobName string, sync func (p *PostgresMetadata) UpdateNormalizeBatchID(ctx context.Context, jobName string, batchID int64) error { p.logger.Info("updating normalize batch id for job", slog.Int64("batchID", batchID)) - _, err := p.pool.Exec(ctx, - `UPDATE `+lastSyncStateTableName+ - ` SET normalize_batch_id=$2 WHERE job_name=$1`, jobName, batchID) - if err != nil { - p.logger.Error("failed to update normalize batch id", slog.Any("error", err)) + if _, err := p.pool.Exec(ctx, + `UPDATE `+lastSyncStateTableName+` SET normalize_batch_id=$2 WHERE job_name=$1`, jobName, batchID, + ); err != nil { + p.logger.Error("failed to update normalize batch id", slog.Int64("batchID", batchID), slog.Any("error", err)) return err } diff --git a/flow/connectors/postgres/cdc.go b/flow/connectors/postgres/cdc.go index a355cfa00..9f4bd0b96 100644 --- a/flow/connectors/postgres/cdc.go +++ b/flow/connectors/postgres/cdc.go @@ -67,9 +67,9 @@ func (c *PostgresConnector) NewPostgresCDCSource(cdcConfig *PostgresCDCConfig) * relationMessageMapping: cdcConfig.RelationMessageMapping, slot: cdcConfig.Slot, publication: cdcConfig.Publication, - childToParentRelIDMapping: cdcConfig.ChildToParentRelIDMap, typeMap: pgtype.NewMap(), commitLock: nil, + childToParentRelIDMapping: cdcConfig.ChildToParentRelIDMap, catalogPool: cdcConfig.CatalogPool, flowJobName: cdcConfig.FlowJobName, hushWarnUnhandledMessageType: make(map[pglogrepl.MessageType]struct{}), @@ -85,21 +85,18 @@ func GetChildToParentRelIDMap(ctx context.Context, conn *pgx.Conn) (map[uint32]u WHERE parent.relkind='p'; ` - rows, err := conn.Query(ctx, query, pgx.QueryExecModeSimpleProtocol) + rows, err := conn.Query(ctx, query) if err != nil { return nil, fmt.Errorf("error querying for child to parent relid map: %w", err) } - defer rows.Close() childToParentRelIDMap := make(map[uint32]uint32) - var parentRelID pgtype.Uint32 - var childRelID pgtype.Uint32 - for rows.Next() { - err := rows.Scan(&parentRelID, &childRelID) - if err != nil { - return nil, fmt.Errorf("error scanning child to parent relid map: %w", err) - } + var parentRelID, childRelID pgtype.Uint32 + if _, err := pgx.ForEachRow(rows, []any{&parentRelID, &childRelID}, func() error { childToParentRelIDMap[childRelID.Uint32] = parentRelID.Uint32 + return nil + }); err != nil { + return nil, fmt.Errorf("error iterating over child to parent relid map: %w", err) } return childToParentRelIDMap, nil @@ -114,6 +111,7 @@ type replProcessor[Items model.Items] interface { p *PostgresCDCSource, tuple *pglogrepl.TupleDataColumn, col *pglogrepl.RelationMessageColumn, + customTypeMapping map[uint32]string, ) error } @@ -128,6 +126,7 @@ func (pgProcessor) Process( p *PostgresCDCSource, tuple *pglogrepl.TupleDataColumn, col *pglogrepl.RelationMessageColumn, + customTypeMapping map[uint32]string, ) error { switch tuple.DataType { case 'n': // null @@ -158,13 +157,14 @@ func (qProcessor) Process( p *PostgresCDCSource, tuple *pglogrepl.TupleDataColumn, col *pglogrepl.RelationMessageColumn, + customTypeMapping map[uint32]string, ) error { switch 
tuple.DataType { case 'n': // null items.AddColumn(col.Name, qvalue.QValueNull(qvalue.QValueKindInvalid)) case 't': // text // bytea also appears here as a hex - data, err := p.decodeColumnData(tuple.Data, col.DataType, pgtype.TextFormatCode) + data, err := p.decodeColumnData(tuple.Data, col.DataType, pgtype.TextFormatCode, customTypeMapping) if err != nil { p.logger.Error("error decoding text column data", slog.Any("error", err), slog.String("columnName", col.Name), slog.Int64("dataType", int64(col.DataType))) @@ -172,7 +172,7 @@ func (qProcessor) Process( } items.AddColumn(col.Name, data) case 'b': // binary - data, err := p.decodeColumnData(tuple.Data, col.DataType, pgtype.BinaryFormatCode) + data, err := p.decodeColumnData(tuple.Data, col.DataType, pgtype.BinaryFormatCode, customTypeMapping) if err != nil { return fmt.Errorf("error decoding binary column data: %w", err) } @@ -189,6 +189,7 @@ func processTuple[Items model.Items]( tuple *pglogrepl.TupleData, rel *pglogrepl.RelationMessage, exclude map[string]struct{}, + customTypeMapping map[uint32]string, ) (Items, map[string]struct{}, error) { // if the tuple is nil, return an empty map if tuple == nil { @@ -208,7 +209,7 @@ func processTuple[Items model.Items]( unchangedToastColumns = make(map[string]struct{}) } unchangedToastColumns[rcol.Name] = struct{}{} - } else if err := processor.Process(items, p, tcol, rcol); err != nil { + } else if err := processor.Process(items, p, tcol, rcol, customTypeMapping); err != nil { var none Items return none, nil, err } @@ -216,7 +217,9 @@ func processTuple[Items model.Items]( return items, unchangedToastColumns, nil } -func (p *PostgresCDCSource) decodeColumnData(data []byte, dataType uint32, formatCode int16) (qvalue.QValue, error) { +func (p *PostgresCDCSource) decodeColumnData(data []byte, dataType uint32, + formatCode int16, customTypeMapping map[uint32]string, +) (qvalue.QValue, error) { var parsedData any var err error if dt, ok := p.typeMap.TypeForOID(dataType); ok { @@ -260,7 +263,7 @@ func (p *PostgresCDCSource) decodeColumnData(data []byte, dataType uint32, forma return retVal, nil } - typeName, ok := p.customTypesMapping[dataType] + typeName, ok := customTypeMapping[dataType] if ok { customQKind := customTypeToQKind(typeName) switch customQKind { @@ -634,17 +637,21 @@ func processMessage[Items model.Items]( if err != nil { return nil, fmt.Errorf("error parsing logical message: %w", err) } + customTypeMapping, err := p.fetchCustomTypeMapping(ctx) + if err != nil { + return nil, err + } switch msg := logicalMsg.(type) { case *pglogrepl.BeginMessage: logger.Debug("BeginMessage", slog.Any("FinalLSN", msg.FinalLSN), slog.Any("XID", msg.Xid)) p.commitLock = msg case *pglogrepl.InsertMessage: - return processInsertMessage(p, xld.WALStart, msg, processor) + return processInsertMessage(p, xld.WALStart, msg, processor, customTypeMapping) case *pglogrepl.UpdateMessage: - return processUpdateMessage(p, xld.WALStart, msg, processor) + return processUpdateMessage(p, xld.WALStart, msg, processor, customTypeMapping) case *pglogrepl.DeleteMessage: - return processDeleteMessage(p, xld.WALStart, msg, processor) + return processDeleteMessage(p, xld.WALStart, msg, processor, customTypeMapping) case *pglogrepl.CommitMessage: // for a commit message, update the last checkpoint id for the record batch. 
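// CommitLSN is the position of the commit record itself; TransactionEndLSN points just past the end of the transaction.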
logger.Debug("CommitMessage", slog.Any("CommitLSN", msg.CommitLSN), slog.Any("TransactionEndLSN", msg.TransactionEndLSN)) @@ -694,6 +701,7 @@ func processInsertMessage[Items model.Items]( lsn pglogrepl.LSN, msg *pglogrepl.InsertMessage, processor replProcessor[Items], + customTypeMapping map[uint32]string, ) (model.Record[Items], error) { relID := p.getParentRelIDIfPartitioned(msg.RelationID) @@ -710,7 +718,7 @@ func processInsertMessage[Items model.Items]( return nil, fmt.Errorf("unknown relation id: %d", relID) } - items, _, err := processTuple(processor, p, msg.Tuple, rel, p.tableNameMapping[tableName].Exclude) + items, _, err := processTuple(processor, p, msg.Tuple, rel, p.tableNameMapping[tableName].Exclude, customTypeMapping) if err != nil { return nil, fmt.Errorf("error converting tuple to map: %w", err) } @@ -729,6 +737,7 @@ func processUpdateMessage[Items model.Items]( lsn pglogrepl.LSN, msg *pglogrepl.UpdateMessage, processor replProcessor[Items], + customTypeMapping map[uint32]string, ) (model.Record[Items], error) { relID := p.getParentRelIDIfPartitioned(msg.RelationID) @@ -745,13 +754,14 @@ func processUpdateMessage[Items model.Items]( return nil, fmt.Errorf("unknown relation id: %d", relID) } - oldItems, _, err := processTuple(processor, p, msg.OldTuple, rel, p.tableNameMapping[tableName].Exclude) + oldItems, _, err := processTuple(processor, p, msg.OldTuple, rel, + p.tableNameMapping[tableName].Exclude, customTypeMapping) if err != nil { return nil, fmt.Errorf("error converting old tuple to map: %w", err) } newItems, unchangedToastColumns, err := processTuple( - processor, p, msg.NewTuple, rel, p.tableNameMapping[tableName].Exclude) + processor, p, msg.NewTuple, rel, p.tableNameMapping[tableName].Exclude, customTypeMapping) if err != nil { return nil, fmt.Errorf("error converting new tuple to map: %w", err) } @@ -785,6 +795,7 @@ func processDeleteMessage[Items model.Items]( lsn pglogrepl.LSN, msg *pglogrepl.DeleteMessage, processor replProcessor[Items], + customTypeMapping map[uint32]string, ) (model.Record[Items], error) { relID := p.getParentRelIDIfPartitioned(msg.RelationID) @@ -801,7 +812,8 @@ func processDeleteMessage[Items model.Items]( return nil, fmt.Errorf("unknown relation id: %d", relID) } - items, _, err := processTuple(processor, p, msg.OldTuple, rel, p.tableNameMapping[tableName].Exclude) + items, _, err := processTuple(processor, p, msg.OldTuple, rel, + p.tableNameMapping[tableName].Exclude, customTypeMapping) if err != nil { return nil, fmt.Errorf("error converting tuple to map: %w", err) } @@ -844,6 +856,10 @@ func processRelationMessage[Items model.Items]( slog.Uint64("relId", uint64(currRel.RelationID))) return nil, nil } + customTypeMapping, err := p.fetchCustomTypeMapping(ctx) + if err != nil { + return nil, err + } // retrieve current TableSchema for table changed, mapping uses dst table name as key, need to translate source name currRelDstInfo, ok := p.tableNameMapping[currRelName] @@ -867,7 +883,7 @@ func processRelationMessage[Items model.Items]( case protos.TypeSystem_Q: qKind := p.postgresOIDToQValueKind(column.DataType) if qKind == qvalue.QValueKindInvalid { - typeName, ok := p.customTypesMapping[column.DataType] + typeName, ok := customTypeMapping[column.DataType] if ok { qKind = customTypeToQKind(typeName) } diff --git a/flow/connectors/postgres/client.go b/flow/connectors/postgres/client.go index 2d480f780..70b0d15d1 100644 --- a/flow/connectors/postgres/client.go +++ b/flow/connectors/postgres/client.go @@ -376,8 +376,7 @@ func (c 
*PostgresConnector) createSlotAndPublication( } srcTableNames = append(srcTableNames, parsedSrcTableName.String()) } - err := c.CreatePublication(ctx, srcTableNames, publication) - if err != nil { + if err := c.CreatePublication(ctx, srcTableNames, publication); err != nil { signal.SlotCreated <- SlotCreationResult{Err: err} return } @@ -395,7 +394,7 @@ func (c *PostgresConnector) createSlotAndPublication( c.logger.Warn(fmt.Sprintf("Creating replication slot '%s'", slot)) // THIS IS NOT IN A TX! - if _, err = conn.Exec(ctx, "SET idle_in_transaction_session_timeout=0"); err != nil { + if _, err := conn.Exec(ctx, "SET idle_in_transaction_session_timeout=0"); err != nil { signal.SlotCreated <- SlotCreationResult{Err: fmt.Errorf("[slot] error setting idle_in_transaction_session_timeout: %w", err)} return } @@ -551,7 +550,14 @@ func (c *PostgresConnector) jobMetadataExists(ctx context.Context, jobName strin } func (c *PostgresConnector) MajorVersion(ctx context.Context) (shared.PGVersion, error) { - return shared.GetMajorVersion(ctx, c.conn) + if c.pgVersion == 0 { + pgVersion, err := shared.GetMajorVersion(ctx, c.conn) + if err != nil { + return 0, err + } + c.pgVersion = pgVersion + } + return c.pgVersion, nil } func (c *PostgresConnector) updateSyncMetadata(ctx context.Context, flowJobName string, lastCP int64, syncBatchID int64, diff --git a/flow/connectors/postgres/postgres.go b/flow/connectors/postgres/postgres.go index d0087d3be..e685b5c12 100644 --- a/flow/connectors/postgres/postgres.go +++ b/flow/connectors/postgres/postgres.go @@ -17,6 +17,7 @@ import ( "github.com/jackc/pgx/v5/pgtype" "github.com/jackc/pgx/v5/pgxpool" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" "go.temporal.io/sdk/log" "go.temporal.io/sdk/temporal" @@ -39,12 +40,13 @@ type PostgresConnector struct { conn *pgx.Conn replConn *pgx.Conn replState *ReplState - customTypesMapping map[uint32]string + customTypeMapping map[uint32]string hushWarnOID map[uint32]struct{} relationMessageMapping model.RelationMessageMapping connStr string metadataSchema string replLock sync.Mutex + pgVersion shared.PGVersion } type ReplState struct { @@ -87,33 +89,39 @@ func NewPostgresConnector(ctx context.Context, env map[string]string, pgConfig * return nil, fmt.Errorf("failed to create connection: %w", err) } - customTypeMap, err := shared.GetCustomDataTypes(ctx, conn) - if err != nil { - logger.Error("failed to get custom type map", slog.Any("error", err)) - return nil, fmt.Errorf("failed to get custom type map: %w", err) - } - metadataSchema := "_peerdb_internal" if pgConfig.MetadataSchema != nil { metadataSchema = *pgConfig.MetadataSchema } return &PostgresConnector{ - connStr: connectionString, + logger: logger, config: pgConfig, ssh: tunnel, conn: conn, replConn: nil, replState: nil, - replLock: sync.Mutex{}, - customTypesMapping: customTypeMap, - metadataSchema: metadataSchema, + customTypeMapping: nil, hushWarnOID: make(map[uint32]struct{}), - logger: logger, relationMessageMapping: make(model.RelationMessageMapping), + connStr: connectionString, + metadataSchema: metadataSchema, + replLock: sync.Mutex{}, + pgVersion: 0, }, nil } +func (c *PostgresConnector) fetchCustomTypeMapping(ctx context.Context) (map[uint32]string, error) { + if c.customTypeMapping == nil { + customTypeMapping, err := shared.GetCustomDataTypes(ctx, c.conn) + if err != nil { + return nil, err + } + c.customTypeMapping = customTypeMapping + } + return c.customTypeMapping, nil +} + func (c *PostgresConnector) CreateReplConn(ctx 
diff --git a/flow/connectors/postgres/postgres.go b/flow/connectors/postgres/postgres.go
index d0087d3be..e685b5c12 100644
--- a/flow/connectors/postgres/postgres.go
+++ b/flow/connectors/postgres/postgres.go
@@ -17,6 +17,7 @@ import (
 	"github.com/jackc/pgx/v5/pgtype"
 	"github.com/jackc/pgx/v5/pgxpool"
 	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
 	"go.temporal.io/sdk/log"
 	"go.temporal.io/sdk/temporal"
 
@@ -39,12 +40,13 @@ type PostgresConnector struct {
 	conn                   *pgx.Conn
 	replConn               *pgx.Conn
 	replState              *ReplState
-	customTypesMapping     map[uint32]string
+	customTypeMapping      map[uint32]string
 	hushWarnOID            map[uint32]struct{}
 	relationMessageMapping model.RelationMessageMapping
 	connStr                string
 	metadataSchema         string
 	replLock               sync.Mutex
+	pgVersion              shared.PGVersion
 }
 
 type ReplState struct {
@@ -87,33 +89,39 @@ func NewPostgresConnector(ctx context.Context, env map[string]string, pgConfig *
 		return nil, fmt.Errorf("failed to create connection: %w", err)
 	}
 
-	customTypeMap, err := shared.GetCustomDataTypes(ctx, conn)
-	if err != nil {
-		logger.Error("failed to get custom type map", slog.Any("error", err))
-		return nil, fmt.Errorf("failed to get custom type map: %w", err)
-	}
-
 	metadataSchema := "_peerdb_internal"
 	if pgConfig.MetadataSchema != nil {
 		metadataSchema = *pgConfig.MetadataSchema
 	}
 
 	return &PostgresConnector{
-		connStr:                connectionString,
+		logger:                 logger,
 		config:                 pgConfig,
 		ssh:                    tunnel,
 		conn:                   conn,
 		replConn:               nil,
 		replState:              nil,
-		replLock:               sync.Mutex{},
-		customTypesMapping:     customTypeMap,
-		metadataSchema:         metadataSchema,
+		customTypeMapping:      nil,
 		hushWarnOID:            make(map[uint32]struct{}),
-		logger:                 logger,
 		relationMessageMapping: make(model.RelationMessageMapping),
+		connStr:                connectionString,
+		metadataSchema:         metadataSchema,
+		replLock:               sync.Mutex{},
+		pgVersion:              0,
 	}, nil
 }
 
+func (c *PostgresConnector) fetchCustomTypeMapping(ctx context.Context) (map[uint32]string, error) {
+	if c.customTypeMapping == nil {
+		customTypeMapping, err := shared.GetCustomDataTypes(ctx, c.conn)
+		if err != nil {
+			return nil, err
+		}
+		c.customTypeMapping = customTypeMapping
+	}
+	return c.customTypeMapping, nil
+}
+
 func (c *PostgresConnector) CreateReplConn(ctx context.Context) (*pgx.Conn, error) {
 	// create a separate connection pool for non-replication queries as replication connections cannot
 	// be used for extended query protocol, i.e. prepared statements
@@ -129,6 +137,7 @@ func (c *PostgresConnector) CreateReplConn(ctx context.Context) (*pgx.Conn, erro
 	replConfig.Config.RuntimeParams["replication"] = "database"
 	replConfig.Config.RuntimeParams["bytea_output"] = "hex"
 	replConfig.Config.RuntimeParams["intervalstyle"] = "postgres"
+	replConfig.DefaultQueryExecMode = pgx.QueryExecModeSimpleProtocol
 
 	conn, err := c.ssh.NewPostgresConnFromConfig(ctx, replConfig)
 	if err != nil {
@@ -168,6 +177,7 @@ func (c *PostgresConnector) MaybeStartReplication(
 	slotName string,
 	publicationName string,
 	lastOffset int64,
+	pgVersion shared.PGVersion,
 ) error {
 	if c.replState != nil && (c.replState.Offset != lastOffset ||
 		c.replState.Slot != slotName ||
@@ -180,7 +190,7 @@ func (c *PostgresConnector) MaybeStartReplication(
 	}
 
 	if c.replState == nil {
-		replicationOpts, err := c.replicationOptions(ctx, publicationName)
+		replicationOpts, err := c.replicationOptions(publicationName, pgVersion)
 		if err != nil {
 			return fmt.Errorf("error getting replication options: %w", err)
 		}
@@ -210,7 +220,8 @@ func (c *PostgresConnector) MaybeStartReplication(
 	return nil
 }
 
-func (c *PostgresConnector) replicationOptions(ctx context.Context, publicationName string) (pglogrepl.StartReplicationOptions, error) {
+func (c *PostgresConnector) replicationOptions(publicationName string, pgVersion shared.PGVersion,
+) (pglogrepl.StartReplicationOptions, error) {
 	pluginArguments := append(make([]string, 0, 3), "proto_version '1'")
 
 	if publicationName != "" {
@@ -220,10 +231,7 @@ func (c *PostgresConnector) replicationOptions(ctx context.Context, publicationN
 		return pglogrepl.StartReplicationOptions{}, errors.New("publication name is not set")
 	}
 
-	pgversion, err := c.MajorVersion(ctx)
-	if err != nil {
-		return pglogrepl.StartReplicationOptions{}, err
-	} else if pgversion >= shared.POSTGRES_14 {
+	if pgVersion >= shared.POSTGRES_14 {
 		pluginArguments = append(pluginArguments, "messages 'true'")
 	}
 
@@ -380,12 +388,21 @@ func pullCore[Items model.Items](
 
 	c.logger.Info("PullRecords: performed checks for slot and publication")
 
-	childToParentRelIDMap, err := GetChildToParentRelIDMap(ctx, c.conn)
+	// cached, since this connector is reused
+	pgVersion, err := c.MajorVersion(ctx)
 	if err != nil {
-		return fmt.Errorf("error getting child to parent relid map: %w", err)
+		return err
+	}
+
+	var childToParentRelIDMap map[uint32]uint32
+	// only initialize the map if needed, escape hatch because custom publications may not have the right setting
+	if req.OverridePublicationName != "" || pgVersion < shared.POSTGRES_13 {
+		childToParentRelIDMap, err = GetChildToParentRelIDMap(ctx, c.conn)
+		if err != nil {
+			return fmt.Errorf("error getting child to parent relid map: %w", err)
+		}
 	}
 
-	if err := c.MaybeStartReplication(ctx, slotName, publicationName, req.LastOffset); err != nil {
+	if err := c.MaybeStartReplication(ctx, slotName, publicationName, req.LastOffset, pgVersion); err != nil {
 		// in case of Aurora error ERROR: replication slots cannot be used on RO (Read Only) node (SQLSTATE 55000)
 		if shared.IsSQLStateError(err, pgerrcode.ObjectNotInPrerequisiteState) &&
 			strings.Contains(err.Error(), "replication slots cannot be used on RO (Read Only) node") {
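The escape hatch above only builds the child-to-parent relation map when an override publication is in play or the server predates Postgres 13, where publications cannot be created with publish_via_partition_root and partition changes therefore arrive under the child's relation ID. GetChildToParentRelIDMap itself is not part of this diff; a query in that spirit might look like the following (hypothetical sketch, not the repo's actual implementation):

    // Map each partition's relid to its parent via pg_inherits.
    rows, err := conn.Query(ctx, `
        SELECT inhrelid::oid, inhparent::oid
        FROM pg_inherits
        JOIN pg_class parent ON parent.oid = inhparent
        WHERE parent.relkind = 'p'`) // 'p' = partitioned table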
@@ -396,15 +413,15 @@ func pullCore[Items model.Items](
 	}
 
 	cdc := c.NewPostgresCDCSource(&PostgresCDCConfig{
+		CatalogPool:            catalogPool,
 		SrcTableIDNameMapping:  req.SrcTableIDNameMapping,
-		Slot:                   slotName,
-		Publication:            publicationName,
 		TableNameMapping:       req.TableNameMapping,
 		TableNameSchemaMapping: req.TableNameSchemaMapping,
 		ChildToParentRelIDMap:  childToParentRelIDMap,
-		CatalogPool:            catalogPool,
-		FlowJobName:            req.FlowJobName,
 		RelationMessageMapping: c.relationMessageMapping,
+		FlowJobName:            req.FlowJobName,
+		Slot:                   slotName,
+		Publication:            publicationName,
 	})
 
 	if err := PullCdcRecords(ctx, cdc, req, processor, &c.replLock); err != nil {
@@ -766,6 +783,10 @@ func (c *PostgresConnector) getTableSchemaForTable(
 	if err != nil {
 		return nil, err
 	}
+	customTypeMapping, err := c.fetchCustomTypeMapping(ctx)
+	if err != nil {
+		return nil, err
+	}
 
 	relID, err := c.getRelIDForTable(ctx, schemaTable)
 	if err != nil {
@@ -811,7 +832,7 @@ func (c *PostgresConnector) getTableSchemaForTable(
 		case protos.TypeSystem_PG:
 			colType = c.postgresOIDToName(fieldDescription.DataTypeOID)
 			if colType == "" {
-				typeName, ok := c.customTypesMapping[fieldDescription.DataTypeOID]
+				typeName, ok := customTypeMapping[fieldDescription.DataTypeOID]
 				if !ok {
 					return nil, fmt.Errorf("error getting type name for %d", fieldDescription.DataTypeOID)
 				}
@@ -820,7 +841,7 @@ func (c *PostgresConnector) getTableSchemaForTable(
 		case protos.TypeSystem_Q:
 			qColType := c.postgresOIDToQValueKind(fieldDescription.DataTypeOID)
 			if qColType == qvalue.QValueKindInvalid {
-				typeName, ok := c.customTypesMapping[fieldDescription.DataTypeOID]
+				typeName, ok := customTypeMapping[fieldDescription.DataTypeOID]
 				if ok {
 					qColType = customTypeToQKind(typeName)
 				} else {
@@ -891,15 +912,17 @@ func (c *PostgresConnector) SetupNormalizedTable(
 	if tableAlreadyExists {
 		c.logger.Info("[postgres] table already exists, skipping",
 			slog.String("table", tableIdentifier))
-		if config.IsResync {
-			err := c.ExecuteCommand(ctx, fmt.Sprintf(dropTableIfExistsSQL,
-				QuoteIdentifier(parsedNormalizedTable.Schema),
-				QuoteIdentifier(parsedNormalizedTable.Table)))
-			if err != nil {
-				return false, fmt.Errorf("error while dropping _resync table: %w", err)
-			}
+		if !config.IsResync {
+			return true, nil
+		}
+
+		err := c.ExecuteCommand(ctx, fmt.Sprintf(dropTableIfExistsSQL,
+			QuoteIdentifier(parsedNormalizedTable.Schema),
+			QuoteIdentifier(parsedNormalizedTable.Table)))
+		if err != nil {
+			return false, fmt.Errorf("error while dropping _resync table: %w", err)
 		}
-		return true, nil
+		c.logger.Info("[postgres] dropped resync table for resync", slog.String("resyncTable", parsedNormalizedTable.String()))
 	}
 
 	// convert the column names and types to Postgres types
@@ -1096,7 +1119,7 @@ func (c *PostgresConnector) SetupReplication(ctx context.Context, signal SlotSig
 		return
 	}
 
-	tableNameMapping := make(map[string]model.NameAndExclude)
+	tableNameMapping := make(map[string]model.NameAndExclude, len(req.TableNameMapping))
 	for k, v := range req.TableNameMapping {
 		tableNameMapping[k] = model.NameAndExclude{
 			Name: v,
slot: %w", err) } @@ -1122,14 +1145,14 @@ func (c *PostgresConnector) PullFlowCleanup(ctx context.Context, jobName string) // as drop publication if exists requires permissions // for a publication which we did not create via peerdb user var publicationExists bool - err = c.conn.QueryRow(ctx, "SELECT EXISTS(SELECT 1 FROM pg_publication WHERE pubname=$1)", publicationName).Scan(&publicationExists) - if err != nil { + if err := c.conn.QueryRow( + ctx, "SELECT EXISTS(SELECT 1 FROM pg_publication WHERE pubname=$1)", publicationName, + ).Scan(&publicationExists); err != nil { return fmt.Errorf("error checking if publication exists: %w", err) } if publicationExists { - _, err = c.conn.Exec(ctx, "DROP PUBLICATION IF EXISTS "+publicationName) - if err != nil { + if _, err := c.conn.Exec(ctx, "DROP PUBLICATION IF EXISTS "+publicationName); err != nil { return fmt.Errorf("error dropping publication: %w", err) } } @@ -1144,9 +1167,9 @@ func (c *PostgresConnector) SyncFlowCleanup(ctx context.Context, jobName string) } defer shared.RollbackTx(syncFlowCleanupTx, c.logger) - _, err = c.execWithLoggingTx(ctx, fmt.Sprintf(dropTableIfExistsSQL, c.metadataSchema, - getRawTableIdentifier(jobName)), syncFlowCleanupTx) - if err != nil { + if _, err := c.execWithLoggingTx(ctx, + fmt.Sprintf(dropTableIfExistsSQL, c.metadataSchema, getRawTableIdentifier(jobName)), syncFlowCleanupTx, + ); err != nil { return fmt.Errorf("unable to drop raw table: %w", err) } @@ -1162,10 +1185,10 @@ func (c *PostgresConnector) SyncFlowCleanup(ctx context.Context, jobName string) } } - err = syncFlowCleanupTx.Commit(ctx) - if err != nil { + if err := syncFlowCleanupTx.Commit(ctx); err != nil { return fmt.Errorf("unable to commit transaction for sync flow cleanup: %w", err) } + return nil } @@ -1192,11 +1215,17 @@ func (c *PostgresConnector) HandleSlotInfo( logger.Info(fmt.Sprintf("Checking %s lag for %s", alertKeys.SlotName, alertKeys.PeerName), slog.Float64("LagInMB", float64(slotInfo[0].LagInMb))) alerter.AlertIfSlotLag(ctx, alertKeys, slotInfo[0]) - slotMetricGauges.SlotLagGauge.Set(float64(slotInfo[0].LagInMb), attribute.NewSet( - attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName), - attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName), - attribute.String(otel_metrics.SlotNameKey, alertKeys.SlotName), - attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()))) + + if slotMetricGauges.SlotLagGauge != nil { + slotMetricGauges.SlotLagGauge.Record(ctx, float64(slotInfo[0].LagInMb), metric.WithAttributeSet(attribute.NewSet( + attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName), + attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName), + attribute.String(otel_metrics.SlotNameKey, alertKeys.SlotName), + attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID())), + )) + } else { + logger.Warn("warning: slotMetricGauges.SlotLagGauge is nil") + } // Also handles alerts for PeerDB user connections exceeding a given limit here res, err := getOpenConnectionsForUser(ctx, c.conn, c.config.User) @@ -1205,26 +1234,38 @@ func (c *PostgresConnector) HandleSlotInfo( return err } alerter.AlertIfOpenConnections(ctx, alertKeys, res) - slotMetricGauges.OpenConnectionsGauge.Set(res.CurrentOpenConnections, attribute.NewSet( - attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName), - attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName), - attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()))) + if 
+	if slotMetricGauges.OpenConnectionsGauge != nil {
+		slotMetricGauges.OpenConnectionsGauge.Record(ctx, res.CurrentOpenConnections, metric.WithAttributeSet(attribute.NewSet(
+			attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName),
+			attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName),
+			attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()),
+		)))
+	} else {
+		logger.Warn("warning: slotMetricGauges.OpenConnectionsGauge is nil")
+	}
 
 	replicationRes, err := getOpenReplicationConnectionsForUser(ctx, c.conn, c.config.User)
 	if err != nil {
 		logger.Warn("warning: failed to get current open replication connections", "error", err)
 		return err
 	}
-	slotMetricGauges.OpenReplicationConnectionsGauge.Set(replicationRes.CurrentOpenConnections, attribute.NewSet(
-		attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName),
-		attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName),
-		attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID())))
+	if slotMetricGauges.OpenReplicationConnectionsGauge != nil {
+		slotMetricGauges.OpenReplicationConnectionsGauge.Record(ctx, replicationRes.CurrentOpenConnections,
+			metric.WithAttributeSet(attribute.NewSet(
+				attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName),
+				attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName),
+				attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()),
+			)),
+		)
+	} else {
+		logger.Warn("warning: slotMetricGauges.OpenReplicationConnectionsGauge is nil")
+	}
 
 	var intervalSinceLastNormalize *time.Duration
-	err = alerter.CatalogPool.QueryRow(ctx, "SELECT now()-max(end_time) FROM peerdb_stats.cdc_batches WHERE flow_name=$1",
-		alertKeys.FlowName).Scan(&intervalSinceLastNormalize)
-	if err != nil {
+	if err := alerter.CatalogPool.QueryRow(
+		ctx, "SELECT now()-max(end_time) FROM peerdb_stats.cdc_batches WHERE flow_name=$1", alertKeys.FlowName,
+	).Scan(&intervalSinceLastNormalize); err != nil {
 		logger.Warn("failed to get interval since last normalize", slog.Any("error", err))
 	}
 	// what if the first normalize errors out/hangs?
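Every gauge write in this function now carries the same nil guard, warning log, and attribute set. A possible consolidation (a sketch only, not something this patch adds) using the same OTel synchronous-gauge API the patch switches to:

    // Hypothetical helper: record on a gauge only when metrics are wired up,
    // warning otherwise so a misconfigured exporter stays visible in logs.
    func recordFloat64(ctx context.Context, logger log.Logger, name string,
        gauge metric.Float64Gauge, value float64, attrs attribute.Set,
    ) {
        if gauge == nil {
            logger.Warn("gauge is nil: " + name)
            return
        }
        gauge.Record(ctx, value, metric.WithAttributeSet(attrs))
    }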
@@ -1233,10 +1274,17 @@ func (c *PostgresConnector) HandleSlotInfo(
 		return nil
 	}
 	if intervalSinceLastNormalize != nil {
-		slotMetricGauges.IntervalSinceLastNormalizeGauge.Set(intervalSinceLastNormalize.Seconds(), attribute.NewSet(
-			attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName),
-			attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName),
-			attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID())))
+		if slotMetricGauges.IntervalSinceLastNormalizeGauge != nil {
+			slotMetricGauges.IntervalSinceLastNormalizeGauge.Record(ctx, intervalSinceLastNormalize.Seconds(),
+				metric.WithAttributeSet(attribute.NewSet(
+					attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName),
+					attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName),
+					attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()),
+				)),
+			)
+		} else {
+			logger.Warn("warning: slotMetricGauges.IntervalSinceLastNormalizeGauge is nil")
+		}
 		alerter.AlertIfTooLongSinceLastNormalize(ctx, alertKeys, *intervalSinceLastNormalize)
 	}
 
@@ -1244,12 +1292,9 @@ func (c *PostgresConnector) HandleSlotInfo(
 }
 
 func getOpenConnectionsForUser(ctx context.Context, conn *pgx.Conn, user string) (*protos.GetOpenConnectionsForUserResult, error) {
-	row := conn.QueryRow(ctx, getNumConnectionsForUser, user)
-
 	// COUNT() returns BIGINT
 	var result pgtype.Int8
-	err := row.Scan(&result)
-	if err != nil {
+	if err := conn.QueryRow(ctx, getNumConnectionsForUser, user).Scan(&result); err != nil {
 		return nil, fmt.Errorf("error while reading result row: %w", err)
 	}
 
@@ -1260,12 +1305,9 @@ func getOpenConnectionsForUser(ctx context.Context, conn *pgx.Conn, user string)
 }
 
 func getOpenReplicationConnectionsForUser(ctx context.Context, conn *pgx.Conn, user string) (*protos.GetOpenConnectionsForUserResult, error) {
-	row := conn.QueryRow(ctx, getNumReplicationConnections, user)
-
 	// COUNT() returns BIGINT
 	var result pgtype.Int8
-	err := row.Scan(&result)
-	if err != nil {
+	if err := conn.QueryRow(ctx, getNumReplicationConnections, user).Scan(&result); err != nil {
 		return nil, fmt.Errorf("error while reading result row: %w", err)
 	}
 
@@ -1444,7 +1486,7 @@ func (c *PostgresConnector) RenameTables(
 		}
 
 		// rename the src table to dst
-		_, err = c.execWithLoggingTx(ctx, fmt.Sprintf("ALTER TABLE %s RENAME TO %s", src, dstTable.Table), renameTablesTx)
+		_, err = c.execWithLoggingTx(ctx, fmt.Sprintf("ALTER TABLE %s RENAME TO %s", src, QuoteIdentifier(dstTable.Table)), renameTablesTx)
 		if err != nil {
 			return nil, fmt.Errorf("unable to rename table %s to %s: %w", src, dst, err)
 		}
@@ -1483,8 +1525,7 @@ func (c *PostgresConnector) RemoveTableEntriesFromRawTable(
 
 func (c *PostgresConnector) GetVersion(ctx context.Context) (string, error) {
 	var version string
-	err := c.conn.QueryRow(ctx, "SELECT version()").Scan(&version)
-	if err != nil {
+	if err := c.conn.QueryRow(ctx, "SELECT version()").Scan(&version); err != nil {
 		return "", err
 	}
 	c.logger.Info("[postgres] version", slog.String("version", version))
diff --git a/flow/connectors/postgres/qrep.go b/flow/connectors/postgres/qrep.go
index 2a65ec353..1cd2cd595 100644
--- a/flow/connectors/postgres/qrep.go
+++ b/flow/connectors/postgres/qrep.go
@@ -84,7 +84,6 @@ func (c *PostgresConnector) getNumRowsPartitions(
 	config *protos.QRepConfig,
 	last *protos.QRepPartition,
 ) ([]*protos.QRepPartition, error) {
-	var err error
 	numRowsPerPartition := int64(config.NumRowsPerPartition)
 	quotedWatermarkColumn := QuoteIdentifier(config.WatermarkColumn)
 
@@ -116,7 +115,7 @@ func (c *PostgresConnector) getNumRowsPartitions(
 	}
 
 	var totalRows pgtype.Int8
-	if err = row.Scan(&totalRows); err != nil {
+	if err := row.Scan(&totalRows); err != nil {
 		return nil, fmt.Errorf("failed to query for total rows: %w", err)
 	}
 
@@ -177,19 +176,16 @@ func (c *PostgresConnector) getNumRowsPartitions(
 			return nil, fmt.Errorf("failed to scan row: %w", err)
 		}
 
-		err = partitionHelper.AddPartition(start, end)
-		if err != nil {
+		if err := partitionHelper.AddPartition(start, end); err != nil {
 			return nil, fmt.Errorf("failed to add partition: %w", err)
 		}
 	}
 
-	err = rows.Err()
-	if err != nil {
+	if err := rows.Err(); err != nil {
 		return nil, fmt.Errorf("failed to read rows: %w", err)
 	}
 
-	err = tx.Commit(ctx)
-	if err != nil {
+	if err := tx.Commit(ctx); err != nil {
 		return nil, fmt.Errorf("failed to commit transaction: %w", err)
 	}
 
@@ -332,10 +328,15 @@ func corePullQRepRecords(
 	sink QRepPullSink,
 ) (int, error) {
 	partitionIdLog := slog.String(string(shared.PartitionIDKey), partition.PartitionId)
+
 	if partition.FullTablePartition {
 		c.logger.Info("pulling full table partition", partitionIdLog)
-		executor := c.NewQRepQueryExecutorSnapshot(config.SnapshotName, config.FlowJobName, partition.PartitionId)
-		_, err := executor.ExecuteQueryIntoSink(ctx, sink, config.Query)
+		executor, err := c.NewQRepQueryExecutorSnapshot(ctx, config.SnapshotName,
+			config.FlowJobName, partition.PartitionId)
+		if err != nil {
+			return 0, fmt.Errorf("failed to create query executor: %w", err)
+		}
+		_, err = executor.ExecuteQueryIntoSink(ctx, sink, config.Query)
 		return 0, err
 	}
 	c.logger.Info("Obtained ranges for partition for PullQRepStream", partitionIdLog)
@@ -373,7 +374,11 @@ func corePullQRepRecords(
 		return 0, err
 	}
 
-	executor := c.NewQRepQueryExecutorSnapshot(config.SnapshotName, config.FlowJobName, partition.PartitionId)
+	executor, err := c.NewQRepQueryExecutorSnapshot(ctx, config.SnapshotName, config.FlowJobName,
+		partition.PartitionId)
+	if err != nil {
+		return 0, fmt.Errorf("failed to create query executor: %w", err)
+	}
 
 	numRecords, err := executor.ExecuteQueryIntoSink(ctx, sink, query, rangeStart, rangeEnd)
 	if err != nil {
@@ -673,7 +678,11 @@ func pullXminRecordStream(
 		queryArgs = []interface{}{strconv.FormatInt(partition.Range.Range.(*protos.PartitionRange_IntRange).IntRange.Start&0xffffffff, 10)}
 	}
 
-	executor := c.NewQRepQueryExecutorSnapshot(config.SnapshotName, config.FlowJobName, partition.PartitionId)
+	executor, err := c.NewQRepQueryExecutorSnapshot(ctx, config.SnapshotName,
+		config.FlowJobName, partition.PartitionId)
+	if err != nil {
+		return 0, 0, fmt.Errorf("failed to create query executor: %w", err)
+	}
 
 	numRecords, currentSnapshotXmin, err := executor.ExecuteQueryIntoSinkGettingCurrentSnapshotXmin(
 		ctx,
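Constructing an executor can now fail because the custom type mapping is fetched up front; the snapshot name is still threaded through so every partition reads the same exported snapshot. For background (this mechanism is Postgres-level, not shown in this diff), importing an exported snapshot looks roughly like:

    // Pin an exported snapshot so concurrent partition reads see one view.
    // Snapshot names come from pg_export_snapshot() on the exporting session;
    // SET TRANSACTION SNAPSHOT cannot be parameterized and must run first.
    tx, err := conn.BeginTx(ctx, pgx.TxOptions{IsoLevel: pgx.RepeatableRead})
    if err != nil {
        return err
    }
    if _, err := tx.Exec(ctx, fmt.Sprintf("SET TRANSACTION SNAPSHOT '%s'", snapshotName)); err != nil {
        return err
    }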
flow", "test part") + qe, err := connector.NewQRepQueryExecutor(ctx, "test flow", "test part") + require.NoError(b, err, "error while creating QRepQueryExecutor") // Run the benchmark b.ResetTimer() @@ -28,8 +29,6 @@ func BenchmarkQRepQueryExecutor(b *testing.B) { // Execute the query and process the rows _, err := qe.ExecuteAndProcessQuery(ctx, query) - if err != nil { - b.Fatalf("failed to execute query: %v", err) - } + require.NoError(b, err, "error while executing query") } } diff --git a/flow/connectors/postgres/qrep_query_executor.go b/flow/connectors/postgres/qrep_query_executor.go index 2fa6ecd7f..f3c915c13 100644 --- a/flow/connectors/postgres/qrep_query_executor.go +++ b/flow/connectors/postgres/qrep_query_executor.go @@ -18,24 +18,35 @@ import ( type QRepQueryExecutor struct { *PostgresConnector - logger log.Logger - snapshot string - flowJobName string - partitionID string + logger log.Logger + customTypeMapping map[uint32]string + snapshot string + flowJobName string + partitionID string } -func (c *PostgresConnector) NewQRepQueryExecutor(flowJobName string, partitionID string) *QRepQueryExecutor { - return c.NewQRepQueryExecutorSnapshot("", flowJobName, partitionID) +func (c *PostgresConnector) NewQRepQueryExecutor(ctx context.Context, + flowJobName string, partitionID string, +) (*QRepQueryExecutor, error) { + return c.NewQRepQueryExecutorSnapshot(ctx, "", flowJobName, partitionID) } -func (c *PostgresConnector) NewQRepQueryExecutorSnapshot(snapshot string, flowJobName string, partitionID string) *QRepQueryExecutor { +func (c *PostgresConnector) NewQRepQueryExecutorSnapshot(ctx context.Context, + snapshot string, flowJobName string, partitionID string, +) (*QRepQueryExecutor, error) { + customTypeMapping, err := c.fetchCustomTypeMapping(ctx) + if err != nil { + c.logger.Error("[pg_query_executor] failed to fetch custom type mapping", slog.Any("error", err)) + return nil, fmt.Errorf("failed to fetch custom type mapping: %w", err) + } return &QRepQueryExecutor{ PostgresConnector: c, snapshot: snapshot, flowJobName: flowJobName, partitionID: partitionID, logger: log.With(c.logger, slog.String(string(shared.PartitionIDKey), partitionID)), - } + customTypeMapping: customTypeMapping, + }, nil } func (qe *QRepQueryExecutor) ExecuteQuery(ctx context.Context, query string, args ...interface{}) (pgx.Rows, error) { @@ -67,7 +78,7 @@ func (qe *QRepQueryExecutor) fieldDescriptionsToSchema(fds []pgconn.FieldDescrip cname := fd.Name ctype := qe.postgresOIDToQValueKind(fd.DataTypeOID) if ctype == qvalue.QValueKindInvalid { - typeName, ok := qe.customTypesMapping[fd.DataTypeOID] + typeName, ok := qe.customTypeMapping[fd.DataTypeOID] if ok { ctype = customTypeToQKind(typeName) } else { @@ -98,6 +109,7 @@ func (qe *QRepQueryExecutor) fieldDescriptionsToSchema(fds []pgconn.FieldDescrip } func (qe *QRepQueryExecutor) ProcessRows( + ctx context.Context, rows pgx.Rows, fieldDescriptions []pgconn.FieldDescription, ) (*model.QRecordBatch, error) { @@ -115,12 +127,13 @@ func (qe *QRepQueryExecutor) ProcessRows( } // Check for any errors encountered during iteration - if rows.Err() != nil { - return nil, fmt.Errorf("row iteration failed: %w", rows.Err()) + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("row iteration failed: %w", err) } + schema := qe.fieldDescriptionsToSchema(fieldDescriptions) batch := &model.QRecordBatch{ - Schema: qe.fieldDescriptionsToSchema(fieldDescriptions), + Schema: schema, Records: records, } @@ -186,7 +199,8 @@ func (qe *QRepQueryExecutor) processFetchedRows( 
@@ -186,7 +199,8 @@ func (qe *QRepQueryExecutor) processFetchedRows(
 	fieldDescriptions := rows.FieldDescriptions()
 
 	if !stream.IsSchemaSet() {
-		stream.SetSchema(qe.fieldDescriptionsToSchema(fieldDescriptions))
+		schema := qe.fieldDescriptionsToSchema(fieldDescriptions)
+		stream.SetSchema(schema)
 	}
 
 	numRows, err := qe.processRowsStream(ctx, cursorName, stream, rows, fieldDescriptions)
@@ -324,7 +338,7 @@ func (qe *QRepQueryExecutor) mapRowToQRecord(
 
 	for i, fd := range fds {
 		// Check if it's a custom type first
-		typeName, ok := qe.customTypesMapping[fd.DataTypeOID]
+		typeName, ok := qe.customTypeMapping[fd.DataTypeOID]
 		if !ok {
 			tmp, err := qe.parseFieldFromPostgresOID(fd.DataTypeOID, values[i])
 			if err != nil {
diff --git a/flow/connectors/postgres/qrep_query_executor_test.go b/flow/connectors/postgres/qrep_query_executor_test.go
index d7932ba00..f8f686c42 100644
--- a/flow/connectors/postgres/qrep_query_executor_test.go
+++ b/flow/connectors/postgres/qrep_query_executor_test.go
@@ -1,7 +1,6 @@
 package connpostgres
 
 import (
-	"bytes"
 	"context"
 	"fmt"
 	"testing"
@@ -10,6 +9,7 @@ import (
 	"github.com/google/uuid"
 	"github.com/jackc/pgx/v5"
 	"github.com/shopspring/decimal"
+	"github.com/stretchr/testify/require"
 
 	"github.com/PeerDB-io/peer-flow/peerdbenv"
 )
@@ -19,18 +19,14 @@ func setupDB(t *testing.T) (*PostgresConnector, string) {
 
 	connector, err := NewPostgresConnector(context.Background(), nil,
 		peerdbenv.GetCatalogPostgresConfigFromEnv(context.Background()))
-	if err != nil {
-		t.Fatalf("unable to create connector: %v", err)
-	}
+	require.NoError(t, err, "error while creating connector")
 
 	// Create unique schema name using current time
 	schemaName := fmt.Sprintf("schema_%d", time.Now().Unix())
 
 	// Create the schema
 	_, err = connector.conn.Exec(context.Background(),
 		fmt.Sprintf("CREATE SCHEMA %s;", schemaName))
-	if err != nil {
-		t.Fatalf("unable to create schema: %v", err)
-	}
+	require.NoError(t, err, "error while creating schema")
 
 	return connector, schemaName
 }
@@ -39,9 +35,7 @@ func teardownDB(t *testing.T, conn *pgx.Conn, schemaName string) {
 	t.Helper()
 
 	_, err := conn.Exec(context.Background(),
 		fmt.Sprintf("DROP SCHEMA %s CASCADE;", schemaName))
-	if err != nil {
-		t.Fatalf("error while dropping schema: %v", err)
-	}
+	require.NoError(t, err, "error while dropping schema")
 }
 
 func TestExecuteAndProcessQuery(t *testing.T) {
@@ -53,31 +47,20 @@ func TestExecuteAndProcessQuery(t *testing.T) {
 
 	query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s.test(id SERIAL PRIMARY KEY, data TEXT);", schemaName)
 	_, err := conn.Exec(ctx, query)
-	if err != nil {
-		t.Fatalf("error while creating test table: %v", err)
-	}
+	require.NoError(t, err, "error while creating table")
 
 	query = fmt.Sprintf("INSERT INTO %s.test(data) VALUES('testdata');", schemaName)
 	_, err = conn.Exec(ctx, query)
-	if err != nil {
-		t.Fatalf("error while inserting into test table: %v", err)
-	}
+	require.NoError(t, err, "error while inserting data")
 
-	qe := connector.NewQRepQueryExecutor("test flow", "test part")
+	qe, err := connector.NewQRepQueryExecutor(ctx, "test flow", "test part")
+	require.NoError(t, err, "error while creating QRepQueryExecutor")
 	query = fmt.Sprintf("SELECT * FROM %s.test;", schemaName)
 
 	batch, err := qe.ExecuteAndProcessQuery(context.Background(), query)
-	if err != nil {
-		t.Fatalf("error while executing and processing query: %v", err)
-	}
-
-	if len(batch.Records) != 1 {
-		t.Fatalf("expected 1 record, got %v", len(batch.Records))
-	}
-
-	if batch.Records[0][1].Value() != "testdata" {
-		t.Fatalf("expected 'testdata', got %v", batch.Records[0][0].Value())
-	}
+	require.NoError(t, err, "error while executing query")
+	require.Len(t, batch.Records, 1, "expected 1 record")
+	require.Equal(t, "testdata", batch.Records[0][1].Value(), "expected 'testdata'")
 }
"error while executing query") + require.Len(t, batch.Records, 1, "expected 1 record") + require.Equal(t, "testdata", batch.Records[0][1].Value(), "expected 'testdata'") } func TestAllDataTypes(t *testing.T) { @@ -109,9 +92,7 @@ func TestAllDataTypes(t *testing.T) { );`, schemaName) _, err := conn.Exec(ctx, query) - if err != nil { - t.Fatalf("error while creating test table: %v", err) - } + require.NoError(t, err, "error while creating table") // Insert a row into the table query = fmt.Sprintf(` @@ -137,7 +118,7 @@ func TestAllDataTypes(t *testing.T) { )`, schemaName) - savedTime := time.Now() + savedTime := time.Now().UTC() savedUUID := uuid.New() _, err = conn.Exec( @@ -160,48 +141,34 @@ func TestAllDataTypes(t *testing.T) { savedTime, // col_tz4 savedTime, // col_date ) - if err != nil { - t.Fatalf("error while inserting into test table: %v", err) - } + require.NoError(t, err, "error while inserting into test table") - qe := connector.NewQRepQueryExecutor("test flow", "test part") + qe, err := connector.NewQRepQueryExecutor(ctx, "test flow", "test part") + require.NoError(t, err, "error while creating QRepQueryExecutor") // Select the row back out of the table query = fmt.Sprintf("SELECT * FROM %s.test;", schemaName) rows, err := qe.ExecuteQuery(context.Background(), query) - if err != nil { - t.Fatalf("error while executing query: %v", err) - } + require.NoError(t, err, "error while executing query") defer rows.Close() // Use rows.FieldDescriptions() to get field descriptions fieldDescriptions := rows.FieldDescriptions() - batch, err := qe.ProcessRows(rows, fieldDescriptions) - if err != nil { - t.Fatalf("failed to process rows: %v", err) - } - - if len(batch.Records) != 1 { - t.Fatalf("expected 1 record, got %v", len(batch.Records)) - } + batch, err := qe.ProcessRows(ctx, rows, fieldDescriptions) + require.NoError(t, err, "error while processing rows") + require.Len(t, batch.Records, 1, "expected 1 record") // Retrieve the results. 
 	record := batch.Records[0]
 
 	expectedBool := true
-	if record[0].Value().(bool) != expectedBool {
-		t.Fatalf("expected %v, got %v", expectedBool, record[0].Value())
-	}
+	require.Equal(t, expectedBool, record[0].Value(), "expected true")
 
 	expectedInt4 := int32(2)
-	if record[1].Value().(int32) != expectedInt4 {
-		t.Fatalf("expected %v, got %v", expectedInt4, record[1].Value())
-	}
+	require.Equal(t, expectedInt4, record[1].Value(), "expected 2")
 
 	expectedInt8 := int64(3)
-	if record[2].Value().(int64) != expectedInt8 {
-		t.Fatalf("expected %v, got %v", expectedInt8, record[2].Value())
-	}
+	require.Equal(t, expectedInt8, record[2].Value(), "expected 3")
 
 	expectedFloat4 := float32(1.1)
 	if record[3].Value().(float32) != expectedFloat4 {
@@ -214,28 +181,21 @@
 	}
 
 	expectedText := "text"
-	if record[5].Value().(string) != expectedText {
-		t.Fatalf("expected %v, got %v", expectedText, record[5].Value())
-	}
+	require.Equal(t, expectedText, record[5].Value(), "expected 'text'")
 
 	expectedBytea := []byte("bytea")
-	if !bytes.Equal(record[6].Value().([]byte), expectedBytea) {
-		t.Fatalf("expected %v, got %v", expectedBytea, record[6].Value())
-	}
+	require.Equal(t, expectedBytea, record[6].Value(), "expected 'bytea'")
 
 	expectedJSON := `{"key":"value"}`
-	if record[7].Value().(string) != expectedJSON {
-		t.Fatalf("expected %v, got %v", expectedJSON, record[7].Value())
-	}
+	require.Equal(t, expectedJSON, record[7].Value(), "expected '{\"key\":\"value\"}'")
 
 	actualUUID := record[8].Value().([16]uint8)
-	if !bytes.Equal(actualUUID[:], savedUUID[:]) {
-		t.Fatalf("expected %v, got %v", savedUUID, actualUUID)
-	}
+	require.Equal(t, savedUUID[:], actualUUID[:], "expected savedUUID: %v", savedUUID)
+
+	actualTime := record[9].Value().(time.Time)
+	require.Equal(t, savedTime.Truncate(time.Second),
+		actualTime.Truncate(time.Second), "expected savedTime: %v", savedTime)
 
 	expectedNumeric := "123.456"
 	actualNumeric := record[10].Value().(decimal.Decimal).String()
-	if actualNumeric != expectedNumeric {
-		t.Fatalf("expected %v, got %v", expectedNumeric, actualNumeric)
-	}
+	require.Equal(t, expectedNumeric, actualNumeric, "expected 123.456")
 }
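The new timestamp assertion truncates both sides to whole seconds to sidestep sub-second rounding between the driver and the server. An equivalent, arguably clearer alternative (a suggestion, not what the patch uses) is testify's closeness assertion:

    actualTime := record[9].Value().(time.Time)
    require.WithinDuration(t, savedTime, actualTime, time.Second)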
diff --git a/flow/connectors/postgres/qvalue_convert.go b/flow/connectors/postgres/qvalue_convert.go
index d359212bd..fe2489ed3 100644
--- a/flow/connectors/postgres/qvalue_convert.go
+++ b/flow/connectors/postgres/qvalue_convert.go
@@ -62,8 +62,10 @@ func (c *PostgresConnector) postgresOIDToQValueKind(recvOID uint32) qvalue.QValu
 		return qvalue.QValueKindString
 	case pgtype.ByteaOID:
 		return qvalue.QValueKindBytes
-	case pgtype.JSONOID, pgtype.JSONBOID:
+	case pgtype.JSONOID:
 		return qvalue.QValueKindJSON
+	case pgtype.JSONBOID:
+		return qvalue.QValueKindJSONB
 	case pgtype.UUIDOID:
 		return qvalue.QValueKindUUID
 	case pgtype.TimeOID:
@@ -104,8 +106,14 @@ func (c *PostgresConnector) postgresOIDToQValueKind(recvOID uint32) qvalue.QValu
 		return qvalue.QValueKindArrayTimestampTZ
 	case pgtype.TextArrayOID, pgtype.VarcharArrayOID, pgtype.BPCharArrayOID:
 		return qvalue.QValueKindArrayString
+	case pgtype.JSONArrayOID:
+		return qvalue.QValueKindArrayJSON
+	case pgtype.JSONBArrayOID:
+		return qvalue.QValueKindArrayJSONB
 	case pgtype.IntervalOID:
 		return qvalue.QValueKindInterval
+	case pgtype.TstzrangeOID:
+		return qvalue.QValueKindTSTZRange
 	default:
 		typeName, ok := pgtype.NewMap().TypeForOID(recvOID)
 		if !ok {
@@ -161,6 +169,8 @@ func qValueKindToPostgresType(colTypeStr string) string {
 		return "BYTEA"
 	case qvalue.QValueKindJSON:
 		return "JSON"
+	case qvalue.QValueKindJSONB:
+		return "JSONB"
 	case qvalue.QValueKindHStore:
 		return "HSTORE"
 	case qvalue.QValueKindUUID:
@@ -203,6 +213,10 @@ func qValueKindToPostgresType(colTypeStr string) string {
 		return "BOOLEAN[]"
 	case qvalue.QValueKindArrayString:
 		return "TEXT[]"
+	case qvalue.QValueKindArrayJSON:
+		return "JSON[]"
+	case qvalue.QValueKindArrayJSONB:
+		return "JSONB[]"
 	case qvalue.QValueKindGeography:
 		return "GEOGRAPHY"
 	case qvalue.QValueKindGeometry:
@@ -214,12 +228,12 @@ func qValueKindToPostgresType(colTypeStr string) string {
 	}
 }
 
-func parseJSON(value interface{}) (qvalue.QValue, error) {
+func parseJSON(value interface{}, isArray bool) (qvalue.QValue, error) {
 	jsonVal, err := json.Marshal(value)
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse JSON: %w", err)
 	}
-	return qvalue.QValueJSON{Val: string(jsonVal)}, nil
+	return qvalue.QValueJSON{Val: string(jsonVal), IsArray: isArray}, nil
 }
 
 func convertToArray[T any](kind qvalue.QValueKind, value interface{}) ([]T, error) {
@@ -277,6 +291,31 @@ func parseFieldFromQValueKind(qvalueKind qvalue.QValueKind, value interface{}) (
 		}
 
 		return qvalue.QValueString{Val: string(intervalJSON)}, nil
+	case qvalue.QValueKindTSTZRange:
+		tstzrangeObject := value.(pgtype.Range[interface{}])
+		lowerBoundType := tstzrangeObject.LowerType
+		upperBoundType := tstzrangeObject.UpperType
+		lowerTime, err := convertTimeRangeBound(tstzrangeObject.Lower)
+		if err != nil {
+			return nil, fmt.Errorf("[tstzrange]error for lower time bound: %w", err)
+		}
+
+		upperTime, err := convertTimeRangeBound(tstzrangeObject.Upper)
+		if err != nil {
+			return nil, fmt.Errorf("[tstzrange]error for upper time bound: %w", err)
+		}
+
+		lowerBracket := "["
+		if lowerBoundType == pgtype.Exclusive {
+			lowerBracket = "("
+		}
+		upperBracket := "]"
+		if upperBoundType == pgtype.Exclusive {
+			upperBracket = ")"
+		}
+		tstzrangeStr := fmt.Sprintf("%s%v,%v%s",
+			lowerBracket, lowerTime, upperTime, upperBracket)
+		return qvalue.QValueTSTZRange{Val: tstzrangeStr}, nil
 	case qvalue.QValueKindDate:
 		switch val := value.(type) {
 		case time.Time:
Error: %v", err) + } + convertedTime = lowerParsed.Format(postgresFormat) + } else { + convertedTime = "" + } + + return convertedTime, nil +} diff --git a/flow/connectors/postgres/validate.go b/flow/connectors/postgres/validate.go index 2b5729f67..ca9665a31 100644 --- a/flow/connectors/postgres/validate.go +++ b/flow/connectors/postgres/validate.go @@ -145,13 +145,11 @@ func (c *PostgresConnector) CheckReplicationConnectivity(ctx context.Context) er func (c *PostgresConnector) CheckPublicationCreationPermissions(ctx context.Context, srcTableNames []string) error { pubName := "_peerdb_tmp_test_publication_" + shared.RandomString(5) - err := c.CreatePublication(ctx, srcTableNames, pubName) - if err != nil { + if err := c.CreatePublication(ctx, srcTableNames, pubName); err != nil { return err } - _, err = c.conn.Exec(ctx, "DROP PUBLICATION "+pubName) - if err != nil { + if _, err := c.conn.Exec(ctx, "DROP PUBLICATION "+pubName); err != nil { return fmt.Errorf("failed to drop publication: %v", err) } return nil diff --git a/flow/connectors/s3/qrep.go b/flow/connectors/s3/qrep.go index 14c7b31ef..9fbb485ab 100644 --- a/flow/connectors/s3/qrep.go +++ b/flow/connectors/s3/qrep.go @@ -25,7 +25,7 @@ func (c *S3Connector) SyncQRepRecords( return 0, err } - numRecords, err := c.writeToAvroFile(ctx, stream, avroSchema, partition.PartitionId, config.FlowJobName) + numRecords, err := c.writeToAvroFile(ctx, config.Env, stream, avroSchema, partition.PartitionId, config.FlowJobName) if err != nil { return 0, err } @@ -47,6 +47,7 @@ func getAvroSchema( func (c *S3Connector) writeToAvroFile( ctx context.Context, + env map[string]string, stream *model.QRecordStream, avroSchema *model.QRecordAvroSchemaDefinition, partitionID string, @@ -60,7 +61,7 @@ func (c *S3Connector) writeToAvroFile( s3AvroFileKey := fmt.Sprintf("%s/%s/%s.avro", s3o.Prefix, jobName, partitionID) writer := avro.NewPeerDBOCFWriter(stream, avroSchema, avro.CompressNone, protos.DBType_SNOWFLAKE) - avroFile, err := writer.WriteRecordsToS3(ctx, s3o.Bucket, s3AvroFileKey, c.credentialsProvider) + avroFile, err := writer.WriteRecordsToS3(ctx, env, s3o.Bucket, s3AvroFileKey, c.credentialsProvider) if err != nil { return 0, fmt.Errorf("failed to write records to S3: %w", err) } diff --git a/flow/connectors/snowflake/merge_stmt_generator.go b/flow/connectors/snowflake/merge_stmt_generator.go index 3f0cfbc63..37b4ed7bd 100644 --- a/flow/connectors/snowflake/merge_stmt_generator.go +++ b/flow/connectors/snowflake/merge_stmt_generator.go @@ -52,7 +52,7 @@ func (m *mergeStmtGenerator) generateMergeStmt(dstTable string) (string, error) flattenedCastsSQLArray = append(flattenedCastsSQLArray, fmt.Sprintf("TO_GEOMETRY(CAST(%s:\"%s\" AS STRING),true) AS %s", toVariantColumnName, column.Name, targetColumnName)) - case qvalue.QValueKindJSON, qvalue.QValueKindHStore, qvalue.QValueKindInterval: + case qvalue.QValueKindJSON, qvalue.QValueKindJSONB, qvalue.QValueKindHStore, qvalue.QValueKindInterval: flattenedCastsSQLArray = append(flattenedCastsSQLArray, fmt.Sprintf("PARSE_JSON(CAST(%s:\"%s\" AS STRING)) AS %s", toVariantColumnName, column.Name, targetColumnName)) diff --git a/flow/connectors/snowflake/qrep.go b/flow/connectors/snowflake/qrep.go index 96a1fa911..556627699 100644 --- a/flow/connectors/snowflake/qrep.go +++ b/flow/connectors/snowflake/qrep.go @@ -89,14 +89,12 @@ func (c *SnowflakeConnector) SetupQRepMetadataTables(ctx context.Context, config } stageName := c.getStageNameForJob(config.FlowJobName) - err = c.createStage(ctx, stageName, config) - if 
diff --git a/flow/connectors/postgres/validate.go b/flow/connectors/postgres/validate.go
index 2b5729f67..ca9665a31 100644
--- a/flow/connectors/postgres/validate.go
+++ b/flow/connectors/postgres/validate.go
@@ -145,13 +145,11 @@ func (c *PostgresConnector) CheckReplicationConnectivity(ctx context.Context) er
 }
 
 func (c *PostgresConnector) CheckPublicationCreationPermissions(ctx context.Context, srcTableNames []string) error {
 	pubName := "_peerdb_tmp_test_publication_" + shared.RandomString(5)
-	err := c.CreatePublication(ctx, srcTableNames, pubName)
-	if err != nil {
+	if err := c.CreatePublication(ctx, srcTableNames, pubName); err != nil {
 		return err
 	}
 
-	_, err = c.conn.Exec(ctx, "DROP PUBLICATION "+pubName)
-	if err != nil {
+	if _, err := c.conn.Exec(ctx, "DROP PUBLICATION "+pubName); err != nil {
 		return fmt.Errorf("failed to drop publication: %v", err)
 	}
 	return nil
diff --git a/flow/connectors/s3/qrep.go b/flow/connectors/s3/qrep.go
index 14c7b31ef..9fbb485ab 100644
--- a/flow/connectors/s3/qrep.go
+++ b/flow/connectors/s3/qrep.go
@@ -25,7 +25,7 @@ func (c *S3Connector) SyncQRepRecords(
 		return 0, err
 	}
 
-	numRecords, err := c.writeToAvroFile(ctx, stream, avroSchema, partition.PartitionId, config.FlowJobName)
+	numRecords, err := c.writeToAvroFile(ctx, config.Env, stream, avroSchema, partition.PartitionId, config.FlowJobName)
 	if err != nil {
 		return 0, err
 	}
@@ -47,6 +47,7 @@ func getAvroSchema(
 
 func (c *S3Connector) writeToAvroFile(
 	ctx context.Context,
+	env map[string]string,
 	stream *model.QRecordStream,
 	avroSchema *model.QRecordAvroSchemaDefinition,
 	partitionID string,
@@ -60,7 +61,7 @@ func (c *S3Connector) writeToAvroFile(
 	s3AvroFileKey := fmt.Sprintf("%s/%s/%s.avro", s3o.Prefix, jobName, partitionID)
 
 	writer := avro.NewPeerDBOCFWriter(stream, avroSchema, avro.CompressNone, protos.DBType_SNOWFLAKE)
-	avroFile, err := writer.WriteRecordsToS3(ctx, s3o.Bucket, s3AvroFileKey, c.credentialsProvider)
+	avroFile, err := writer.WriteRecordsToS3(ctx, env, s3o.Bucket, s3AvroFileKey, c.credentialsProvider)
 	if err != nil {
 		return 0, fmt.Errorf("failed to write records to S3: %w", err)
 	}
diff --git a/flow/connectors/snowflake/merge_stmt_generator.go b/flow/connectors/snowflake/merge_stmt_generator.go
index 3f0cfbc63..37b4ed7bd 100644
--- a/flow/connectors/snowflake/merge_stmt_generator.go
+++ b/flow/connectors/snowflake/merge_stmt_generator.go
@@ -52,7 +52,7 @@ func (m *mergeStmtGenerator) generateMergeStmt(dstTable string) (string, error)
 			flattenedCastsSQLArray = append(flattenedCastsSQLArray,
 				fmt.Sprintf("TO_GEOMETRY(CAST(%s:\"%s\" AS STRING),true) AS %s",
 					toVariantColumnName, column.Name, targetColumnName))
-		case qvalue.QValueKindJSON, qvalue.QValueKindHStore, qvalue.QValueKindInterval:
+		case qvalue.QValueKindJSON, qvalue.QValueKindJSONB, qvalue.QValueKindHStore, qvalue.QValueKindInterval:
 			flattenedCastsSQLArray = append(flattenedCastsSQLArray,
 				fmt.Sprintf("PARSE_JSON(CAST(%s:\"%s\" AS STRING)) AS %s",
 					toVariantColumnName, column.Name, targetColumnName))
diff --git a/flow/connectors/snowflake/qrep.go b/flow/connectors/snowflake/qrep.go
index 96a1fa911..556627699 100644
--- a/flow/connectors/snowflake/qrep.go
+++ b/flow/connectors/snowflake/qrep.go
@@ -89,14 +89,12 @@ func (c *SnowflakeConnector) SetupQRepMetadataTables(ctx context.Context, config
 	}
 
 	stageName := c.getStageNameForJob(config.FlowJobName)
-	err = c.createStage(ctx, stageName, config)
-	if err != nil {
+	if err := c.createStage(ctx, stageName, config); err != nil {
 		return err
 	}
 
 	if config.WriteMode.WriteType == protos.QRepWriteType_QREP_WRITE_MODE_OVERWRITE {
-		_, err = c.execWithLogging(ctx, "TRUNCATE TABLE "+config.DestinationTableIdentifier)
-		if err != nil {
+		if _, err := c.execWithLogging(ctx, "TRUNCATE TABLE "+config.DestinationTableIdentifier); err != nil {
 			return fmt.Errorf("failed to TRUNCATE table before query replication: %w", err)
 		}
 	}
@@ -224,8 +222,7 @@ func (c *SnowflakeConnector) getColsFromTable(ctx context.Context, tableName str
 		})
 	}
 
-	err = rows.Err()
-	if err != nil {
+	if err := rows.Err(); err != nil {
 		return nil, fmt.Errorf("failed to read rows: %w", err)
 	}
 
@@ -280,11 +277,10 @@ func (c *SnowflakeConnector) dropStage(ctx context.Context, stagingPath string,
 			}
 
 			for _, object := range page.Contents {
-				_, err = s3svc.DeleteObject(ctx, &s3.DeleteObjectInput{
+				if _, err := s3svc.DeleteObject(ctx, &s3.DeleteObjectInput{
 					Bucket: aws.String(s3o.Bucket),
 					Key:    object.Key,
-				})
-				if err != nil {
+				}); err != nil {
 					c.logger.Error("failed to delete objects from bucket", slog.Any("error", err))
 					return fmt.Errorf("failed to delete objects from bucket: %w", err)
 				}
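The next file replaces the Avro sync handler's explicit connector field with an embedded *SnowflakeConnector, which is what lets every s.connector.logger call below shorten to s.logger. The shape of the change, as a sketch:

    // Before: explicit field, every call goes through s.connector.
    // After: embedding promotes the connector's fields and methods.
    type SnowflakeAvroSyncHandler struct {
        *SnowflakeConnector // promoted: logger, database, getStageNameForJob, ...
        config *protos.QRepConfig
    }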
diff --git a/flow/connectors/snowflake/qrep_avro_sync.go b/flow/connectors/snowflake/qrep_avro_sync.go
index 2e37705c1..0fea54b02 100644
--- a/flow/connectors/snowflake/qrep_avro_sync.go
+++ b/flow/connectors/snowflake/qrep_avro_sync.go
@@ -20,8 +20,8 @@ import (
 )
 
 type SnowflakeAvroSyncHandler struct {
-	config    *protos.QRepConfig
-	connector *SnowflakeConnector
+	*SnowflakeConnector
+	config *protos.QRepConfig
 }
 
 func NewSnowflakeAvroSyncHandler(
@@ -29,13 +29,14 @@ func NewSnowflakeAvroSyncHandler(
 	connector *SnowflakeConnector,
 ) *SnowflakeAvroSyncHandler {
 	return &SnowflakeAvroSyncHandler{
-		config:    config,
-		connector: connector,
+		SnowflakeConnector: connector,
+		config:             config,
 	}
 }
 
 func (s *SnowflakeAvroSyncHandler) SyncRecords(
 	ctx context.Context,
+	env map[string]string,
 	dstTableSchema []*sql.ColumnType,
 	stream *model.QRecordStream,
 	flowJobName string,
@@ -45,7 +46,7 @@ func (s *SnowflakeAvroSyncHandler) SyncRecords(
 
 	schema := stream.Schema()
 
-	s.connector.logger.Info("sync function called and schema acquired", tableLog)
+	s.logger.Info("sync function called and schema acquired", tableLog)
 
 	avroSchema, err := s.getAvroSchema(dstTableName, schema)
 	if err != nil {
@@ -53,32 +54,31 @@ func (s *SnowflakeAvroSyncHandler) SyncRecords(
 	}
 
 	partitionID := shared.RandomString(16)
-	avroFile, err := s.writeToAvroFile(ctx, stream, avroSchema, partitionID, flowJobName)
+	avroFile, err := s.writeToAvroFile(ctx, env, stream, avroSchema, partitionID, flowJobName)
 	if err != nil {
 		return 0, err
 	}
 	defer avroFile.Cleanup()
-	s.connector.logger.Info(fmt.Sprintf("written %d records to Avro file", avroFile.NumRecords), tableLog)
+	s.logger.Info(fmt.Sprintf("written %d records to Avro file", avroFile.NumRecords), tableLog)
 
-	stage := s.connector.getStageNameForJob(s.config.FlowJobName)
-	err = s.connector.createStage(ctx, stage, s.config)
-	if err != nil {
+	stage := s.getStageNameForJob(s.config.FlowJobName)
+	if err := s.createStage(ctx, stage, s.config); err != nil {
 		return 0, err
 	}
-	s.connector.logger.Info("Created stage " + stage)
+	s.logger.Info("Created stage " + stage)
 
 	err = s.putFileToStage(ctx, avroFile, stage)
 	if err != nil {
 		return 0, err
 	}
-	s.connector.logger.Info("pushed avro file to stage", tableLog)
+	s.logger.Info("pushed avro file to stage", tableLog)
 
-	writeHandler := NewSnowflakeAvroConsolidateHandler(s.connector, s.config, s.config.DestinationTableIdentifier, stage)
+	writeHandler := NewSnowflakeAvroConsolidateHandler(s.SnowflakeConnector, s.config, s.config.DestinationTableIdentifier, stage)
 
 	err = writeHandler.CopyStageToDestination(ctx)
 	if err != nil {
 		return 0, err
 	}
-	s.connector.logger.Info(fmt.Sprintf("copying records into %s from stage %s",
+	s.logger.Info(fmt.Sprintf("copying records into %s from stage %s",
 		s.config.DestinationTableIdentifier, stage))
 
 	return avroFile.NumRecords, nil
@@ -96,7 +96,7 @@ func (s *SnowflakeAvroSyncHandler) SyncQRepRecords(
 	dstTableName := config.DestinationTableIdentifier
 
 	schema := stream.Schema()
-	s.connector.logger.Info("sync function called and schema acquired", partitionLog)
+	s.logger.Info("sync function called and schema acquired", partitionLog)
 
 	err := s.addMissingColumns(ctx, schema, dstTableSchema, dstTableName, partition)
 	if err != nil {
@@ -108,22 +108,20 @@ func (s *SnowflakeAvroSyncHandler) SyncQRepRecords(
 		return 0, err
 	}
 
-	avroFile, err := s.writeToAvroFile(ctx, stream, avroSchema, partition.PartitionId, config.FlowJobName)
+	avroFile, err := s.writeToAvroFile(ctx, config.Env, stream, avroSchema, partition.PartitionId, config.FlowJobName)
 	if err != nil {
 		return 0, err
 	}
 	defer avroFile.Cleanup()
 
-	stage := s.connector.getStageNameForJob(config.FlowJobName)
+	stage := s.getStageNameForJob(config.FlowJobName)
 
-	err = s.putFileToStage(ctx, avroFile, stage)
-	if err != nil {
+	if err := s.putFileToStage(ctx, avroFile, stage); err != nil {
 		return 0, err
 	}
-	s.connector.logger.Info("Put file to stage in Avro sync for snowflake", partitionLog)
+	s.logger.Info("Put file to stage in Avro sync for snowflake", partitionLog)
 
-	err = s.connector.FinishQRepPartition(ctx, partition, config.FlowJobName, startTime)
-	if err != nil {
+	if err := s.FinishQRepPartition(ctx, partition, config.FlowJobName, startTime); err != nil {
 		return 0, err
 	}
 
@@ -152,14 +150,14 @@ func (s *SnowflakeAvroSyncHandler) addMissingColumns(
 		}
 
 		if !hasColumn {
-			s.connector.logger.Info(fmt.Sprintf("adding column %s to destination table %s",
+			s.logger.Info(fmt.Sprintf("adding column %s to destination table %s",
 				col.Name, dstTableName), partitionLog)
 			colsToTypes[col.Name] = col.Type
 		}
 	}
 
 	if len(colsToTypes) > 0 {
-		tx, err := s.connector.database.Begin()
+		tx, err := s.database.Begin()
 		if err != nil {
 			return fmt.Errorf("failed to begin transaction: %w", err)
 		}
@@ -173,7 +171,7 @@ func (s *SnowflakeAvroSyncHandler) addMissingColumns(
 			alterTableCmd := fmt.Sprintf("ALTER TABLE %s ", dstTableName)
 			alterTableCmd += fmt.Sprintf("ADD COLUMN IF NOT EXISTS \"%s\" %s;", upperCasedColName, sfColType)
 
-			s.connector.logger.Info(fmt.Sprintf("altering destination table %s with command `%s`",
+			s.logger.Info(fmt.Sprintf("altering destination table %s with command `%s`",
 				dstTableName, alterTableCmd), partitionLog)
 
 			if _, err := tx.ExecContext(ctx, alterTableCmd); err != nil {
@@ -185,10 +183,10 @@ func (s *SnowflakeAvroSyncHandler) addMissingColumns(
 			return fmt.Errorf("failed to commit transaction: %w", err)
 		}
 
-		s.connector.logger.Info("successfully added missing columns to destination table "+
+		s.logger.Info("successfully added missing columns to destination table "+
 			dstTableName, partitionLog)
 	} else {
-		s.connector.logger.Info("no missing columns found in destination table "+dstTableName, partitionLog)
+		s.logger.Info("no missing columns found in destination table "+dstTableName, partitionLog)
 	}
 
 	return nil
 }
@@ -203,12 +201,13 @@ func (s *SnowflakeAvroSyncHandler) getAvroSchema(
 		return nil, fmt.Errorf("failed to define Avro schema: %w", err)
 	}
 
-	s.connector.logger.Info(fmt.Sprintf("Avro schema: %v\n", avroSchema))
+	s.logger.Info(fmt.Sprintf("Avro schema: %v\n", avroSchema))
 	return avroSchema, nil
 }
 
 func (s *SnowflakeAvroSyncHandler) writeToAvroFile(
 	ctx context.Context,
+	env map[string]string,
 	stream *model.QRecordStream,
 	avroSchema *model.QRecordAvroSchemaDefinition,
 	partitionID string,
@@ -223,7 +222,7 @@ func (s *SnowflakeAvroSyncHandler) writeToAvroFile(
 		}
 
 		localFilePath := fmt.Sprintf("%s/%s.avro.zst", tmpDir, partitionID)
-		s.connector.logger.Info("writing records to local file " + localFilePath)
+		s.logger.Info("writing records to local file " + localFilePath)
 		avroFile, err := ocfWriter.WriteRecordsToAvroFile(ctx, localFilePath)
 		if err != nil {
 			return nil, fmt.Errorf("failed to write records to Avro file: %w", err)
@@ -238,14 +237,14 @@ func (s *SnowflakeAvroSyncHandler) writeToAvroFile(
 		}
 
 		s3AvroFileKey := fmt.Sprintf("%s/%s/%s.avro.zst", s3o.Prefix, s.config.FlowJobName, partitionID)
-		s.connector.logger.Info("OCF: Writing records to S3",
+		s.logger.Info("OCF: Writing records to S3",
 			slog.String(string(shared.PartitionIDKey), partitionID))
 
 		provider, err := utils.GetAWSCredentialsProvider(ctx, "snowflake", utils.PeerAWSCredentials{})
 		if err != nil {
 			return nil, err
 		}
-		avroFile, err := ocfWriter.WriteRecordsToS3(ctx, s3o.Bucket, s3AvroFileKey, provider)
+		avroFile, err := ocfWriter.WriteRecordsToS3(ctx, env, s3o.Bucket, s3AvroFileKey, provider)
 		if err != nil {
 			return nil, fmt.Errorf("failed to write records to S3: %w", err)
 		}
@@ -258,16 +257,16 @@ func (s *SnowflakeAvroSyncHandler) writeToAvroFile(
 
 func (s *SnowflakeAvroSyncHandler) putFileToStage(ctx context.Context, avroFile *avro.AvroFile, stage string) error {
 	if avroFile.StorageLocation != avro.AvroLocalStorage {
-		s.connector.logger.Info("no file to put to stage")
+		s.logger.Info("no file to put to stage")
 		return nil
 	}
 
 	putCmd := fmt.Sprintf("PUT file://%s @%s", avroFile.FilePath, stage)
 
-	if _, err := s.connector.database.ExecContext(ctx, putCmd); err != nil {
+	if _, err := s.database.ExecContext(ctx, putCmd); err != nil {
 		return fmt.Errorf("failed to put file to stage: %w", err)
 	}
 
-	s.connector.logger.Info(fmt.Sprintf("put file %s to stage %s", avroFile.FilePath, stage))
+	s.logger.Info(fmt.Sprintf("put file %s to stage %s", avroFile.FilePath, stage))
 	return nil
 }
diff --git a/flow/connectors/snowflake/snowflake.go b/flow/connectors/snowflake/snowflake.go
index 124a5c65a..06e3fb881 100644
--- a/flow/connectors/snowflake/snowflake.go
+++ b/flow/connectors/snowflake/snowflake.go
@@ -259,15 +259,13 @@ func (c *SnowflakeConnector) getDistinctTableNamesInBatch(
 	var result pgtype.Text
 	destinationTableNames := make([]string, 0)
 	for rows.Next() {
-		err = rows.Scan(&result)
-		if err != nil {
+		if err := rows.Scan(&result); err != nil {
 			return nil, fmt.Errorf("failed to read row: %w", err)
 		}
 		destinationTableNames = append(destinationTableNames, result.String)
 	}
 
-	err = rows.Err()
-	if err != nil {
+	if err := rows.Err(); err != nil {
 		return nil, fmt.Errorf("failed to read rows: %w", err)
 	}
 	return destinationTableNames, nil
@@ -425,8 +423,7 @@ func (c *SnowflakeConnector) SyncRecords(ctx context.Context, req *model.SyncRec
 		return nil, err
 	}
 
-	err = c.FinishBatch(ctx, req.FlowJobName, req.SyncBatchID, res.LastSyncedCheckpointID)
-	if err != nil {
+	if err := c.FinishBatch(ctx, req.FlowJobName, req.SyncBatchID, res.LastSyncedCheckpointID); err != nil {
 		return nil, err
 	}
 
@@ -458,7 +455,7 @@ func (c *SnowflakeConnector) syncRecordsViaAvro(
 		return nil, err
 	}
 
-	numRecords, err := avroSyncer.SyncRecords(ctx, destinationTableSchema, stream, req.FlowJobName)
+	numRecords, err := avroSyncer.SyncRecords(ctx, req.Env, destinationTableSchema, stream, req.FlowJobName)
 	if err != nil {
 		return nil, err
 	}
diff --git a/flow/connectors/utils/avro/avro_writer.go b/flow/connectors/utils/avro/avro_writer.go
index 6f193be88..ee72e2c28 100644
--- a/flow/connectors/utils/avro/avro_writer.go
+++ b/flow/connectors/utils/avro/avro_writer.go
@@ -23,6 +23,7 @@ import (
 	"github.com/PeerDB-io/peer-flow/connectors/utils"
 	"github.com/PeerDB-io/peer-flow/generated/protos"
 	"github.com/PeerDB-io/peer-flow/model"
+	"github.com/PeerDB-io/peer-flow/peerdbenv"
 	"github.com/PeerDB-io/peer-flow/shared"
 )
 
@@ -187,7 +188,11 @@ func (p *peerDBOCFWriter) WriteOCF(ctx context.Context, w io.Writer) (int, error
 }
 
 func (p *peerDBOCFWriter) WriteRecordsToS3(
-	ctx context.Context, bucketName, key string, s3Creds utils.AWSCredentialsProvider,
+	ctx context.Context,
+	env map[string]string,
+	bucketName string,
+	key string,
+	s3Creds utils.AWSCredentialsProvider,
 ) (*AvroFile, error) {
 	logger := shared.LoggerFromCtx(ctx)
 	s3svc, err := utils.CreateS3Client(ctx, s3Creds)
@@ -215,12 +220,23 @@ func (p *peerDBOCFWriter) WriteRecordsToS3(
 		numRows, writeOcfError = p.WriteOCF(ctx, w)
 	}()
 
-	_, err = manager.NewUploader(s3svc).Upload(ctx, &s3.PutObjectInput{
+	partSize, err := peerdbenv.PeerDBS3PartSize(ctx, env)
+	if err != nil {
+		return nil, fmt.Errorf("could not get s3 part size config: %w", err)
+	}
+
+	// Create the uploader using the AWS SDK v2 manager
+	uploader := manager.NewUploader(s3svc, func(u *manager.Uploader) {
+		if partSize > 0 {
+			u.PartSize = partSize
+		}
+	})
+
+	if _, err := uploader.Upload(ctx, &s3.PutObjectInput{
 		Bucket: aws.String(bucketName),
 		Key:    aws.String(key),
 		Body:   r,
-	})
-	if err != nil {
+	}); err != nil {
 		s3Path := "s3://" + bucketName + "/" + key
 		logger.Error("failed to upload file", slog.Any("error", err), slog.String("s3_path", s3Path))
 		return nil, fmt.Errorf("failed to upload file: %w", err)
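The configurable part size matters because S3 multipart uploads allow at most 10,000 parts per object, so the part size caps the largest object a stream can produce. As context (not from this diff), the SDK's own constants make the default ceiling concrete:

    // With the AWS SDK v2 defaults: 10,000 parts x 5 MiB per part ~= 48.8 GiB.
    // Raising u.PartSize raises this ceiling proportionally.
    maxObjectSize := int64(manager.MaxUploadParts) * manager.DefaultUploadPartSize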
diff --git a/flow/connectors/utils/aws.go b/flow/connectors/utils/aws.go
index 89fcab7b2..73eab604a 100644
--- a/flow/connectors/utils/aws.go
+++ b/flow/connectors/utils/aws.go
@@ -126,12 +126,10 @@ func (s *StaticAWSCredentialsProvider) Retrieve(ctx context.Context) (AWSCredent
 }
 
 func (s *StaticAWSCredentialsProvider) GetEndpointURL() string {
-	endpoint := ""
 	if s.credentials.EndpointUrl != nil {
-		endpoint = *s.credentials.EndpointUrl
+		return *s.credentials.EndpointUrl
 	}
-
-	return endpoint
+	return ""
 }
 
 func NewStaticAWSCredentialsProvider(credentials AWSCredentials, region string) AWSCredentialsProvider {
@@ -209,12 +207,9 @@ func GetAWSCredentialsProvider(ctx context.Context, connectorName string, peerCr
 }
 
 func FileURLForS3Service(endpoint string, region string, bucket string, filePath string) string {
-	// example: min.io local bucket or GCS
-	matches := s3CompatibleServiceEndpointPattern.MatchString(endpoint)
-	if matches {
+	if s3CompatibleServiceEndpointPattern.MatchString(endpoint) {
 		return fmt.Sprintf("%s/%s/%s", endpoint, bucket, filePath)
 	}
-
 	return fmt.Sprintf("https://%s.s3.%s.amazonaws.com/%s", bucket, region, filePath)
 }
 
@@ -238,25 +233,17 @@ func NewS3BucketAndPrefix(s3Path string) (*S3BucketAndPrefix, error) {
 }
 
 type resolverV2 struct {
-	userProvidedEndpointUrl string
+	url.URL
 }
 
 func (r *resolverV2) ResolveEndpoint(ctx context.Context, params s3.EndpointParameters) (
 	smithyendpoints.Endpoint, error,
 ) {
-	if r.userProvidedEndpointUrl != "" {
-		u, err := url.Parse(r.userProvidedEndpointUrl)
-		if err != nil {
-			return smithyendpoints.Endpoint{}, err
-		}
-
-		u.Path += "/" + *params.Bucket
-		return smithyendpoints.Endpoint{
-			URI: *u,
-		}, nil
-	}
-
-	return s3.NewDefaultEndpointResolverV2().ResolveEndpoint(ctx, params)
+	uri := r.URL
+	uri.Path += "/" + *params.Bucket
+	return smithyendpoints.Endpoint{
+		URI: uri,
+	}, nil
 }
 
 func CreateS3Client(ctx context.Context, credsProvider AWSCredentialsProvider) (*s3.Client, error) {
@@ -265,28 +252,35 @@ func CreateS3Client(ctx context.Context, credsProvider AWSCredentialsProvider) (
 		return nil, err
 	}
 
-	s3Client := s3.NewFromConfig(aws.Config{}, func(options *s3.Options) {
-		options.Region = credsProvider.GetRegion()
-		options.Credentials = credsProvider.GetUnderlyingProvider()
-
-		if awsCredentials.EndpointUrl != nil && *awsCredentials.EndpointUrl != "" {
-			options.BaseEndpoint = awsCredentials.EndpointUrl
-			options.EndpointResolverV2 = &resolverV2{
-				userProvidedEndpointUrl: *awsCredentials.EndpointUrl,
-			}
+	options := s3.Options{
+		Region:      credsProvider.GetRegion(),
+		Credentials: credsProvider.GetUnderlyingProvider(),
+	}
+	if awsCredentials.EndpointUrl != nil && *awsCredentials.EndpointUrl != "" {
+		options.BaseEndpoint = awsCredentials.EndpointUrl
+		options.UsePathStyle = true
+		url, err := url.Parse(*awsCredentials.EndpointUrl)
+		if err != nil {
+			return nil, err
+		}
+		options.EndpointResolverV2 = &resolverV2{
+			URL: *url,
+		}
 
+		if strings.Contains(*awsCredentials.EndpointUrl, "storage.googleapis.com") {
 			// Assign custom client with our own transport
 			options.HTTPClient = &http.Client{
 				Transport: &RecalculateV4Signature{
 					next:        http.DefaultTransport,
 					signer:      v4.NewSigner(),
 					credentials: credsProvider.GetUnderlyingProvider(),
-					region:      credsProvider.GetRegion(),
+					region:      options.Region,
 				},
 			}
 		}
-	})
-	return s3Client, nil
+	}
+
+	return s3.New(options), nil
 }
 
 // RecalculateV4Signature allow GCS over S3, removing Accept-Encoding header from sign
@@ -314,8 +308,7 @@ func (lt *RecalculateV4Signature) RoundTrip(req *http.Request) (*http.Response,
 	if err != nil {
 		return nil, err
 	}
-	err = lt.signer.SignHTTP(req.Context(), creds, req, v4.GetPayloadHash(req.Context()), "s3", lt.region, timeDate)
-	if err != nil {
+	if err := lt.signer.SignHTTP(req.Context(), creds, req, v4.GetPayloadHash(req.Context()), "s3", lt.region, timeDate); err != nil {
 		return nil, err
 	}
 	// Reset Accept-Encoding if desired
@@ -331,21 +324,20 @@ func PutAndRemoveS3(ctx context.Context, client *s3.Client, bucket string, prefi
 	reader := strings.NewReader(time.Now().Format(time.RFC3339))
 	bucketName := aws.String(bucket)
 	temporaryObjectPath := prefix + "/" + _peerDBCheck + uuid.New().String()
-	temporaryObjectPath = strings.TrimPrefix(temporaryObjectPath, "/")
-	_, putErr := client.PutObject(ctx, &s3.PutObjectInput{
+	key := aws.String(strings.TrimPrefix(temporaryObjectPath, "/"))
+
+	if _, putErr := client.PutObject(ctx, &s3.PutObjectInput{
 		Bucket: bucketName,
-		Key:    aws.String(temporaryObjectPath),
+		Key:    key,
 		Body:   reader,
-	})
-	if putErr != nil {
+	}); putErr != nil {
 		return fmt.Errorf("failed to write to bucket: %w", putErr)
 	}
 
-	_, delErr := client.DeleteObject(ctx, &s3.DeleteObjectInput{
+	if _, delErr := client.DeleteObject(ctx, &s3.DeleteObjectInput{
 		Bucket: bucketName,
-		Key:    aws.String(temporaryObjectPath),
-	})
-	if delErr != nil {
+		Key:    key,
+	}); delErr != nil {
 		return fmt.Errorf("failed to delete from bucket: %w", delErr)
 	}
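With this rewrite, any custom endpoint gets path-style addressing (UsePathStyle) plus a resolver that appends the bucket to the endpoint's path, and the URL is parsed once at client construction instead of on every request. The practical effect, illustrated with made-up values:

    // Sketch: how the embedded URL yields path-style object URLs for
    // S3-compatible stores such as MinIO or GCS.
    base, _ := url.Parse("http://localhost:9000")
    r := &resolverV2{URL: *base}
    // ResolveEndpoint with *params.Bucket == "peerdb" returns the URI
    // "http://localhost:9000/peerdb" (path-style), rather than the
    // virtual-hosted "http://peerdb.localhost:9000".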
--git a/flow/connectors/utils/monitoring/monitoring.go b/flow/connectors/utils/monitoring/monitoring.go index 9c7397004..98a62ec65 100644 --- a/flow/connectors/utils/monitoring/monitoring.go +++ b/flow/connectors/utils/monitoring/monitoring.go @@ -96,8 +96,10 @@ func UpdateEndTimeForCDCBatch( batchID int64, ) error { _, err := pool.Exec(ctx, - "UPDATE peerdb_stats.cdc_batches SET end_time=$1 WHERE flow_name=$2 AND batch_id=$3", - time.Now(), flowJobName, batchID) + `UPDATE peerdb_stats.cdc_batches + SET end_time = COALESCE(end_time, NOW()) + WHERE flow_name = $1 AND batch_id <= $2`, + flowJobName, batchID) if err != nil { return fmt.Errorf("error while updating batch in cdc_batch: %w", err) } diff --git a/flow/e2e/clickhouse/clickhouse.go b/flow/e2e/clickhouse/clickhouse.go index e1eafd6b4..975676152 100644 --- a/flow/e2e/clickhouse/clickhouse.go +++ b/flow/e2e/clickhouse/clickhouse.go @@ -56,6 +56,11 @@ func (s ClickHouseSuite) Peer() *protos.Peer { } func (s ClickHouseSuite) PeerForDatabase(dbname string) *protos.Peer { + region := "" + if s.s3Helper.S3Config.Region != nil { + region = *s.s3Helper.S3Config.Region + } + ret := &protos.Peer{ Name: e2e.AddSuffix(s, dbname), Type: protos.DBType_CLICKHOUSE, @@ -67,7 +72,7 @@ func (s ClickHouseSuite) PeerForDatabase(dbname string) *protos.Peer { S3Path: s.s3Helper.BucketName, AccessKeyId: *s.s3Helper.S3Config.AccessKeyId, SecretAccessKey: *s.s3Helper.S3Config.SecretAccessKey, - Region: *s.s3Helper.S3Config.Region, + Region: region, DisableTls: true, Endpoint: s.s3Helper.S3Config.Endpoint, }, @@ -87,7 +92,7 @@ func (s ClickHouseSuite) Teardown() { } func (s ClickHouseSuite) GetRows(table string, cols string) (*model.QRecordBatch, error) { - ch, err := connclickhouse.Connect(context.Background(), s.Peer().GetClickhouseConfig()) + ch, err := connclickhouse.Connect(context.Background(), nil, s.Peer().GetClickhouseConfig()) if err != nil { return nil, err } @@ -188,7 +193,7 @@ func SetupSuite(t *testing.T) ClickHouseSuite { conn, err := e2e.SetupPostgres(t, suffix) require.NoError(t, err, "failed to setup postgres") - s3Helper, err := e2e_s3.NewS3TestHelper(false) + s3Helper, err := e2e_s3.NewS3TestHelper(e2e_s3.Minio) require.NoError(t, err, "failed to setup S3") s := ClickHouseSuite{ @@ -198,7 +203,7 @@ func SetupSuite(t *testing.T) ClickHouseSuite { s3Helper: s3Helper, } - ch, err := connclickhouse.Connect(context.Background(), s.PeerForDatabase("default").GetClickhouseConfig()) + ch, err := connclickhouse.Connect(context.Background(), nil, s.PeerForDatabase("default").GetClickhouseConfig()) require.NoError(t, err, "failed to connect to clickhouse") err = ch.Exec(context.Background(), "CREATE DATABASE e2e_test_"+suffix) require.NoError(t, err, "failed to create clickhouse database") diff --git a/flow/e2e/clickhouse/peer_flow_ch_test.go b/flow/e2e/clickhouse/peer_flow_ch_test.go index 3cf1f9759..9c4fa2a16 100644 --- a/flow/e2e/clickhouse/peer_flow_ch_test.go +++ b/flow/e2e/clickhouse/peer_flow_ch_test.go @@ -11,6 +11,7 @@ import ( "github.com/shopspring/decimal" "github.com/stretchr/testify/require" + "github.com/PeerDB-io/peer-flow/connectors/clickhouse" "github.com/PeerDB-io/peer-flow/e2e" "github.com/PeerDB-io/peer-flow/e2eshared" "github.com/PeerDB-io/peer-flow/generated/protos" @@ -460,11 +461,10 @@ func (s ClickHouseSuite) Test_Replident_Full_Unchanged_TOAST_Updates() { e2e.RequireEnvCanceled(s.t, env) } -// Replicate a table called "table" and a column with hyphen in it -func (s ClickHouseSuite) Test_Weird_Table_And_Column() { - 
srcTableName := "table" - srcFullName := s.attachSchemaSuffix("\"table\"") - dstTableName := "table" +func (s ClickHouseSuite) WeirdTable(tableName string) { + srcTableName := tableName + srcFullName := s.attachSchemaSuffix(fmt.Sprintf("\"%s\"", tableName)) + dstTableName := tableName _, err := s.Conn().Exec(context.Background(), fmt.Sprintf(` CREATE TABLE IF NOT EXISTS %s ( @@ -474,14 +474,12 @@ func (s ClickHouseSuite) Test_Weird_Table_And_Column() { `, srcFullName)) require.NoError(s.t, err) - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s (key) VALUES ('init'); - `, srcFullName)) + _, err = s.Conn().Exec(context.Background(), fmt.Sprintf("INSERT INTO %s (key) VALUES ('init')", srcFullName)) require.NoError(s.t, err) connectionGen := e2e.FlowConnectionGenerationConfig{ - FlowJobName: s.attachSuffix("clickhouse_test_weird_table_and_column"), - TableNameMapping: map[string]string{s.attachSchemaSuffix("table"): dstTableName}, + FlowJobName: s.attachSuffix("clickhouse_test_weird_table_" + strings.ReplaceAll(tableName, "-", "_")), + TableNameMapping: map[string]string{s.attachSchemaSuffix(tableName): dstTableName}, Destination: s.Peer().Name, } flowConnConfig := connectionGen.GenerateFlowConnectionConfigs(s.t) @@ -492,15 +490,57 @@ func (s ClickHouseSuite) Test_Weird_Table_And_Column() { e2e.EnvWaitForEqualTablesWithNames(env, s, "waiting on initial", srcTableName, dstTableName, "id,key") - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s (key) VALUES ('cdc'); - `, srcFullName)) + _, err = s.Conn().Exec(context.Background(), fmt.Sprintf("INSERT INTO %s (key) VALUES ('cdc')", srcFullName)) require.NoError(s.t, err) e2e.EnvWaitForEqualTablesWithNames(env, s, "waiting on cdc", srcTableName, dstTableName, "id,key") env.Cancel() e2e.RequireEnvCanceled(s.t, env) + + env = e2e.ExecuteWorkflow(tc, shared.PeerFlowTaskQueue, peerflow.DropFlowWorkflow, &protos.DropFlowInput{ + FlowJobName: flowConnConfig.FlowJobName, + DropFlowStats: false, + FlowConnectionConfigs: flowConnConfig, + }) + e2e.EnvWaitForFinished(s.t, env, 3*time.Minute) + // now test weird names with rename based resync + ch, err := connclickhouse.Connect(context.Background(), nil, s.Peer().GetClickhouseConfig()) + require.NoError(s.t, err) + require.NoError(s.t, ch.Exec(context.Background(), fmt.Sprintf("DROP TABLE `%s`", dstTableName))) + require.NoError(s.t, ch.Close()) + flowConnConfig.Resync = true + env = e2e.ExecutePeerflow(tc, peerflow.CDCFlowWorkflow, flowConnConfig, nil) + e2e.SetupCDCFlowStatusQuery(s.t, env, flowConnConfig) + e2e.EnvWaitForEqualTablesWithNames(env, s, "waiting on initial", srcTableName, dstTableName, "id,key") + env.Cancel() + e2e.RequireEnvCanceled(s.t, env) + + env = e2e.ExecuteWorkflow(tc, shared.PeerFlowTaskQueue, peerflow.DropFlowWorkflow, &protos.DropFlowInput{ + FlowJobName: flowConnConfig.FlowJobName, + DropFlowStats: false, + FlowConnectionConfigs: flowConnConfig, + }) + e2e.EnvWaitForFinished(s.t, env, 3*time.Minute) + // now test weird names with exchange based resync + ch, err = connclickhouse.Connect(context.Background(), nil, s.Peer().GetClickhouseConfig()) + require.NoError(s.t, err) + require.NoError(s.t, ch.Exec(context.Background(), fmt.Sprintf("TRUNCATE TABLE `%s`", dstTableName))) + require.NoError(s.t, ch.Close()) + env = e2e.ExecutePeerflow(tc, peerflow.CDCFlowWorkflow, flowConnConfig, nil) + e2e.SetupCDCFlowStatusQuery(s.t, env, flowConnConfig) + e2e.EnvWaitForEqualTablesWithNames(env, s, "waiting on initial", srcTableName, 
dstTableName, "id,key") + env.Cancel() + e2e.RequireEnvCanceled(s.t, env) +} + +func (s ClickHouseSuite) Test_WeirdTable_Keyword() { + s.WeirdTable("table") +} + +func (s ClickHouseSuite) Test_WeirdTable_Dash() { + s.t.SkipNow() // TODO fix avro errors by sanitizing names + s.WeirdTable("table-group") } // large NUMERICs (precision >76) are mapped to String on CH, test diff --git a/flow/e2e/congen.go b/flow/e2e/congen.go index bcb0bf48f..91c5817d4 100644 --- a/flow/e2e/congen.go +++ b/flow/e2e/congen.go @@ -18,18 +18,16 @@ import ( func cleanPostgres(conn *pgx.Conn, suffix string) error { // drop the e2e_test schema with the given suffix if it exists - _, err := conn.Exec(context.Background(), fmt.Sprintf("DROP SCHEMA IF EXISTS e2e_test_%s CASCADE", suffix)) - if err != nil { + if _, err := conn.Exec(context.Background(), fmt.Sprintf("DROP SCHEMA IF EXISTS e2e_test_%s CASCADE", suffix)); err != nil { return fmt.Errorf("failed to drop e2e_test schema: %w", err) } // drop all open slots with the given suffix - _, err = conn.Exec( + if _, err := conn.Exec( context.Background(), "SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE slot_name LIKE $1", "%_"+suffix, - ) - if err != nil { + ); err != nil { return fmt.Errorf("failed to drop replication slots: %w", err) } @@ -47,8 +45,7 @@ func cleanPostgres(conn *pgx.Conn, suffix string) error { } for _, pubName := range publications { - _, err = conn.Exec(context.Background(), "DROP PUBLICATION "+pubName) - if err != nil { + if _, err := conn.Exec(context.Background(), "DROP PUBLICATION "+pubName); err != nil { return fmt.Errorf("failed to drop publication %s: %w", pubName, err) } } @@ -65,8 +62,7 @@ func setupPostgresSchema(t *testing.T, conn *pgx.Conn, suffix string) error { } // create an e2e_test schema - _, err = setupTx.Exec(context.Background(), "SELECT pg_advisory_xact_lock(hashtext('Megaton Mile'))") - if err != nil { + if _, err := setupTx.Exec(context.Background(), "SELECT pg_advisory_xact_lock(hashtext('Megaton Mile'))"); err != nil { return fmt.Errorf("failed to get lock: %w", err) } defer func() { @@ -77,12 +73,11 @@ func setupPostgresSchema(t *testing.T, conn *pgx.Conn, suffix string) error { }() // create an e2e_test schema - _, err = setupTx.Exec(context.Background(), "CREATE SCHEMA e2e_test_"+suffix) - if err != nil { + if _, err := setupTx.Exec(context.Background(), "CREATE SCHEMA e2e_test_"+suffix); err != nil { return fmt.Errorf("failed to create e2e_test schema: %w", err) } - _, err = setupTx.Exec(context.Background(), ` + if _, err := setupTx.Exec(context.Background(), ` CREATE OR REPLACE FUNCTION random_string( int ) RETURNS TEXT as $$ SELECT string_agg(substring('0123456789bcdfghjkmnpqrstvwxyz', round(random() * 30)::integer, 1), '') FROM generate_series(1, $1); @@ -95,8 +90,7 @@ func setupPostgresSchema(t *testing.T, conn *pgx.Conn, suffix string) error { LANGUAGE 'sql' VOLATILE SET search_path = 'pg_catalog'; - `) - if err != nil { + `); err != nil { return fmt.Errorf("failed to create utility functions: %w", err) } diff --git a/flow/e2e/postgres/postgres.go b/flow/e2e/postgres/postgres.go index 37a0ace06..ea43648f7 100644 --- a/flow/e2e/postgres/postgres.go +++ b/flow/e2e/postgres/postgres.go @@ -54,7 +54,10 @@ func (s PeerFlowE2ETestSuitePG) DestinationTable(table string) string { func (s PeerFlowE2ETestSuitePG) GetRows(table string, cols string) (*model.QRecordBatch, error) { s.t.Helper() - pgQueryExecutor := s.conn.NewQRepQueryExecutor("testflow", "testpart") + pgQueryExecutor, err := 
s.conn.NewQRepQueryExecutor(context.Background(), "testflow", "testpart") + if err != nil { + return nil, err + } return pgQueryExecutor.ExecuteAndProcessQuery( context.Background(), diff --git a/flow/e2e/s3/qrep_flow_s3_test.go b/flow/e2e/s3/qrep_flow_s3_test.go index 814871514..c52fca7a3 100644 --- a/flow/e2e/s3/qrep_flow_s3_test.go +++ b/flow/e2e/s3/qrep_flow_s3_test.go @@ -50,6 +50,7 @@ func (s PeerFlowE2ETestSuiteS3) Peer() *protos.Peer { } func TestPeerFlowE2ETestSuiteS3(t *testing.T) { + t.Skip("skipping AWS, CI credentials revoked") // TODO fix CI e2eshared.RunSuite(t, SetupSuiteS3) } @@ -57,14 +58,16 @@ func TestPeerFlowE2ETestSuiteGCS(t *testing.T) { e2eshared.RunSuite(t, SetupSuiteGCS) } +func TestPeerFlowE2ETestSuiteMinIO(t *testing.T) { + e2eshared.RunSuite(t, SetupSuiteMinIO) +} + func (s PeerFlowE2ETestSuiteS3) setupSourceTable(tableName string, rowCount int) { - err := e2e.CreateTableForQRep(s.conn.Conn(), s.suffix, tableName) - require.NoError(s.t, err) - err = e2e.PopulateSourceTable(s.conn.Conn(), s.suffix, tableName, rowCount) - require.NoError(s.t, err) + require.NoError(s.t, e2e.CreateTableForQRep(s.conn.Conn(), s.suffix, tableName)) + require.NoError(s.t, e2e.PopulateSourceTable(s.conn.Conn(), s.suffix, tableName, rowCount)) } -func setupSuite(t *testing.T, gcs bool) PeerFlowE2ETestSuiteS3 { +func setupSuite(t *testing.T, s3environment S3Environment) PeerFlowE2ETestSuiteS3 { t.Helper() suffix := "s3_" + strings.ToLower(shared.RandomString(8)) @@ -73,7 +76,7 @@ func setupSuite(t *testing.T, gcs bool) PeerFlowE2ETestSuiteS3 { require.Fail(t, "failed to setup postgres", err) } - helper, err := NewS3TestHelper(gcs) + helper, err := NewS3TestHelper(s3environment) if err != nil { require.Fail(t, "failed to setup S3", err) } @@ -97,12 +100,17 @@ func (s PeerFlowE2ETestSuiteS3) Teardown() { func SetupSuiteS3(t *testing.T) PeerFlowE2ETestSuiteS3 { t.Helper() - return setupSuite(t, false) + return setupSuite(t, Aws) } func SetupSuiteGCS(t *testing.T) PeerFlowE2ETestSuiteS3 { t.Helper() - return setupSuite(t, true) + return setupSuite(t, Gcs) +} + +func SetupSuiteMinIO(t *testing.T) PeerFlowE2ETestSuiteS3 { + t.Helper() + return setupSuite(t, Minio) } func (s PeerFlowE2ETestSuiteS3) Test_Complete_QRep_Flow_S3() { diff --git a/flow/e2e/s3/s3_helper.go b/flow/e2e/s3/s3_helper.go index 20ac3e903..af6be64f9 100644 --- a/flow/e2e/s3/s3_helper.go +++ b/flow/e2e/s3/s3_helper.go @@ -24,28 +24,48 @@ type S3TestHelper struct { prefix string } -func NewS3TestHelper(switchToGCS bool) (*S3TestHelper, error) { - credsPath := os.Getenv("TEST_S3_CREDS") - bucketName := "peerdb-test-bucket" - if switchToGCS { +type S3Environment int + +const ( + Aws S3Environment = iota + Gcs + Minio +) + +func NewS3TestHelper(s3environment S3Environment) (*S3TestHelper, error) { + var config utils.S3PeerCredentials + var endpoint string + var credsPath string + var bucketName string + switch s3environment { + case Aws: + credsPath = os.Getenv("TEST_S3_CREDS") + bucketName = "peerdb-test-bucket" + case Gcs: credsPath = os.Getenv("TEST_GCS_CREDS") bucketName = "peerdb_staging" + endpoint = "https://storage.googleapis.com" + case Minio: + bucketName = "peerdb" + endpoint = os.Getenv("AWS_ENDPOINT_URL_S3") + config.AccessKeyID = os.Getenv("AWS_ACCESS_KEY_ID") + config.SecretAccessKey = os.Getenv("AWS_SECRET_ACCESS_KEY") + config.Region = os.Getenv("AWS_REGION") + default: + panic(fmt.Sprintf("invalid s3environment %d", s3environment)) } - content, err := e2eshared.ReadFileToBytes(credsPath) - if err != nil { - 
return nil, fmt.Errorf("failed to read file: %w", err) - } + if credsPath != "" { + content, err := e2eshared.ReadFileToBytes(credsPath) + if err != nil { + return nil, fmt.Errorf("failed to read file: %w", err) + } - var config utils.S3PeerCredentials - err = json.Unmarshal(content, &config) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal json: %w", err) - } - endpoint := "" - if switchToGCS { - endpoint = "https://storage.googleapis.com" + if err := json.Unmarshal(content, &config); err != nil { + return nil, fmt.Errorf("failed to unmarshal json: %w", err) + } } + var endpointUrlPtr *string if endpoint != "" { endpointUrlPtr = &endpoint @@ -62,6 +82,7 @@ func NewS3TestHelper(switchToGCS bool) (*S3TestHelper, error) { if err != nil { return nil, err } + prefix := fmt.Sprintf("peerdb_test/%d_%s", time.Now().Unix(), shared.RandomString(6)) return &S3TestHelper{ client, @@ -106,13 +127,10 @@ func (h *S3TestHelper) CleanUp(ctx context.Context) error { // Delete each object for _, obj := range files.Contents { - deleteInput := &s3.DeleteObjectInput{ + if _, err := h.client.DeleteObject(ctx, &s3.DeleteObjectInput{ Bucket: &h.BucketName, Key: obj.Key, - } - - _, err := h.client.DeleteObject(ctx, deleteInput) - if err != nil { + }); err != nil { return err } } diff --git a/flow/e2e/snowflake/qrep_flow_sf_test.go b/flow/e2e/snowflake/qrep_flow_sf_test.go index 7fed8ada0..0f86ce767 100644 --- a/flow/e2e/snowflake/qrep_flow_sf_test.go +++ b/flow/e2e/snowflake/qrep_flow_sf_test.go @@ -123,6 +123,7 @@ func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_Upsert_Simple() } func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_S3() { + s.t.Skip("aws s3 broken in ci") // TODO fix tc := e2e.NewTemporalClient(s.t) numRows := 10 @@ -199,6 +200,7 @@ func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_Upsert_XMIN() { } func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_S3_Integration() { + s.t.Skip("aws s3 broken in ci") // TODO fix tc := e2e.NewTemporalClient(s.t) numRows := 10 diff --git a/flow/e2e/test_utils.go b/flow/e2e/test_utils.go index 9dadc4985..7fb3f857d 100644 --- a/flow/e2e/test_utils.go +++ b/flow/e2e/test_utils.go @@ -89,7 +89,10 @@ func EnvTrue(t *testing.T, env WorkflowRun, val bool) { } func GetPgRows(conn *connpostgres.PostgresConnector, suffix string, table string, cols string) (*model.QRecordBatch, error) { - pgQueryExecutor := conn.NewQRepQueryExecutor("testflow", "testpart") + pgQueryExecutor, err := conn.NewQRepQueryExecutor(context.Background(), "testflow", "testpart") + if err != nil { + return nil, err + } return pgQueryExecutor.ExecuteAndProcessQuery( context.Background(), @@ -195,10 +198,9 @@ func EnvWaitForCount( func RequireEnvCanceled(t *testing.T, env WorkflowRun) { t.Helper() EnvWaitForFinished(t, env, time.Minute) - err := env.Error() var panicErr *temporal.PanicError var canceledErr *temporal.CanceledError - if err == nil { + if err := env.Error(); err == nil { t.Fatal("Expected workflow to be canceled, not completed") } else if errors.As(err, &panicErr) { t.Fatalf("Workflow panic: %s %s", panicErr.Error(), panicErr.StackTrace()) @@ -217,10 +219,9 @@ func SetupCDCFlowStatusQuery(t *testing.T, env WorkflowRun, config *protos.FlowC response, err := env.Query(shared.FlowStatusQuery, config.FlowJobName) if err == nil { var status protos.FlowStatus - err = response.Get(&status) - if err != nil { + if err := response.Get(&status); err != nil { t.Fatal(err) - } else if status == protos.FlowStatus_STATUS_RUNNING { + } 
else if status == protos.FlowStatus_STATUS_RUNNING || status == protos.FlowStatus_STATUS_COMPLETED { return } else if counter > 30 { env.Cancel() diff --git a/flow/go.mod b/flow/go.mod index e24ffa9fb..a11ffb5a7 100644 --- a/flow/go.mod +++ b/flow/go.mod @@ -4,9 +4,9 @@ go 1.23.1 require ( cloud.google.com/go v0.116.0 - cloud.google.com/go/bigquery v1.63.1 + cloud.google.com/go/bigquery v1.64.0 cloud.google.com/go/pubsub v1.45.1 - cloud.google.com/go/storage v1.45.0 + cloud.google.com/go/storage v1.46.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.3 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.3.0 @@ -17,20 +17,20 @@ require ( github.com/PeerDB-io/gluajson v1.0.2 github.com/PeerDB-io/gluamsgpack v1.0.4 github.com/PeerDB-io/gluautf8 v1.0.0 - github.com/aws/aws-sdk-go-v2 v1.32.2 - github.com/aws/aws-sdk-go-v2/config v1.28.0 - github.com/aws/aws-sdk-go-v2/credentials v1.17.41 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.34 - github.com/aws/aws-sdk-go-v2/service/kms v1.37.2 - github.com/aws/aws-sdk-go-v2/service/s3 v1.66.1 - github.com/aws/aws-sdk-go-v2/service/ses v1.28.2 - github.com/aws/aws-sdk-go-v2/service/sns v1.33.2 + github.com/aws/aws-sdk-go-v2 v1.32.3 + github.com/aws/aws-sdk-go-v2/config v1.28.1 + github.com/aws/aws-sdk-go-v2/credentials v1.17.42 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35 + github.com/aws/aws-sdk-go-v2/service/kms v1.37.3 + github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 + github.com/aws/aws-sdk-go-v2/service/ses v1.28.3 + github.com/aws/aws-sdk-go-v2/service/sns v1.33.3 github.com/aws/smithy-go v1.22.0 github.com/cockroachdb/pebble v1.1.2 github.com/elastic/go-elasticsearch/v8 v8.15.0 github.com/google/uuid v1.6.0 github.com/grafana/pyroscope-go v1.2.0 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 github.com/jackc/pglogrepl v0.0.0-20240307033717-828fbfe908e9 github.com/jackc/pgx/v5 v5.7.1 @@ -44,56 +44,59 @@ require ( github.com/orcaman/concurrent-map/v2 v2.0.1 github.com/shopspring/decimal v1.4.0 github.com/slack-go/slack v0.15.0 - github.com/snowflakedb/gosnowflake v1.11.2 + github.com/snowflakedb/gosnowflake v1.12.0 github.com/stretchr/testify v1.9.0 github.com/twmb/franz-go v1.18.0 github.com/twmb/franz-go/pkg/kadm v1.14.0 github.com/twmb/franz-go/plugin/kslog v1.0.0 github.com/twpayne/go-geos v0.19.0 - github.com/urfave/cli/v3 v3.0.0-alpha9.1 + github.com/urfave/cli/v3 v3.0.0-alpha9.2 github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 github.com/yuin/gopher-lua v1.1.1 - go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel v1.32.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 - go.opentelemetry.io/otel/metric v1.31.0 - go.opentelemetry.io/otel/sdk v1.31.0 + go.opentelemetry.io/otel/metric v1.32.0 + go.opentelemetry.io/otel/sdk v1.32.0 go.opentelemetry.io/otel/sdk/metric v1.31.0 - go.opentelemetry.io/otel/trace v1.31.0 - go.temporal.io/api v1.40.0 - 
go.temporal.io/sdk v1.29.1 + go.opentelemetry.io/otel/trace v1.32.0 + go.temporal.io/api v1.41.0 + go.temporal.io/sdk v1.30.0 go.temporal.io/sdk/contrib/opentelemetry v0.6.0 go.uber.org/automaxprocs v1.6.0 golang.org/x/crypto v0.28.0 - golang.org/x/sync v0.8.0 - google.golang.org/api v0.203.0 - google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 + golang.org/x/sync v0.9.0 + google.golang.org/api v0.204.0 + google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.35.1 + k8s.io/apimachinery v0.31.2 + k8s.io/client-go v0.31.2 ) require ( cel.dev/expr v0.18.0 // indirect - cloud.google.com/go/auth v0.9.9 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/auth v0.10.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/monitoring v1.21.2 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect github.com/ClickHouse/ch-go v0.63.1 // indirect github.com/DataDog/zstd v1.5.6 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.3 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.3 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/apache/arrow/go/v15 v15.0.2 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect @@ -108,18 +111,29 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/dvsekhvalnov/jose2go v1.7.0 // indirect github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/envoyproxy/go-control-plane v0.13.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.6 // indirect github.com/getsentry/sentry-go v0.29.1 // indirect github.com/go-faster/city v1.0.1 // indirect github.com/go-faster/errors v0.7.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // 
indirect + github.com/go-openapi/swag v0.22.4 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/lestrrat-go/blackmagic v1.0.2 // indirect @@ -127,9 +141,12 @@ require ( github.com/lestrrat-go/httprc v1.0.6 // indirect github.com/lestrrat-go/iter v1.0.2 // indirect github.com/lestrrat-go/option v1.0.1 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/nexus-rpc/sdk-go v0.0.10 // indirect + github.com/nexus-rpc/sdk-go v0.0.11 // indirect github.com/paulmach/orb v0.11.1 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect @@ -141,13 +158,23 @@ require ( github.com/segmentio/asm v1.2.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/twmb/franz-go/pkg/kmsg v1.9.0 // indirect + github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.31.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/mod v0.21.0 // indirect golang.org/x/term v0.25.0 // indirect - google.golang.org/grpc/stats/opentelemetry v0.0.0-20241025232817-cb329375b14e // indirect + google.golang.org/grpc/stats/opentelemetry v0.0.0-20241028142157-ada6787961b3 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/api v0.31.2 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) require ( @@ -157,17 +184,17 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1 // indirect github.com/Azure/go-amqp v1.2.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.3.0 // indirect github.com/andybalholm/brotli v1.1.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 
// indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/djherbis/buffer v1.2.0 github.com/djherbis/nio/v3 v3.0.1 github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect @@ -195,7 +222,7 @@ require ( github.com/pborman/uuid v1.2.1 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/robfig/cron v1.2.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect @@ -203,12 +230,12 @@ require ( golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect - google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect + google.golang.org/genproto v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/flow/go.sum b/flow/go.sum index 8f783af56..7a0380da0 100644 --- a/flow/go.sum +++ b/flow/go.sum @@ -3,32 +3,32 @@ cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= -cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ= -cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= -cloud.google.com/go/bigquery v1.63.1 h1:/6syiWrSpardKNxdvldS5CUTRJX1iIkSPXCjLjiGL+g= -cloud.google.com/go/bigquery v1.63.1/go.mod h1:ufaITfroCk17WTqBhMpi8CRjsfHjMX07pDrQaRKKX2o= +cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= +cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/bigquery v1.64.0 h1:vSSZisNyhr2ioJE1OuYBQrnrpB7pIhRQm4jfjc7E/js= 
+cloud.google.com/go/bigquery v1.64.0/go.mod h1:gy8Ooz6HF7QmA+TRtX8tZmXBKH5mCFBwUApGAb3zI7Y= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= -cloud.google.com/go/datacatalog v1.22.1 h1:i0DyKb/o7j+0vgaFtimcRFjYsD6wFw1jpnODYUyiYRs= -cloud.google.com/go/datacatalog v1.22.1/go.mod h1:MscnJl9B2lpYlFoxRjicw19kFTwEke8ReKL5Y/6TWg8= +cloud.google.com/go/datacatalog v1.22.2 h1:9Bi8YO+WBE0YSSQL1tX62Gy/KcdNGLufyVlEJ0eYMrc= +cloud.google.com/go/datacatalog v1.22.2/go.mod h1:9Wamq8TDfL2680Sav7q3zEhBJSPBrDxJU8WtPJ25dBM= cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA= cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= -cloud.google.com/go/kms v1.20.0 h1:uKUvjGqbBlI96xGE669hcVnEMw1Px/Mvfa62dhM5UrY= -cloud.google.com/go/kms v1.20.0/go.mod h1:/dMbFF1tLLFnQV44AoI2GlotbjowyUfgVwezxW291fM= +cloud.google.com/go/kms v1.20.1 h1:og29Wv59uf2FVaZlesaiDAqHFzHaoUyHI3HYp9VUHVg= +cloud.google.com/go/kms v1.20.1/go.mod h1:LywpNiVCvzYNJWS9JUcGJSVTNSwPwi0vBAotzDqn2nc= cloud.google.com/go/logging v1.12.0 h1:ex1igYcGFd4S/RZWOCU51StlIEuey5bjqwH9ZYjHibk= cloud.google.com/go/logging v1.12.0/go.mod h1:wwYBt5HlYP1InnrtYI0wtwttpVU1rifnMT7RejksUAM= -cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= -cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= +cloud.google.com/go/longrunning v0.6.2 h1:xjDfh1pQcWPEvnfjZmwjKQEcHnpz6lHjfy7Fo0MK+hc= +cloud.google.com/go/longrunning v0.6.2/go.mod h1:k/vIs83RN4bE3YCswdXC5PFfWVILjm3hpEUlSko4PiI= cloud.google.com/go/monitoring v1.21.2 h1:FChwVtClH19E7pJ+e0xUhJPGksctZNVOk2UhMmblmdU= cloud.google.com/go/monitoring v1.21.2/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU= cloud.google.com/go/pubsub v1.45.1 h1:ZC/UzYcrmK12THWn1P72z+Pnp2vu/zCZRXyhAfP1hJY= cloud.google.com/go/pubsub v1.45.1/go.mod h1:3bn7fTmzZFwaUjllitv1WlsNMkqBgGUb3UdMhI54eCc= -cloud.google.com/go/storage v1.45.0 h1:5av0QcIVj77t+44mV4gffFC/LscFRUhto6UBMB5SimM= -cloud.google.com/go/storage v1.45.0/go.mod h1:wpPblkIuMP5jCB/E48Pz9zIo2S/zD8g+ITmxKkPCITE= -cloud.google.com/go/trace v1.11.1 h1:UNqdP+HYYtnm6lb91aNA5JQ0X14GnxkABGlfz2PzPew= -cloud.google.com/go/trace v1.11.1/go.mod h1:IQKNQuBzH72EGaXEodKlNJrWykGZxet2zgjtS60OtjA= +cloud.google.com/go/storage v1.46.0 h1:OTXISBpFd8KaA2ClT3K3oRk8UGOcTHtrZ1bW88xKiic= +cloud.google.com/go/storage v1.46.0/go.mod h1:lM+gMAW91EfXIeMTBmixRsKL/XCxysytoAgduVikjMk= +cloud.google.com/go/trace v1.11.2 h1:4ZmaBdL8Ng/ajrgKqY5jfvzqMXbrDcBsUGXOT9aqTtI= +cloud.google.com/go/trace v1.11.2/go.mod h1:bn7OwXd4pd5rFuAnTrzBuoZ4ax2XQeG3qNgYmfCy0Io= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= @@ -63,23 +63,25 @@ github.com/Azure/go-amqp v1.2.0 h1:NNyfN3/cRszWzMvjmm64yaPZDHX/2DJkowv8Ub9y01I= github.com/Azure/go-amqp v1.2.0/go.mod h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 
h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.3.0 h1:YjxrAyf/5z9yK0ecQsKjgSdaC4FjXUbwlgxLz05E3YY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.3.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/ClickHouse/ch-go v0.63.1 h1:s2JyZvWLTCSAGdtjMBBmAgQQHMco6pawLJMOXi0FODM= github.com/ClickHouse/ch-go v0.63.1/go.mod h1:I1kJJCL3WJcBMGe1m+HVK0+nREaG+JOYYBWjrDrF3R0= github.com/ClickHouse/clickhouse-go/v2 v2.30.0 h1:AG4D/hW39qa58+JHQIFOSnxyL46H6h2lrmGGk17dhFo= github.com/ClickHouse/clickhouse-go/v2 v2.30.0/go.mod h1:i9ZQAojcayW3RsdCb3YR+n+wC2h65eJsZCscZ1Z1wyo= github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 h1:cb3br57K508pQEFgBxn9GDhPS9HefpyMPK1RzmtMNzk= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.3 h1:xir5X8TS8UBVPWg2jHL+cSTf0jZgqYQSA54TscSt1/0= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.3/go.mod h1:SsdWig2J5PMnfMvfJuEb1uZa8Y+kvNyvrULFo69gTFk= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.3 h1:Nl7phYyHjnqofWDpD+6FYdiwtNIxebn0AHLry7Sxb0M= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.3/go.mod h1:pNP/L2wDlaQnQlFvkDKGSruDoYRpmAxB6drgsskfYwg= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.3 h1:2vcVkrNdSMJpoOVAWi9ApsQR5iqNeFGt5Qx8Xlt3IoI= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.3/go.mod h1:wRbFgBQUVm1YXrvWKofAEmq9HNJTDphbAaJSSX01KUI= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0 h1:o90wcURuxekmXrtxmYWTyNla0+ZEHhud6DI1ZTxd1vI= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0/go.mod h1:6fTWu4m3jocfUZLYF5KsZC1TUfRvEjs7lM4crme/irw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.49.0 h1:jJKWl98inONJAr/IZrdFQUWcwUO95DLY1XMD1ZIut+g= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.49.0/go.mod h1:l2fIqmwB+FKSfvn3bAD/0i+AXAxhIZjTK2svT/mgUXs= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0 h1:GYUJLfvd++4DMuMhCFLgLXvFwofIxh/qOwoGuS/LTew= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0/go.mod h1:wRbFgBQUVm1YXrvWKofAEmq9HNJTDphbAaJSSX01KUI= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= 
github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/PeerDB-io/glua64 v1.0.1 h1:biXLlFF/L5pnJCwDon7hkWkuQPozC8NjKS3J7Wzi69I= @@ -102,48 +104,48 @@ github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7X github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/apache/arrow/go/v15 v15.0.2 h1:60IliRbiyTWCWjERBCkO1W4Qun9svcYoZrSLcyOsMLE= github.com/apache/arrow/go/v15 v15.0.2/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA= -github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI= -github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2 v1.32.3 h1:T0dRlFBKcdaUPGNtkBSwHZxrtis8CQU17UpNBZYd0wk= +github.com/aws/aws-sdk-go-v2 v1.32.3/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6/go.mod h1:j/I2++U0xX+cr44QjHay4Cvxj6FUbnxrgmqN3H1jTZA= -github.com/aws/aws-sdk-go-v2/config v1.28.0 h1:FosVYWcqEtWNxHn8gB/Vs6jOlNwSoyOCA/g/sxyySOQ= -github.com/aws/aws-sdk-go-v2/config v1.28.0/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc= -github.com/aws/aws-sdk-go-v2/credentials v1.17.41 h1:7gXo+Axmp+R4Z+AK8YFQO0ZV3L0gizGINCOWxSLY9W8= -github.com/aws/aws-sdk-go-v2/credentials v1.17.41/go.mod h1:u4Eb8d3394YLubphT4jLEwN1rLNq2wFOlT6OuxFwPzU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 h1:TMH3f/SCAWdNtXXVPPu5D6wrr4G5hI1rAxbcocKfC7Q= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17/go.mod h1:1ZRXLdTpzdJb9fwTMXiLipENRxkGMTn1sfKexGllQCw= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.34 h1:os83HS/WfOwi1LsZWLCSHTyj+whvPGaxUsq/D1Ol2Q0= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.34/go.mod h1:tG0BaDCAweumHRsOHm72tuPgAfRLASQThgthWYeTyV8= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60= +github.com/aws/aws-sdk-go-v2/config v1.28.1 h1:oxIvOUXy8x0U3fR//0eq+RdCKimWI900+SV+10xsCBw= +github.com/aws/aws-sdk-go-v2/config v1.28.1/go.mod h1:bRQcttQJiARbd5JZxw6wG0yIK3eLeSCPdg6uqmmlIiI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.42 h1:sBP0RPjBU4neGpIYyx8mkU2QqLPl5u9cmdTWVzIpHkM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.42/go.mod h1:FwZBfU530dJ26rv9saAbxa9Ej3eF/AK0OAY86k13n4M= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18 h1:68jFVtt3NulEzojFesM/WVarlFpCaXLKaBxDpzkQ9OQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18/go.mod h1:Fjnn5jQVIo6VyedMc0/EhPpfNlPl7dHV916O6B+49aE= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35 h1:ihPPdcCVSN0IvBByXwqVp28/l4VosBZ6sDulcvU2J7w= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35/go.mod h1:JkgEhs3SVF51Dj3m1Bj+yL8IznpxzkwlA3jLg3x7Kls= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 h1:Jw50LwEkVjuVzE1NzkhNKkBf9cRN7MtE1F/b2cOKTUM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22/go.mod h1:Y/SmAyPcOTmpeVaWSzSKiILfXTVJwrGmYZhcRbhWuEY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 
v2.6.22 h1:981MHwBaRZM7+9QSR6XamDzF/o7ouUGxFzr+nVSIhrs= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22/go.mod h1:1RA1+aBEfn+CAB/Mh0MB6LsdCYCnjZm7tKXtnk499ZQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 h1:7edmS3VOBDhK00b/MwGtGglCm7hhwNYnjJs/PgFdMQE= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21/go.mod h1:Q9o5h4HoIWG8XfzxqiuK/CGUbepCJ8uTlaE3bAbxytQ= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 h1:yV+hCAHZZYJQcwAaszoBNwLbPItHvApxT0kVIw6jRgs= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22/go.mod h1:kbR1TL8llqB1eGnVbybcA4/wgScxdylOdyAd51yxPdw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2 h1:4FMHqLfk0efmTqhXVRL5xYRqlEBNBiRI7N6w4jsEdd4= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2/go.mod h1:LWoqeWlK9OZeJxsROW2RqrSPvQHKTpp69r/iDjwsSaw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 h1:s7NA1SOw8q/5c0wr8477yOPp0z+uBaXBnLE0XYb0POA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2/go.mod h1:fnjjWyAW/Pj5HYOxl9LJqWtEwS7W2qgcRLWP+uWbss0= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2 h1:t7iUP9+4wdc5lt3E41huP+GvQZJD38WLsgVp4iOtAjg= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2/go.mod h1:/niFCtmuQNxqx9v8WAPq5qh7EH25U4BF6tjoyq9bObM= -github.com/aws/aws-sdk-go-v2/service/kms v1.37.2 h1:tfBABi5R6aSZlhgTWHxL+opYUDOnIGoNcJLwVYv0jLM= -github.com/aws/aws-sdk-go-v2/service/kms v1.37.2/go.mod h1:dZYFcQwuoh+cLOlFnZItijZptmyDhRIkOKWFO1CfzV8= -github.com/aws/aws-sdk-go-v2/service/s3 v1.66.1 h1:MkQ4unegQEStiQYmfFj+Aq5uTp265ncSmm0XTQwDwi0= -github.com/aws/aws-sdk-go-v2/service/s3 v1.66.1/go.mod h1:cB6oAuus7YXRZhWCc1wIwPywwZ1XwweNp2TVAEGYeB8= -github.com/aws/aws-sdk-go-v2/service/ses v1.28.2 h1:FtmzF/j5v++pa0tuuE0wwvWckHzad+vl/Dy5as0Ateo= -github.com/aws/aws-sdk-go-v2/service/ses v1.28.2/go.mod h1:bSPQlnLDUiQy7XxmKqTBsCVkYrLfnYJbEyAmm/gWcaI= -github.com/aws/aws-sdk-go-v2/service/sns v1.33.2 h1:GeVRrB1aJsGdXxdPY6VOv0SWs+pfdeDlKgiBxi0+V6I= -github.com/aws/aws-sdk-go-v2/service/sns v1.33.2/go.mod h1:c6Sj8zleZXYs4nyU3gpDKTzPWu7+t30YUXoLYRpbUvU= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 h1:bSYXVyUzoTHoKalBmwaZxs97HU9DWWI3ehHSAMa7xOk= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.2/go.mod h1:skMqY7JElusiOUjMJMOv1jJsP7YUg7DrhgqZZWuzu1U= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 h1:AhmO1fHINP9vFYUE0LHzCWg/LfUWUF+zFPEcY9QXb7o= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2/go.mod h1:o8aQygT2+MVP0NaV6kbdE1YnnIM8RRVQzoeUH45GOdI= -github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 h1:CiS7i0+FUe+/YY1GvIBLLrR/XNGZ4CtM1Ll0XavNuVo= -github.com/aws/aws-sdk-go-v2/service/sts v1.32.2/go.mod h1:HtaiBI8CjYoNVde8arShXb94UbQQi9L4EMr6D+xGBwo= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 h1:kT6BcZsmMtNkP/iYMcRG+mIEA/IbeiUimXtGmqF39y0= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3/go.mod h1:Z8uGua2k4PPaGOYn66pK02rhMrot3Xk3tpBuUFPomZU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 h1:qcxX0JYlgWH3hpPUnd6U0ikcl6LLA9sLkXE2w1fpMvY= 
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3/go.mod h1:cLSNEmI45soc+Ef8K/L+8sEA3A3pYFEYf5B5UI+6bH4= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 h1:ZC7Y/XgKUxwqcdhO5LE8P6oGP1eh6xlQReWNKfhvJno= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3/go.mod h1:WqfO7M9l9yUAw0HcHaikwRd/H6gzYdz7vjejCA5e2oY= +github.com/aws/aws-sdk-go-v2/service/kms v1.37.3 h1:VpyBA6KP6JgzwokQps8ArQPGy9rFej8adwuuQGcduH8= +github.com/aws/aws-sdk-go-v2/service/kms v1.37.3/go.mod h1:TT/9V4PcmSPpd8LPUNJ8hBHJmpqcfhx6MrbWTkvyR+4= +github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 h1:p9TNFL8bFUMd+38YIpTAXpoxyz0MxC7FlbFEH4P4E1U= +github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2/go.mod h1:fNjyo0Coen9QTwQLWeV6WO2Nytwiu+cCcWaTdKCAqqE= +github.com/aws/aws-sdk-go-v2/service/ses v1.28.3 h1:/ioiCUft5rJL1ufmmVq3Qzg198wgzj/NRm6eHFthEAc= +github.com/aws/aws-sdk-go-v2/service/ses v1.28.3/go.mod h1:IbgnKNoFmzljAdSbce2P4cNT6pLmrdFI+q02Q2PCM7o= +github.com/aws/aws-sdk-go-v2/service/sns v1.33.3 h1:coZW/SqpINT0VWG8vRWWY9TWUof8TDdxublw2Xur0Zc= +github.com/aws/aws-sdk-go-v2/service/sns v1.33.3/go.mod h1:J/G2xuhwNBlDvEi0WR/bnBbac4KSgpkERna/IXEF52w= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 h1:UTpsIf0loCIWEbrqdLb+0RxnTXfWh2vhw4nQmFi4nPc= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.3/go.mod h1:FZ9j3PFHHAR+w0BSEjK955w5YD2UwB/l/H0yAK3MJvI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3 h1:2YCmIXv3tmiItw0LlYf6v7gEHebLY45kBEnPezbUKyU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3/go.mod h1:u19stRyNPxGhj6dRm+Cdgu6N75qnbW7+QN0q0dsAk58= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 h1:wVnQ6tigGsRqSWDEEyH6lSAJ9OyFUsSnbaUWChuSGzs= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.3/go.mod h1:VZa9yTFyj4o10YGsmDO4gbQJUvvhY72fhumT8W4LqsE= github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -178,8 +180,9 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0= github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= @@ -195,6 +198,8 @@ github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHo github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/elastic/go-elasticsearch/v8 v8.15.0 h1:IZyJhe7t7WI3NEFdcHnf6IJXqpRf+8S8QWLtZYYyBYk= github.com/elastic/go-elasticsearch/v8 v8.15.0/go.mod h1:HCON3zj4btpqs2N1jjsAy4a/fiAul+YBP00mBH4xik8= 
+github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -209,6 +214,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc= github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc= github.com/getsentry/sentry-go v0.29.1 h1:DyZuChN8Hz3ARxGVV8ePaNXh1dQ7d76AiB117xcREwA= @@ -226,9 +233,18 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= @@ -268,6 +284,8 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI= github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models 
v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -277,10 +295,16 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
 github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
 github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
 github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -300,8 +324,8 @@ github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKt
 github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU=
 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0=
 github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU=
 github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0=
 github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
@@ -328,6 +352,10 @@ github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
 github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
 github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
 github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
 github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -339,6 +367,7 @@ github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS
 github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -363,18 +392,29 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
 github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/linkedin/goavro/v2 v2.13.0 h1:L8eI8GcuciwUkt41Ej62joSZS4kKaYIUdze+6for9NU=
 github.com/linkedin/goavro/v2 v2.13.0/go.mod h1:KXx+erlq+RPlGSPmLF7xGo6SAbh8sCQ53x064+ioxhk=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
 github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
 github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA=
 github.com/microsoft/go-mssqldb v1.7.2/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
 github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs=
 github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/nexus-rpc/sdk-go v0.0.10 h1:7jEPUlsghxoD4OJ2H8YbFJ1t4wbxsUef7yZgBfyY3uA=
-github.com/nexus-rpc/sdk-go v0.0.10/go.mod h1:TpfkM2Cw0Rlk9drGkoiSMpFqflKTiQLWUNyKJjF8mKQ=
+github.com/nexus-rpc/sdk-go v0.0.11 h1:qH3Us3spfp50t5ca775V1va2eE6z1zMQDZY4mvbw0CI=
+github.com/nexus-rpc/sdk-go v0.0.11/go.mod h1:TpfkM2Cw0Rlk9drGkoiSMpFqflKTiQLWUNyKJjF8mKQ=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
+github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
 github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
@@ -395,8 +435,9 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
 github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
 github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
 github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
@@ -424,8 +465,10 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/slack-go/slack v0.15.0 h1:LE2lj2y9vqqiOf+qIIy0GvEoxgF1N5yLGZffmEZykt0=
 github.com/slack-go/slack v0.15.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
-github.com/snowflakedb/gosnowflake v1.11.2 h1:eAMsxrCiC6ij5wX3dHx1TQCBOdDmCK062Ir8rndUkRg=
-github.com/snowflakedb/gosnowflake v1.11.2/go.mod h1:WFe+8mpsapDaQjHX6BqJBKtfQCGlGD3lHKeDsKfpx2A=
+github.com/snowflakedb/gosnowflake v1.12.0 h1:Saez8egtn5xAoVMBxFaMu9MYfAG9SS9dpAEXD1/ECIo=
+github.com/snowflakedb/gosnowflake v1.12.0/go.mod h1:wHfYmZi3zvtWItojesAhWWXBN7+niex2R1h/S7QCZYg=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -454,8 +497,10 @@ github.com/twmb/franz-go/plugin/kslog v1.0.0 h1:I64oEmF+0PDvmyLgwrlOtg4mfpSE9Gwl
 github.com/twmb/franz-go/plugin/kslog v1.0.0/go.mod h1:8pMjK3OJJJNNYddBSbnXZkIK5dCKFIk9GcVVCDgvnQc=
 github.com/twpayne/go-geos v0.19.0 h1:V7vnLe7gY7JOHLTg8+2oykZOw6wpBLHVNlcnzS2FlG0=
 github.com/twpayne/go-geos v0.19.0/go.mod h1:XGpUjCtZf4Ul6BMii6KA4EmJ9JCNhVP1mohdoReopZ8=
-github.com/urfave/cli/v3 v3.0.0-alpha9.1 h1:1fJU+bltkwN8lF4Sni/X0i1d8XwPIrS82ivZ8qsp/q4=
-github.com/urfave/cli/v3 v3.0.0-alpha9.1/go.mod h1:FnIeEMYu+ko8zP1F9Ypr3xkZMIDqW3DR92yUtY39q1Y=
+github.com/urfave/cli/v3 v3.0.0-alpha9.2 h1:CL8llQj3dGRLVQQzHxS+ZYRLanOuhyK1fXgLKD+qV+Y=
+github.com/urfave/cli/v3 v3.0.0-alpha9.2/go.mod h1:FnIeEMYu+ko8zP1F9Ypr3xkZMIDqW3DR92yUtY39q1Y=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
 github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
 github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
@@ -484,32 +529,32 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
-go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
-go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
+go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 h1:FZ6ei8GFW7kyPYdxJaV2rgI6M+4tvZzhYsQ2wgyVC08=
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0/go.mod h1:MdEu/mC6j3D+tTEfvI15b5Ci2Fn7NneJ71YMoiS3tpI=
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 h1:ZsXq73BERAiNuuFXYqP4MR5hBrjXfMGSO+Cx7qoOZiM=
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0/go.mod h1:hg1zaDMpyZJuUzjFxFsRYBoccE86tM9Uf4IqNMUxvrY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
-go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
-go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
-go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
-go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
+go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
+go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
+go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
 go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
 go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
-go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
-go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
+go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
+go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
 go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
-go.temporal.io/api v1.40.0 h1:rH3HvUUCFr0oecQTBW5tI6DdDQsX2Xb6OFVgt/bvLto=
-go.temporal.io/api v1.40.0/go.mod h1:1WwYUMo6lao8yl0371xWUm13paHExN5ATYT/B7QtFis=
-go.temporal.io/sdk v1.29.1 h1:y+sUMbUhTU9rj50mwIZAPmcXCtgUdOWS9xHDYRYSgZ0=
-go.temporal.io/sdk v1.29.1/go.mod h1:kp//DRvn3CqQVBCtjL51Oicp9wrZYB2s6row1UgzcKQ=
+go.temporal.io/api v1.41.0 h1:VYzyWJjJk1jeB9urntA/t7Hiyo2tHdM5xEdtdib4EO8=
+go.temporal.io/api v1.41.0/go.mod h1:1WwYUMo6lao8yl0371xWUm13paHExN5ATYT/B7QtFis=
+go.temporal.io/sdk v1.30.0 h1:7jzSFZYk+tQ2kIYEP+dvrM7AW9EsCEP52JHCjVGuwbI=
+go.temporal.io/sdk v1.30.0/go.mod h1:Pv45F/fVDgWKx+jhix5t/dGgqROVaI+VjPLd3CHWqq0=
 go.temporal.io/sdk/contrib/opentelemetry v0.6.0 h1:rNBArDj5iTUkcMwKocUShoAW59o6HdS7Nq4CTp4ldj8=
 go.temporal.io/sdk/contrib/opentelemetry v0.6.0/go.mod h1:Lem8VrE2ks8P+FYcRM3UphPoBr+tfM3v/Kaf0qStzSg=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
@@ -560,8 +605,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
+golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -576,8 +621,8 @@ golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
 golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
@@ -585,8 +630,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
+golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
 golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
 golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -609,20 +654,20 @@ golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhS
 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
 gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o=
 gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY=
-google.golang.org/api v0.203.0 h1:SrEeuwU3S11Wlscsn+LA1kb/Y5xT8uggJSkIhD08NAU=
-google.golang.org/api v0.203.0/go.mod h1:BuOVyCSYEPwJb3npWvDnNmFI92f3GeRnHNkETneT3SI=
+google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4=
+google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 h1:Q3nlH8iSQSRUwOskjbcSMcF2jiYMNiQYZ0c2KEJLKKU=
-google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38/go.mod h1:xBI+tzfqGGN2JBeSebfKXFSdBpWVQ7sLW40PTupVRm4=
-google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 h1:2oV8dfuIkM1Ti7DwXc0BJfnwr9csz4TDXI9EmiI+Rbw=
-google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38/go.mod h1:vuAjtvlwkDKF6L1GQ0SokiRLCGFfeBUXWr/aFFkHACc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
+google.golang.org/genproto v0.0.0-20241104194629-dd2ea8efbc28 h1:KJjNNclfpIkVqrZlTWcgOOaVQ00LdBnoEaRfkUx760s=
+google.golang.org/genproto v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:mt9/MofW7AWQ+Gy179ChOnvmJatV8YHUmrcedo9CIFI=
+google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g=
+google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
@@ -631,8 +676,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
 google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
 google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
-google.golang.org/grpc/stats/opentelemetry v0.0.0-20241025232817-cb329375b14e h1:SoMI+r+Qsp379U9BlVzrHtqAqYP3NEv9vNhYqUaAWOg=
-google.golang.org/grpc/stats/opentelemetry v0.0.0-20241025232817-cb329375b14e/go.mod h1:jzYlkSMbKypzuu6xoAEijsNVo9ZeDF1u/zCfFgsx7jg=
+google.golang.org/grpc/stats/opentelemetry v0.0.0-20241028142157-ada6787961b3 h1:hUfOButuEtpc0UvYiaYRbNwxVYr0mQQOWq6X8beJ9Gc=
+google.golang.org/grpc/stats/opentelemetry v0.0.0-20241028142157-ada6787961b3/go.mod h1:jzYlkSMbKypzuu6xoAEijsNVo9ZeDF1u/zCfFgsx7jg=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -651,6 +696,8 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
@@ -661,5 +708,23 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0=
+k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk=
+k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw=
+k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
+k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0=
 nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/flow/main.go b/flow/main.go
index 4001a8891..9d499e957 100644
--- a/flow/main.go
+++ b/flow/main.go
@@ -70,6 +70,60 @@ func main() {
 		Sources: cli.EnvVars("TEMPORAL_MAX_CONCURRENT_WORKFLOW_TASKS"),
 	}
 
+	maintenanceModeWorkflowFlag := &cli.StringFlag{
+		Name: "run-maintenance-flow",
+		Value: "",
+		Usage: "Run a maintenance flow. Options are 'start' or 'end'",
+		Sources: cli.EnvVars("RUN_MAINTENANCE_FLOW"),
+	}
+
+	maintenanceSkipOnApiVersionMatchFlag := &cli.BoolFlag{
+		Name: "skip-on-api-version-match",
+		Value: false,
+		Usage: "Skip maintenance flow if the API version matches",
+		Sources: cli.EnvVars("MAINTENANCE_SKIP_ON_API_VERSION_MATCH"),
+	}
+
+	maintenanceSkipOnNoMirrorsFlag := &cli.BoolFlag{
+		Name: "skip-on-no-mirrors",
+		Value: false,
+		Usage: "Skip maintenance flow if there are no mirrors",
+		Sources: cli.EnvVars("MAINTENANCE_SKIP_ON_NO_MIRRORS"),
+	}
+
+	flowGrpcAddressFlag := &cli.StringFlag{
+		Name: "flow-grpc-address",
+		Value: "",
+		Usage: "Address of the flow gRPC server",
+		Sources: cli.EnvVars("FLOW_GRPC_ADDRESS"),
+	}
+
+	flowTlsEnabledFlag := &cli.BoolFlag{
+		Name: "flow-tls-enabled",
+		Value: false,
+		Usage: "Enable TLS for the flow gRPC server",
+		Sources: cli.EnvVars("FLOW_TLS_ENABLED"),
+	}
+
+	useMaintenanceTaskQueueFlag := &cli.BoolFlag{
+		Name: "use-maintenance-task-queue",
+		Value: false,
+		Usage: "Use the maintenance task queue for the worker",
+		Sources: cli.EnvVars("USE_MAINTENANCE_TASK_QUEUE"),
+	}
+
+	assumedSkippedMaintenanceWorkflowsFlag := &cli.BoolFlag{
+		Name: "assume-skipped-workflow",
+		Value: false,
+		Usage: "Skip running maintenance workflows and simply output to catalog",
+	}
+
+	skipIfK8sServiceMissingFlag := &cli.StringFlag{
+		Name: "skip-if-k8s-service-missing",
+		Value: "",
+		Usage: "Skip maintenance if the k8s service is missing, generally used during pre-upgrade hook",
+	}
+
 	app := &cli.Command{
 		Name: "PeerDB Flows CLI",
 		Commands: []*cli.Command{
@@ -85,6 +139,7 @@ func main() {
 					TemporalNamespace: clicmd.String("temporal-namespace"),
 					TemporalMaxConcurrentActivities: int(clicmd.Int("temporal-max-concurrent-activities")),
 					TemporalMaxConcurrentWorkflowTasks: int(clicmd.Int("temporal-max-concurrent-workflow-tasks")),
+					UseMaintenanceTaskQueue: clicmd.Bool(useMaintenanceTaskQueueFlag.Name),
 				})
 				if err != nil {
 					return err
@@ -100,6 +155,7 @@ func main() {
 					temporalNamespaceFlag,
 					temporalMaxConcurrentActivitiesFlag,
 					temporalMaxConcurrentWorkflowTasksFlag,
+					useMaintenanceTaskQueueFlag,
 				},
 			},
 			{
@@ -148,6 +204,37 @@ func main() {
 					})
 				},
 			},
+			{
+				Name: "maintenance",
+				Flags: []cli.Flag{
+					temporalHostPortFlag,
+					temporalNamespaceFlag,
+					maintenanceModeWorkflowFlag,
+					maintenanceSkipOnApiVersionMatchFlag,
+					maintenanceSkipOnNoMirrorsFlag,
+					flowGrpcAddressFlag,
+					flowTlsEnabledFlag,
+					useMaintenanceTaskQueueFlag,
+					assumedSkippedMaintenanceWorkflowsFlag,
+					skipIfK8sServiceMissingFlag,
+				},
+				Action: func(ctx context.Context, clicmd *cli.Command) error {
+					temporalHostPort := clicmd.String("temporal-host-port")
+
+					return cmd.MaintenanceMain(ctx, &cmd.MaintenanceCLIParams{
+						TemporalHostPort: temporalHostPort,
+						TemporalNamespace: clicmd.String(temporalNamespaceFlag.Name),
+						Mode: clicmd.String(maintenanceModeWorkflowFlag.Name),
+						SkipOnApiVersionMatch: clicmd.Bool(maintenanceSkipOnApiVersionMatchFlag.Name),
+						SkipOnNoMirrors: clicmd.Bool(maintenanceSkipOnNoMirrorsFlag.Name),
+						FlowGrpcAddress: clicmd.String(flowGrpcAddressFlag.Name),
+						FlowTlsEnabled: clicmd.Bool(flowTlsEnabledFlag.Name),
+						UseMaintenanceTaskQueue: clicmd.Bool(useMaintenanceTaskQueueFlag.Name),
+						AssumeSkippedMaintenanceWorkflows: clicmd.Bool(assumedSkippedMaintenanceWorkflowsFlag.Name),
+						SkipIfK8sServiceMissing: clicmd.String(skipIfK8sServiceMissingFlag.Name),
+					})
+				},
+			},
 		},
 	}
 
@@ -164,5 +251,6 @@ func main() {
 	if err := app.Run(appCtx, os.Args); err != nil {
 		log.Printf("error running app: %+v", err)
+		panic(err)
 	}
 }
diff --git a/flow/model/qrecord_copy_from_source.go b/flow/model/qrecord_copy_from_source.go
index 308676c5f..d633fda99 100644
--- a/flow/model/qrecord_copy_from_source.go
+++ b/flow/model/qrecord_copy_from_source.go
@@ -1,6 +1,7 @@
 package model
 
 import (
+	"encoding/json"
 	"errors"
 	"fmt"
 	"strings"
@@ -82,6 +83,8 @@ func (src *QRecordCopyFromSource) Values() ([]interface{}, error) {
 			values[i] = str
 		case qvalue.QValueTime:
 			values[i] = pgtype.Time{Microseconds: v.Val.UnixMicro(), Valid: true}
+		case qvalue.QValueTSTZRange:
+			values[i] = v.Val
 		case qvalue.QValueTimestamp:
 			values[i] = pgtype.Timestamp{Time: v.Val, Valid: true}
 		case qvalue.QValueTimestampTZ:
@@ -170,8 +173,16 @@ func (src *QRecordCopyFromSource) Values() ([]interface{}, error) {
 			}
 			values[i] = a
 		case qvalue.QValueJSON:
-			values[i] = v.Val
+			if v.IsArray {
+				var arrayJ []interface{}
+				if err := json.Unmarshal([]byte(v.Value().(string)), &arrayJ); err != nil {
+					return nil, fmt.Errorf("failed to unmarshal JSON array: %v", err)
+				}
+				values[i] = arrayJ
+			} else {
+				values[i] = v.Value()
+			}
 		// And so on for the other types...
 		default:
 			return nil, fmt.Errorf("unsupported value type %T", qValue)
diff --git a/flow/model/qvalue/avro_converter.go b/flow/model/qvalue/avro_converter.go
index 9738f46e8..97d9641b6 100644
--- a/flow/model/qvalue/avro_converter.go
+++ b/flow/model/qvalue/avro_converter.go
@@ -138,7 +138,9 @@ func GetAvroSchemaFromQValueKind(kind QValueKind, targetDWH protos.DBType, preci
 			}, nil
 		}
 		return "string", nil
-	case QValueKindHStore, QValueKindJSON, QValueKindStruct:
+	case QValueKindTSTZRange:
+		return "string", nil
+	case QValueKindHStore, QValueKindJSON, QValueKindJSONB, QValueKindStruct:
 		return "string", nil
 	case QValueKindArrayFloat32:
 		return AvroSchemaArray{
@@ -193,6 +195,8 @@
 			Type: "array",
 			Items: "string",
 		}, nil
+	case QValueKindArrayJSON, QValueKindArrayJSONB:
+		return "string", nil
 	case QValueKindArrayString:
 		return AvroSchemaArray{
 			Type: "array",
			Items: "string",
		}, nil
@@ -315,7 +319,7 @@ func QValueToAvro(value QValue, field *QField, targetDWH protos.DBType, logger l
 		return t, nil
 	case QValueQChar:
 		return c.processNullableUnion("string", string(v.Val))
-	case QValueString, QValueCIDR, QValueINET, QValueMacaddr, QValueInterval:
+	case QValueString, QValueCIDR, QValueINET, QValueMacaddr, QValueInterval, QValueTSTZRange:
 		if c.TargetDWH == protos.DBType_SNOWFLAKE && v.Value() != nil &&
 			(len(v.Value().(string)) > 15*1024*1024) {
 			slog.Warn("Clearing TEXT value > 15MB for Snowflake!")
diff --git a/flow/model/qvalue/kind.go b/flow/model/qvalue/kind.go
index 79e8f89e4..91ab867a0 100644
--- a/flow/model/qvalue/kind.go
+++ b/flow/model/qvalue/kind.go
@@ -26,10 +26,12 @@ const (
 	QValueKindTime QValueKind = "time"
 	QValueKindTimeTZ QValueKind = "timetz"
 	QValueKindInterval QValueKind = "interval"
+	QValueKindTSTZRange QValueKind = "tstzrange"
 	QValueKindNumeric QValueKind = "numeric"
 	QValueKindBytes QValueKind = "bytes"
 	QValueKindUUID QValueKind = "uuid"
 	QValueKindJSON QValueKind = "json"
+	QValueKindJSONB QValueKind = "jsonb"
 	QValueKindHStore QValueKind = "hstore"
 	QValueKindGeography QValueKind = "geography"
 	QValueKindGeometry QValueKind = "geometry"
@@ -51,6 +53,8 @@ const (
 	QValueKindArrayTimestamp QValueKind = "array_timestamp"
 	QValueKindArrayTimestampTZ QValueKind = "array_timestamptz"
 	QValueKindArrayBoolean QValueKind = "array_bool"
+	QValueKindArrayJSON QValueKind = "array_json"
+	QValueKindArrayJSONB QValueKind = "array_jsonb"
 )
 
 func (kind QValueKind) IsArray() bool {
@@ -68,6 +72,7 @@ var QValueKindToSnowflakeTypeMap = map[QValueKind]string{
 	QValueKindQChar: "CHAR",
 	QValueKindString: "STRING",
 	QValueKindJSON: "VARIANT",
+	QValueKindJSONB: "VARIANT",
 	QValueKindTimestamp: "TIMESTAMP_NTZ",
 	QValueKindTimestampTZ: "TIMESTAMP_TZ",
 	QValueKindInterval: "VARIANT",
@@ -94,6 +99,8 @@ var QValueKindToSnowflakeTypeMap = map[QValueKind]string{
 	QValueKindArrayTimestamp: "VARIANT",
 	QValueKindArrayTimestampTZ: "VARIANT",
 	QValueKindArrayBoolean: "VARIANT",
+	QValueKindArrayJSON: "VARIANT",
+	QValueKindArrayJSONB: "VARIANT",
 }
 
 var QValueKindToClickHouseTypeMap = map[QValueKind]string{
@@ -109,6 +116,7 @@ var QValueKindToClickHouseTypeMap = map[QValueKind]string{
 	QValueKindJSON: "String",
 	QValueKindTimestamp: "DateTime64(6)",
 	QValueKindTimestampTZ: "DateTime64(6)",
+	QValueKindTSTZRange: "String",
 	QValueKindTime: "DateTime64(6)",
 	QValueKindTimeTZ: "DateTime64(6)",
 	QValueKindDate: "Date32",
@@ -118,7 +126,6 @@ var QValueKindToClickHouseTypeMap = map[QValueKind]string{
 	QValueKindInvalid: "String",
 	QValueKindHStore: "String",
 
-	// array types will be mapped to VARIANT
 	QValueKindArrayFloat32: "Array(Float32)",
 	QValueKindArrayFloat64: "Array(Float64)",
 	QValueKindArrayInt32: "Array(Int32)",
@@ -129,6 +136,8 @@ var QValueKindToClickHouseTypeMap = map[QValueKind]string{
 	QValueKindArrayDate: "Array(Date)",
 	QValueKindArrayTimestamp: "Array(DateTime64(6))",
 	QValueKindArrayTimestampTZ: "Array(DateTime64(6))",
+	QValueKindArrayJSON: "String",
+	QValueKindArrayJSONB: "String",
 }
 
 func (kind QValueKind) ToDWHColumnType(dwhType protos.DBType) (string, error) {
diff --git a/flow/model/qvalue/qvalue.go b/flow/model/qvalue/qvalue.go
index 9b1c13f75..1277881a3 100644
--- a/flow/model/qvalue/qvalue.go
+++ b/flow/model/qvalue/qvalue.go
@@ -6,7 +6,7 @@ import (
 	"github.com/google/uuid"
 	"github.com/shopspring/decimal"
-	"github.com/yuin/gopher-lua"
+	lua "github.com/yuin/gopher-lua"
 
 	"github.com/PeerDB-io/glua64"
 	"github.com/PeerDB-io/peer-flow/shared"
@@ -294,6 +294,22 @@ func (v QValueInterval) LValue(ls *lua.LState) lua.LValue {
 	return lua.LString(v.Val)
 }
 
+type QValueTSTZRange struct {
+	Val string
+}
+
+func (QValueTSTZRange) Kind() QValueKind {
+	return QValueKindTSTZRange
+}
+
+func (v QValueTSTZRange) Value() any {
+	return v.Val
+}
+
+func (v QValueTSTZRange) LValue(ls *lua.LState) lua.LValue {
+	return lua.LString(v.Val)
+}
+
 type QValueNumeric struct {
 	Val decimal.Decimal
 }
@@ -343,7 +359,8 @@ func (v QValueUUID) LValue(ls *lua.LState) lua.LValue {
 }
 
 type QValueJSON struct {
-	Val string
+	Val     string
+	IsArray bool
 }
 
 func (QValueJSON) Kind() QValueKind {
diff --git a/flow/otel_metrics/otel_manager.go b/flow/otel_metrics/otel_manager.go
index becf13a16..c59adecd4 100644
--- a/flow/otel_metrics/otel_manager.go
+++ b/flow/otel_metrics/otel_manager.go
@@ -20,8 +20,8 @@ import (
 type OtelManager struct {
 	MetricsProvider *sdkmetric.MeterProvider
 	Meter metric.Meter
-	Float64GaugesCache map[string]*Float64SyncGauge
-	Int64GaugesCache map[string]*Int64SyncGauge
+	Float64GaugesCache map[string]metric.Float64Gauge
+	Int64GaugesCache map[string]metric.Int64Gauge
 }
 
 // newOtelResource returns a resource describing this application.
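
For context on the OtelManager change above and the sync_gauges.go rewrite that follows: the PR drops the homegrown callback-based SyncGauge in favor of the synchronous gauge API that recent OpenTelemetry Go releases provide natively (meter.Int64Gauge / meter.Float64Gauge). A minimal, self-contained sketch of the resulting pattern, not part of the diff; the meter name, gauge name, and attribute below are illustrative only:

package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()
	provider := sdkmetric.NewMeterProvider()
	defer func() { _ = provider.Shutdown(ctx) }()

	meter := provider.Meter("peerdb-sketch")
	// Mirrors OtelManager.Int64GaugesCache: gauges are created once and reused by name.
	cache := map[string]metric.Int64Gauge{}

	gauge, ok := cache["open_connections"]
	if !ok {
		var err error
		gauge, err = meter.Int64Gauge("open_connections")
		if err != nil {
			panic(err)
		}
		cache["open_connections"] = gauge
	}

	// Synchronous record: the SDK exports the latest value directly, so the
	// observations map and CompareAndDelete bookkeeping removed below are unnecessary.
	gauge.Record(ctx, 42, metric.WithAttributeSet(attribute.NewSet(attribute.String("peer", "demo"))))
}

Because recording is a plain method call rather than an observable-gauge callback, the GetOrInit helpers below shrink to a cache lookup plus a single meter call.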
diff --git a/flow/otel_metrics/peerdb_gauges/gauges.go b/flow/otel_metrics/peerdb_gauges/gauges.go
index 767aac094..a3b7d5c3e 100644
--- a/flow/otel_metrics/peerdb_gauges/gauges.go
+++ b/flow/otel_metrics/peerdb_gauges/gauges.go
@@ -1,6 +1,8 @@
 package peerdb_gauges
 
 import (
+	"go.opentelemetry.io/otel/metric"
+
 	"github.com/PeerDB-io/peer-flow/otel_metrics"
 )
@@ -12,10 +14,10 @@ const (
 )
 
 type SlotMetricGauges struct {
-	SlotLagGauge *otel_metrics.Float64SyncGauge
-	OpenConnectionsGauge *otel_metrics.Int64SyncGauge
-	OpenReplicationConnectionsGauge *otel_metrics.Int64SyncGauge
-	IntervalSinceLastNormalizeGauge *otel_metrics.Float64SyncGauge
+	SlotLagGauge metric.Float64Gauge
+	OpenConnectionsGauge metric.Int64Gauge
+	OpenReplicationConnectionsGauge metric.Int64Gauge
+	IntervalSinceLastNormalizeGauge metric.Float64Gauge
 }
 
 func BuildGaugeName(baseGaugeName string) string {
diff --git a/flow/otel_metrics/sync_gauges.go b/flow/otel_metrics/sync_gauges.go
index d2ef4924c..e9da02c87 100644
--- a/flow/otel_metrics/sync_gauges.go
+++ b/flow/otel_metrics/sync_gauges.go
@@ -1,106 +1,15 @@
 package otel_metrics
 
 import (
-	"context"
-	"fmt"
-	"sync"
-
-	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/metric"
 )
 
-type ObservationMapValue[V comparable] struct {
-	Value V
-}
-
-// SyncGauge is a generic synchronous gauge that can be used to observe any type of value
-// Inspired from https://github.com/open-telemetry/opentelemetry-go/issues/3984#issuecomment-1743231837
-type SyncGauge[V comparable, O metric.Observable] struct {
-	observableGauge O
-	observations sync.Map
-	name string
-}
-
-func (a *SyncGauge[V, O]) Callback(ctx context.Context, observeFunc func(value V, options ...metric.ObserveOption)) error {
-	a.observations.Range(func(key, value interface{}) bool {
-		attrs := key.(attribute.Set)
-		val := value.(*ObservationMapValue[V])
-		observeFunc(val.Value, metric.WithAttributeSet(attrs))
-		// If the pointer is still same we can safely delete, else it means that the value was overwritten in parallel
-		a.observations.CompareAndDelete(attrs, val)
-		return true
-	})
-	return nil
-}
-
-func (a *SyncGauge[V, O]) Set(input V, attrs attribute.Set) {
-	val := ObservationMapValue[V]{Value: input}
-	a.observations.Store(attrs, &val)
-}
-
-type Int64SyncGauge struct {
-	syncGauge *SyncGauge[int64, metric.Int64Observable]
-}
-
-func (a *Int64SyncGauge) Set(input int64, attrs attribute.Set) {
-	if a == nil {
-		return
-	}
-	a.syncGauge.Set(input, attrs)
-}
-
-func NewInt64SyncGauge(meter metric.Meter, gaugeName string, opts ...metric.Int64ObservableGaugeOption) (*Int64SyncGauge, error) {
-	syncGauge := &SyncGauge[int64, metric.Int64Observable]{
-		name: gaugeName,
-	}
-	observableGauge, err := meter.Int64ObservableGauge(gaugeName,
-		append(opts, metric.WithInt64Callback(func(ctx context.Context, observer metric.Int64Observer) error {
-			return syncGauge.Callback(ctx, func(value int64, options ...metric.ObserveOption) {
-				observer.Observe(value, options...)
-			})
-		}))...)
-	if err != nil {
-		return nil, fmt.Errorf("failed to create Int64SyncGauge: %w", err)
-	}
-	syncGauge.observableGauge = observableGauge
-	return &Int64SyncGauge{syncGauge: syncGauge}, nil
-}
-
-type Float64SyncGauge struct {
-	syncGauge *SyncGauge[float64, metric.Float64Observable]
-}
-
-func (a *Float64SyncGauge) Set(input float64, attrs attribute.Set) {
-	if a == nil {
-		return
-	}
-	a.syncGauge.Set(input, attrs)
-}
-
-func NewFloat64SyncGauge(meter metric.Meter, gaugeName string, opts ...metric.Float64ObservableGaugeOption) (*Float64SyncGauge, error) {
-	syncGauge := &SyncGauge[float64, metric.Float64Observable]{
-		name: gaugeName,
-	}
-	observableGauge, err := meter.Float64ObservableGauge(gaugeName,
-		append(opts, metric.WithFloat64Callback(func(ctx context.Context, observer metric.Float64Observer) error {
-			return syncGauge.Callback(ctx, func(value float64, options ...metric.ObserveOption) {
-				observer.Observe(value, options...)
-			})
-		}))...)
-	if err != nil {
-		return nil, fmt.Errorf("failed to create Float64SyncGauge: %w", err)
-	}
-	syncGauge.observableGauge = observableGauge
-	return &Float64SyncGauge{syncGauge: syncGauge}, nil
-}
-
-func GetOrInitInt64SyncGauge(meter metric.Meter, cache map[string]*Int64SyncGauge, name string,
-	opts ...metric.Int64ObservableGaugeOption,
-) (*Int64SyncGauge, error) {
+func GetOrInitInt64SyncGauge(meter metric.Meter, cache map[string]metric.Int64Gauge, name string, opts ...metric.Int64GaugeOption,
+) (metric.Int64Gauge, error) {
 	gauge, ok := cache[name]
 	if !ok {
 		var err error
-		gauge, err = NewInt64SyncGauge(meter, name, opts...)
+		gauge, err = meter.Int64Gauge(name, opts...)
 		if err != nil {
 			return nil, err
 		}
@@ -109,13 +18,12 @@ func GetOrInitInt64SyncGauge(meter metric.Meter, cache map[string]*Int64SyncGaug
 	return gauge, nil
 }
 
-func GetOrInitFloat64SyncGauge(meter metric.Meter, cache map[string]*Float64SyncGauge,
-	name string, opts ...metric.Float64ObservableGaugeOption,
-) (*Float64SyncGauge, error) {
+func GetOrInitFloat64SyncGauge(meter metric.Meter, cache map[string]metric.Float64Gauge, name string, opts ...metric.Float64GaugeOption,
+) (metric.Float64Gauge, error) {
 	gauge, ok := cache[name]
 	if !ok {
 		var err error
-		gauge, err = NewFloat64SyncGauge(meter, name, opts...)
+		gauge, err = meter.Float64Gauge(name, opts...)
 		if err != nil {
 			return nil, err
 		}
diff --git a/flow/peerdbenv/config.go b/flow/peerdbenv/config.go
index e033b8719..9aa9d2c5e 100644
--- a/flow/peerdbenv/config.go
+++ b/flow/peerdbenv/config.go
@@ -166,3 +166,9 @@ func PeerDBRAPIRequestLoggingEnabled() bool {
 	}
 	return requestLoggingEnabled
 }
+
+// PEERDB_MAINTENANCE_MODE_WAIT_ALERT_SECONDS tells how long to wait before alerting that peerdb has been stuck in maintenance mode
+// for too long
+func PeerDBMaintenanceModeWaitAlertSeconds() int {
+	return getEnvInt("PEERDB_MAINTENANCE_MODE_WAIT_ALERT_SECONDS", 600)
+}
diff --git a/flow/peerdbenv/dynamicconf.go b/flow/peerdbenv/dynamicconf.go
index 1e2f22590..b0cbe05f5 100644
--- a/flow/peerdbenv/dynamicconf.go
+++ b/flow/peerdbenv/dynamicconf.go
@@ -8,8 +8,10 @@ import (
 	"strconv"
 	"time"
 
+	"github.com/aws/smithy-go/ptr"
 	"github.com/jackc/pgx/v5"
 	"github.com/jackc/pgx/v5/pgtype"
+	"github.com/jackc/pgx/v5/pgxpool"
 	"golang.org/x/exp/constraints"
 
 	"github.com/PeerDB-io/peer-flow/generated/protos"
@@ -17,14 +19,6 @@ import (
 )
 
 var DynamicSettings = [...]*protos.DynamicSetting{
-	{
-		Name: "PEERDB_MAX_SYNCS_PER_CDC_FLOW",
-		Description: "Experimental setting: changes number of syncs per workflow, affects frequency of replication slot disconnects",
-		DefaultValue: "32",
-		ValueType: protos.DynconfValueType_UINT,
-		ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
-		TargetForSetting: protos.DynconfTarget_ALL,
-	},
 	{
 		Name: "PEERDB_CDC_CHANNEL_BUFFER_SIZE",
 		Description: "Advanced setting: changes buffer size of channel PeerDB uses while streaming rows read to destination in CDC",
@@ -68,17 +62,14 @@ var DynamicSettings = [...]*protos.DynamicSetting{
 	{
 		Name: "PEERDB_ENABLE_WAL_HEARTBEAT",
 		Description: "Enables WAL heartbeat to prevent replication slot lag from increasing during times of no activity",
-		DefaultValue: "false",
+		DefaultValue: "true",
 		ValueType: protos.DynconfValueType_BOOL,
 		ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
 		TargetForSetting: protos.DynconfTarget_ALL,
 	},
 	{
-		Name: "PEERDB_WAL_HEARTBEAT_QUERY",
-		DefaultValue: `BEGIN;
-DROP AGGREGATE IF EXISTS PEERDB_EPHEMERAL_HEARTBEAT(float4);
-CREATE AGGREGATE PEERDB_EPHEMERAL_HEARTBEAT(float4) (SFUNC = float4pl, STYPE = float4);
-DROP AGGREGATE PEERDB_EPHEMERAL_HEARTBEAT(float4); END;`,
+		Name: "PEERDB_WAL_HEARTBEAT_QUERY",
+		DefaultValue: "SELECT pg_logical_emit_message(false,'peerdb_heartbeat','')",
 		ValueType: protos.DynconfValueType_STRING,
 		Description: "SQL to run during each WAL heartbeat",
 		ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
@@ -92,6 +83,13 @@ DROP AGGREGATE PEERDB_EPHEMERAL_HEARTBEAT(float4); END;`,
 		ApplyMode: protos.DynconfApplyMode_APPLY_MODE_AFTER_RESUME,
 		TargetForSetting: protos.DynconfTarget_ALL,
 	},
+	{
+		Name: "PEERDB_FULL_REFRESH_OVERWRITE_MODE",
+		Description: "Enables full refresh mode for query replication mirrors of overwrite type",
+		DefaultValue: "false",
+		ValueType: protos.DynconfValueType_BOOL,
+		ApplyMode: protos.DynconfApplyMode_APPLY_MODE_NEW_MIRROR,
+	},
 	{
 		Name: "PEERDB_NULLABLE",
 		Description: "Propagate nullability in schema",
@@ -116,6 +114,15 @@ DROP AGGREGATE PEERDB_EPHEMERAL_HEARTBEAT(float4); END;`,
 		ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
 		TargetForSetting: protos.DynconfTarget_CLICKHOUSE,
 	},
+	{
+		Name: "PEERDB_S3_PART_SIZE",
+		Description: "S3 upload part size in bytes, may need to increase for large batches. " +
+			"https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html",
+		DefaultValue: "0",
+		ValueType: protos.DynconfValueType_INT,
+		ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
+		TargetForSetting: protos.DynconfTarget_ALL,
+	},
 	{
 		Name: "PEERDB_QUEUE_FORCE_TOPIC_CREATION",
 		Description: "Force auto topic creation in mirrors, applies to Kafka and PubSub mirrors",
@@ -164,6 +171,22 @@ DROP AGGREGATE PEERDB_EPHEMERAL_HEARTBEAT(float4); END;`,
 		ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
 		TargetForSetting: protos.DynconfTarget_CLICKHOUSE,
 	},
+	{
+		Name: "PEERDB_CLICKHOUSE_MAX_INSERT_THREADS",
+		Description: "Configures max_insert_threads setting on clickhouse for inserting into destination table. Setting left unset when 0",
+		DefaultValue: "0",
+		ValueType: protos.DynconfValueType_UINT,
+		ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
+		TargetForSetting: protos.DynconfTarget_CLICKHOUSE,
+	},
+	{
+		Name: "PEERDB_CLICKHOUSE_PARALLEL_NORMALIZE",
+		Description: "Divide tables in batch into N insert selects. Helps distribute load to multiple nodes",
+		DefaultValue: "0",
+		ValueType: protos.DynconfValueType_INT,
+		ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
+		TargetForSetting: protos.DynconfTarget_CLICKHOUSE,
+	},
 	{
 		Name: "PEERDB_INTERVAL_SINCE_LAST_NORMALIZE_THRESHOLD_MINUTES",
 		Description: "Duration in minutes since last normalize to start alerting, 0 disables all alerting entirely",
@@ -180,6 +203,14 @@ DROP AGGREGATE PEERDB_EPHEMERAL_HEARTBEAT(float4); END;`,
 		ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
 		TargetForSetting: protos.DynconfTarget_ALL,
 	},
+	{
+		Name: "PEERDB_MAINTENANCE_MODE_ENABLED",
+		Description: "Whether PeerDB is in maintenance mode, which disables any modifications to mirrors",
+		DefaultValue: "false",
+		ValueType: protos.DynconfValueType_BOOL,
+		ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
+		TargetForSetting: protos.DynconfTarget_ALL,
+	},
 }
 
 var DynamicIndex = func() map[string]int {
@@ -232,8 +263,8 @@ func dynamicConfSigned[T constraints.Signed](ctx context.Context, env map[string
 		return strconv.ParseInt(value, 10, 64)
 	})
 	if err != nil {
-		shared.LoggerFromCtx(ctx).Error("Failed to parse as int64", slog.Any("error", err))
-		return 0, fmt.Errorf("failed to parse as int64: %w", err)
+		shared.LoggerFromCtx(ctx).Error("Failed to parse as int64", slog.String("key", key), slog.Any("error", err))
+		return 0, fmt.Errorf("failed to parse %s as int64: %w", key, err)
 	}
 
 	return T(value), nil
@@ -244,8 +275,8 @@ func dynamicConfUnsigned[T constraints.Unsigned](ctx context.Context, env map[st
 		return strconv.ParseUint(value, 10, 64)
 	})
 	if err != nil {
-		shared.LoggerFromCtx(ctx).Error("Failed to parse as uint64", slog.Any("error", err))
-		return 0, fmt.Errorf("failed to parse as uint64: %w", err)
+		shared.LoggerFromCtx(ctx).Error("Failed to parse as uint64", slog.String("key", key), slog.Any("error", err))
+		return 0, fmt.Errorf("failed to parse %s as uint64: %w", key, err)
 	}
 
 	return T(value), nil
@@ -254,13 +285,27 @@ func dynamicConfUnsigned[T constraints.Unsigned](ctx context.Context, env map[st
 func dynamicConfBool(ctx context.Context, env map[string]string, key string) (bool, error) {
 	value, err := dynLookupConvert(ctx, env, key, strconv.ParseBool)
 	if err != nil {
-		shared.LoggerFromCtx(ctx).Error("Failed to parse bool", slog.Any("error", err))
-		return false, fmt.Errorf("failed to parse bool: %w", err)
+		shared.LoggerFromCtx(ctx).Error("Failed to parse bool", slog.String("key", key), slog.Any("error", err))
+		return false, fmt.Errorf("failed to parse %s as bool: %w", key, err)
 	}
 
 	return value, nil
 }
 
+func UpdateDynamicSetting(ctx context.Context, pool *pgxpool.Pool, name string, value *string) error {
+	if pool == nil {
+		var err error
+		pool, err = GetCatalogConnectionPoolFromEnv(ctx)
+		if err != nil {
+			shared.LoggerFromCtx(ctx).Error("Failed to get catalog connection pool for dynamic setting update", slog.Any("error", err))
+			return fmt.Errorf("failed to get catalog connection pool: %w", err)
+		}
+	}
+	_, err := pool.Exec(ctx, `insert into dynamic_settings (config_name, config_value) values ($1, $2)
+	on conflict (config_name) do update set config_value = $2`, name, value)
+	return err
+}
+
 // PEERDB_SLOT_LAG_MB_ALERT_THRESHOLD, 0 disables slot lag alerting entirely
 func PeerDBSlotLagMBAlertThreshold(ctx context.Context, env map[string]string) (uint32, error) {
 	return dynamicConfUnsigned[uint32](ctx, env, "PEERDB_SLOT_LAG_MB_ALERT_THRESHOLD")
@@ -324,6 +369,10 @@ func PeerDBEnableParallelSyncNormalize(ctx context.Context, env map[string]strin
 	return dynamicConfBool(ctx, env, "PEERDB_ENABLE_PARALLEL_SYNC_NORMALIZE")
 }
 
+func PeerDBFullRefreshOverwriteMode(ctx context.Context, env map[string]string) (bool, error) {
+	return dynamicConfBool(ctx, env, "PEERDB_FULL_REFRESH_OVERWRITE_MODE")
+}
+
 func PeerDBNullable(ctx context.Context, env map[string]string) (bool, error) {
 	return dynamicConfBool(ctx, env, "PEERDB_NULLABLE")
 }
@@ -332,6 +381,14 @@ func PeerDBEnableClickHousePrimaryUpdate(ctx context.Context, env map[string]str
 	return dynamicConfBool(ctx, env, "PEERDB_CLICKHOUSE_ENABLE_PRIMARY_UPDATE")
 }
 
+func PeerDBClickHouseMaxInsertThreads(ctx context.Context, env map[string]string) (int64, error) {
+	return dynamicConfSigned[int64](ctx, env, "PEERDB_CLICKHOUSE_MAX_INSERT_THREADS")
+}
+
+func PeerDBClickHouseParallelNormalize(ctx context.Context, env map[string]string) (int, error) {
+	return dynamicConfSigned[int](ctx, env, "PEERDB_CLICKHOUSE_PARALLEL_NORMALIZE")
+}
+
 func PeerDBSnowflakeMergeParallelism(ctx context.Context, env map[string]string) (int64, error) {
 	return dynamicConfSigned[int64](ctx, env, "PEERDB_SNOWFLAKE_MERGE_PARALLELISM")
 }
@@ -340,6 +397,10 @@ func PeerDBClickHouseAWSS3BucketName(ctx context.Context, env map[string]string)
 	return dynLookup(ctx, env, "PEERDB_CLICKHOUSE_AWS_S3_BUCKET_NAME")
 }
 
+func PeerDBS3PartSize(ctx context.Context, env map[string]string) (int64, error) {
+	return dynamicConfSigned[int64](ctx, env, "PEERDB_S3_PART_SIZE")
+}
+
 // Kafka has topic auto create as an option, auto.create.topics.enable
 // But non-dedicated cluster maybe can't set config, may want peerdb to create topic. Similar for PubSub
 func PeerDBQueueForceTopicCreation(ctx context.Context, env map[string]string) (bool, error) {
@@ -354,3 +415,11 @@ func PeerDBIntervalSinceLastNormalizeThresholdMinutes(ctx context.Context, env m
 func PeerDBApplicationNamePerMirrorName(ctx context.Context, env map[string]string) (bool, error) {
 	return dynamicConfBool(ctx, env, "PEERDB_APPLICATION_NAME_PER_MIRROR_NAME")
 }
+
+func PeerDBMaintenanceModeEnabled(ctx context.Context, env map[string]string) (bool, error) {
+	return dynamicConfBool(ctx, env, "PEERDB_MAINTENANCE_MODE_ENABLED")
+}
+
+func UpdatePeerDBMaintenanceModeEnabled(ctx context.Context, pool *pgxpool.Pool, enabled bool) error {
+	return UpdateDynamicSetting(ctx, pool, "PEERDB_MAINTENANCE_MODE_ENABLED", ptr.String(strconv.FormatBool(enabled)))
+}
diff --git a/flow/shared/constants.go b/flow/shared/constants.go
index 2dc5a8a64..955ecfc4b 100644
--- a/flow/shared/constants.go
+++ b/flow/shared/constants.go
@@ -11,8 +11,9 @@ type (
 
 const (
 	// Task Queues
-	PeerFlowTaskQueue     TaskQueueID = "peer-flow-task-queue"
-	SnapshotFlowTaskQueue TaskQueueID = "snapshot-flow-task-queue"
+	PeerFlowTaskQueue        TaskQueueID = "peer-flow-task-queue"
+	SnapshotFlowTaskQueue    TaskQueueID = "snapshot-flow-task-queue"
+	MaintenanceFlowTaskQueue TaskQueueID = "maintenance-flow-task-queue"
 
 	// Queries
 	CDCFlowStateQuery = "q-cdc-flow-state"
diff --git a/flow/shared/postgres.go b/flow/shared/postgres.go
index be3cf7d07..121fb73bf 100644
--- a/flow/shared/postgres.go
+++ b/flow/shared/postgres.go
@@ -58,17 +58,17 @@ func GetCustomDataTypes(ctx context.Context, conn *pgx.Conn) (map[uint32]string,
 	AND n.nspname NOT IN ('pg_catalog', 'information_schema');
 	`)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get custom types: %w", err)
+		return nil, fmt.Errorf("failed to get customTypeMapping: %w", err)
 	}
 
 	customTypeMap := map[uint32]string{}
-	for rows.Next() {
-		var typeID pgtype.Uint32
-		var typeName pgtype.Text
-		if err := rows.Scan(&typeID, &typeName); err != nil {
-			return nil, fmt.Errorf("failed to scan row: %w", err)
-		}
+	var typeID pgtype.Uint32
+	var typeName pgtype.Text
+	if _, err := pgx.ForEachRow(rows, []any{&typeID, &typeName}, func() error {
 		customTypeMap[typeID.Uint32] = typeName.String
+		return nil
+	}); err != nil {
+		return nil, fmt.Errorf("failed to scan into customTypeMapping: %w", err)
 	}
 	return customTypeMap, nil
 }
diff --git a/flow/shared/telemetry/event_types.go b/flow/shared/telemetry/event_types.go
index 0d87ba354..a68fab869 100644
--- a/flow/shared/telemetry/event_types.go
+++ b/flow/shared/telemetry/event_types.go
@@ -3,7 +3,11 @@ package telemetry
 type EventType string
 
 const (
-	CreatePeer   EventType = "CreatePeer"
-	CreateMirror EventType = "CreateMirror"
-	Other        EventType = "Other"
+	CreatePeer       EventType = "CreatePeer"
+	CreateMirror     EventType = "CreateMirror"
+	StartMaintenance EventType = "StartMaintenance"
+	EndMaintenance   EventType = "EndMaintenance"
+	MaintenanceWait  EventType = "MaintenanceWait"
+
+	Other EventType = "Other"
 )
diff --git a/flow/shared/worklow.go b/flow/shared/worklow.go
new file mode 100644
index 000000000..c9cafc37e
--- /dev/null
+++ b/flow/shared/worklow.go
@@ -0,0 +1,27 @@
+package shared
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+
+	"go.temporal.io/sdk/client"
+
+	"github.com/PeerDB-io/peer-flow/generated/protos"
+)
+
+func GetWorkflowStatus(ctx context.Context, temporalClient client.Client, workflowID string) (protos.FlowStatus, error) {
+	res, err := temporalClient.QueryWorkflow(ctx, workflowID, "", FlowStatusQuery)
+	if err != nil {
+		slog.Error("failed to query status in workflow with ID "+workflowID, slog.Any("error", err))
+		return protos.FlowStatus_STATUS_UNKNOWN,
+			fmt.Errorf("failed to query status in workflow with ID %s: %w", workflowID, err)
+	}
+	var state protos.FlowStatus
+	if err := res.Get(&state); err != nil {
+		slog.Error("failed to get status in workflow with ID "+workflowID, slog.Any("error", err))
+		return protos.FlowStatus_STATUS_UNKNOWN,
+			fmt.Errorf("failed to get status in workflow with ID %s: %w", workflowID, err)
+	}
+	return state, nil
+}
diff --git a/flow/workflows/activities.go b/flow/workflows/activities.go
index 0b23d10dd..5fe699419 100644
--- a/flow/workflows/activities.go
+++ b/flow/workflows/activities.go
@@ -3,6 +3,7 @@ package peerflow
 import "github.com/PeerDB-io/peer-flow/activities"
 
 var (
-	flowable *activities.FlowableActivity
-	snapshot *activities.SnapshotActivity
+	flowable    *activities.FlowableActivity
+	snapshot    *activities.SnapshotActivity
+	maintenance *activities.MaintenanceActivity
 )
diff --git a/flow/workflows/cdc_flow.go b/flow/workflows/cdc_flow.go
index 72e37b01f..0c97af9b7 100644
--- a/flow/workflows/cdc_flow.go
+++ b/flow/workflows/cdc_flow.go
@@ -480,13 +480,15 @@ func CDCFlowWorkflow(
 			}
 		}
 
-		state.CurrentFlowStatus = protos.FlowStatus_STATUS_RUNNING
 		logger.Info("executed setup flow and snapshot flow")
 
-		// if initial_copy_only is opted for, we end the flow here.
 		if cfg.InitialSnapshotOnly {
+			logger.Info("initial snapshot only, ending flow")
+			state.CurrentFlowStatus = protos.FlowStatus_STATUS_COMPLETED
 			return state, nil
 		}
+
+		state.CurrentFlowStatus = protos.FlowStatus_STATUS_RUNNING
 	}
 
 	syncFlowID := GetChildWorkflowID("sync-flow", cfg.FlowJobName, originalRunID)
diff --git a/flow/workflows/drop_flow.go b/flow/workflows/drop_flow.go
index 51bf0091a..93086157d 100644
--- a/flow/workflows/drop_flow.go
+++ b/flow/workflows/drop_flow.go
@@ -92,6 +92,15 @@ func DropFlowWorkflow(ctx workflow.Context, input *protos.DropFlowInput) error {
 		}
 	}
 
+	if input.FlowConnectionConfigs != nil {
+		err := executeCDCDropActivities(ctx, input)
+		if err != nil {
+			workflow.GetLogger(ctx).Error("failed to drop CDC flow", slog.Any("error", err))
+			return err
+		}
+		workflow.GetLogger(ctx).Info("CDC flow dropped successfully")
+	}
+
 	removeFlowEntriesCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{
 		StartToCloseTimeout: 1 * time.Minute,
 	})
@@ -103,14 +112,5 @@ func DropFlowWorkflow(ctx workflow.Context, input *protos.DropFlowInput) error {
 		return err
 	}
 
-	if input.FlowConnectionConfigs != nil {
-		err := executeCDCDropActivities(ctx, input)
-		if err != nil {
-			workflow.GetLogger(ctx).Error("failed to drop CDC flow", slog.Any("error", err))
-			return err
-		}
-		workflow.GetLogger(ctx).Info("CDC flow dropped successfully")
-	}
-
 	return nil
 }
diff --git a/flow/workflows/local_activities.go b/flow/workflows/local_activities.go
index d163352ca..7a3e80f24 100644
--- a/flow/workflows/local_activities.go
+++ b/flow/workflows/local_activities.go
@@ -29,6 +29,20 @@ func getParallelSyncNormalize(wCtx workflow.Context, logger log.Logger, env map[
 	return parallel
 }
 
+func getQRepOverwriteFullRefreshMode(wCtx workflow.Context, logger log.Logger, env map[string]string) bool {
+	checkCtx := workflow.WithLocalActivityOptions(wCtx, workflow.LocalActivityOptions{
+		StartToCloseTimeout: time.Minute,
+	})
+
+	getFullRefreshFuture := workflow.ExecuteLocalActivity(checkCtx, peerdbenv.PeerDBFullRefreshOverwriteMode, env)
+	var fullRefreshEnabled bool
+	if err := getFullRefreshFuture.Get(checkCtx, &fullRefreshEnabled); err != nil {
+		logger.Warn("Failed to check if full refresh mode is enabled", slog.Any("error", err))
+		return false
+	}
+	return fullRefreshEnabled
+}
+
 func localPeerType(ctx context.Context, name string) (protos.DBType, error) {
 	pool, err := peerdbenv.GetCatalogConnectionPoolFromEnv(ctx)
 	if err != nil {
diff --git a/flow/workflows/maintenance_flow.go b/flow/workflows/maintenance_flow.go
new file mode 100644
index 000000000..c48750a80
--- /dev/null
+++ b/flow/workflows/maintenance_flow.go
@@ -0,0 +1,305 @@
+package peerflow
+
+import (
+	"context"
+	"log/slog"
+	"time"
+
+	tEnums "go.temporal.io/api/enums/v1"
+	"go.temporal.io/sdk/client"
+	"go.temporal.io/sdk/log"
+	"go.temporal.io/sdk/workflow"
+
+	"github.com/PeerDB-io/peer-flow/generated/protos"
+	"github.com/PeerDB-io/peer-flow/peerdbenv"
+	"github.com/PeerDB-io/peer-flow/shared"
+)
+
+func getMaintenanceWorkflowOptions(workflowIDPrefix string, taskQueueId shared.TaskQueueID) client.StartWorkflowOptions {
+	maintenanceWorkflowOptions := client.StartWorkflowOptions{
+		WorkflowIDReusePolicy: tEnums.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE,
+		WorkflowIDConflictPolicy: tEnums.WORKFLOW_ID_CONFLICT_POLICY_USE_EXISTING,
+		TaskQueue: peerdbenv.PeerFlowTaskQueueName(taskQueueId),
+		ID: workflowIDPrefix,
+	}
+	if deploymentUid := peerdbenv.PeerDBDeploymentUID(); deploymentUid != "" {
+		maintenanceWorkflowOptions.ID += "-" + deploymentUid
+	}
+	return maintenanceWorkflowOptions
+}
+
+// RunStartMaintenanceWorkflow is a helper function to start the StartMaintenanceWorkflow with sane defaults
+func RunStartMaintenanceWorkflow(
+	ctx context.Context,
+	temporalClient client.Client,
+	input *protos.StartMaintenanceFlowInput,
+	taskQueueId shared.TaskQueueID,
+) (client.WorkflowRun, error) {
+	workflowOptions := getMaintenanceWorkflowOptions("start-maintenance", taskQueueId)
+	workflowRun, err := temporalClient.ExecuteWorkflow(ctx, workflowOptions, StartMaintenanceWorkflow, input)
+	if err != nil {
+		return nil, err
+	}
+	return workflowRun, nil
+}
+
+// RunEndMaintenanceWorkflow is a helper function to start the EndMaintenanceWorkflow with sane defaults
+func RunEndMaintenanceWorkflow(
+	ctx context.Context,
+	temporalClient client.Client,
+	input *protos.EndMaintenanceFlowInput,
+	taskQueueId shared.TaskQueueID,
+) (client.WorkflowRun, error) {
+	workflowOptions := getMaintenanceWorkflowOptions("end-maintenance", taskQueueId)
+	workflowRun, err := temporalClient.ExecuteWorkflow(ctx, workflowOptions, EndMaintenanceWorkflow, input)
+	if err != nil {
+		return nil, err
+	}
+	return workflowRun, nil
+}
+
+func StartMaintenanceWorkflow(ctx workflow.Context, input *protos.StartMaintenanceFlowInput) (*protos.StartMaintenanceFlowOutput, error) {
+	logger := workflow.GetLogger(ctx)
+	logger.Info("Starting StartMaintenance workflow", "input", input)
+	defer runBackgroundAlerter(ctx)()
+
+	maintenanceFlowOutput, err := startMaintenance(ctx, logger)
+	if err != nil {
+		slog.Error("Error in StartMaintenance workflow", "error", err)
+		return nil, err
+	}
+	return maintenanceFlowOutput, nil
+}
+
+func startMaintenance(ctx workflow.Context, logger log.Logger) (*protos.StartMaintenanceFlowOutput, error) {
+	ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{
+		StartToCloseTimeout: 24 * time.Hour,
+	})
+
+	snapshotWaitCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{
+		StartToCloseTimeout: 24 * time.Hour,
+		HeartbeatTimeout: 1 * time.Minute,
+	})
workflow.ExecuteActivity(snapshotWaitCtx, + maintenance.WaitForRunningSnapshots, + ) + err := waitSnapshotsFuture.Get(snapshotWaitCtx, nil) + if err != nil { + return nil, err + } + + enableCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 5 * time.Minute, + }) + enableMaintenanceFuture := workflow.ExecuteActivity(enableCtx, maintenance.EnableMaintenanceMode) + + if err := enableMaintenanceFuture.Get(enableCtx, nil); err != nil { + return nil, err + } + + logger.Info("Waiting for all snapshot mirrors to finish snapshotting") + waitSnapshotsPostEnableFuture := workflow.ExecuteActivity(snapshotWaitCtx, + maintenance.WaitForRunningSnapshots, + ) + + if err := waitSnapshotsPostEnableFuture.Get(snapshotWaitCtx, nil); err != nil { + return nil, err + } + + mirrorsList, err := getAllMirrors(ctx) + if err != nil { + return nil, err + } + + runningMirrors, err := pauseAndGetRunningMirrors(ctx, mirrorsList, logger) + if err != nil { + return nil, err + } + + backupCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 2 * time.Minute, + }) + future := workflow.ExecuteActivity(backupCtx, maintenance.BackupAllPreviouslyRunningFlows, runningMirrors) + + if err := future.Get(backupCtx, nil); err != nil { + return nil, err + } + version, err := GetPeerDBVersion(ctx) + if err != nil { + return nil, err + } + logger.Info("StartMaintenance workflow completed", "version", version) + return &protos.StartMaintenanceFlowOutput{ + Version: version, + }, nil +} + +func pauseAndGetRunningMirrors( + ctx workflow.Context, + mirrorsList *protos.MaintenanceMirrors, + logger log.Logger, +) (*protos.MaintenanceMirrors, error) { + ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 24 * time.Hour, + HeartbeatTimeout: 1 * time.Minute, + }) + selector := workflow.NewSelector(ctx) + runningMirrors := make([]bool, len(mirrorsList.Mirrors)) + for i, mirror := range mirrorsList.Mirrors { + f := workflow.ExecuteActivity( + ctx, + maintenance.PauseMirrorIfRunning, + mirror, + ) + + selector.AddFuture(f, func(f workflow.Future) { + var wasRunning bool + err := f.Get(ctx, &wasRunning) + if err != nil { + logger.Error("Error checking and pausing mirror", "mirror", mirror, "error", err) + } else { + logger.Info("Finished check and pause for mirror", "mirror", mirror, "wasRunning", wasRunning) + runningMirrors[i] = wasRunning + } + }) + } + onlyRunningMirrors := make([]*protos.MaintenanceMirror, 0, len(mirrorsList.Mirrors)) + for range mirrorsList.Mirrors { + selector.Select(ctx) + if err := ctx.Err(); err != nil { + return nil, err + } + } + for i, mirror := range mirrorsList.Mirrors { + if runningMirrors[i] { + onlyRunningMirrors = append(onlyRunningMirrors, mirror) + } + } + return &protos.MaintenanceMirrors{ + Mirrors: onlyRunningMirrors, + }, nil +} + +func getAllMirrors(ctx workflow.Context) (*protos.MaintenanceMirrors, error) { + ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 2 * time.Minute, + }) + getMirrorsFuture := workflow.ExecuteActivity(ctx, maintenance.GetAllMirrors) + var mirrorsList protos.MaintenanceMirrors + err := getMirrorsFuture.Get(ctx, &mirrorsList) + return &mirrorsList, err +} + +func EndMaintenanceWorkflow(ctx workflow.Context, input *protos.EndMaintenanceFlowInput) (*protos.EndMaintenanceFlowOutput, error) { + logger := workflow.GetLogger(ctx) + logger.Info("Starting EndMaintenance workflow", "input", input) + defer runBackgroundAlerter(ctx)() + + 
flowOutput, err := endMaintenance(ctx, logger) + if err != nil { + slog.Error("Error in EndMaintenance workflow", "error", err) + return nil, err + } + return flowOutput, nil +} + +func endMaintenance(ctx workflow.Context, logger log.Logger) (*protos.EndMaintenanceFlowOutput, error) { + ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 24 * time.Hour, + HeartbeatTimeout: 1 * time.Minute, + }) + + mirrorsList, err := resumeBackedUpMirrors(ctx, logger) + if err != nil { + return nil, err + } + + clearBackupsFuture := workflow.ExecuteActivity(ctx, maintenance.CleanBackedUpFlows) + if err := clearBackupsFuture.Get(ctx, nil); err != nil { + return nil, err + } + + logger.Info("Resumed backed up mirrors", "mirrors", mirrorsList) + + disableCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 5 * time.Minute, + }) + + future := workflow.ExecuteActivity(disableCtx, maintenance.DisableMaintenanceMode) + if err := future.Get(disableCtx, nil); err != nil { + return nil, err + } + logger.Info("Disabled maintenance mode") + version, err := GetPeerDBVersion(ctx) + if err != nil { + return nil, err + } + + logger.Info("EndMaintenance workflow completed", "version", version) + return &protos.EndMaintenanceFlowOutput{ + Version: version, + }, nil +} + +func resumeBackedUpMirrors(ctx workflow.Context, logger log.Logger) (*protos.MaintenanceMirrors, error) { + future := workflow.ExecuteActivity(ctx, maintenance.GetBackedUpFlows) + ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 5 * time.Minute, + }) + var mirrorsList *protos.MaintenanceMirrors + err := future.Get(ctx, &mirrorsList) + if err != nil { + return nil, err + } + + selector := workflow.NewSelector(ctx) + for _, mirror := range mirrorsList.Mirrors { + activityInput := mirror + f := workflow.ExecuteActivity( + ctx, + maintenance.ResumeMirror, + activityInput, + ) + + selector.AddFuture(f, func(f workflow.Future) { + err := f.Get(ctx, nil) + if err != nil { + logger.Error("Error resuming mirror", "mirror", mirror, "error", err) + } else { + logger.Info("Finished resuming mirror", "mirror", mirror) + } + }) + } + + for range mirrorsList.Mirrors { + selector.Select(ctx) + if err := ctx.Err(); err != nil { + return nil, err + } + } + return mirrorsList, nil +} + +// runBackgroundAlerter Alerts every few minutes regarding currently running maintenance workflows +func runBackgroundAlerter(ctx workflow.Context) workflow.CancelFunc { + activityCtx, cancelActivity := workflow.WithCancel(ctx) + alerterCtx := workflow.WithActivityOptions(activityCtx, workflow.ActivityOptions{ + StartToCloseTimeout: 24 * time.Hour, + HeartbeatTimeout: 1 * time.Minute, + }) + workflow.ExecuteActivity(alerterCtx, maintenance.BackgroundAlerter) + return cancelActivity +} + +func GetPeerDBVersion(wCtx workflow.Context) (string, error) { + activityCtx := workflow.WithLocalActivityOptions(wCtx, workflow.LocalActivityOptions{ + StartToCloseTimeout: time.Minute, + }) + getVersionActivity := func(ctx context.Context) (string, error) { + return peerdbenv.PeerDBVersionShaShort(), nil + } + var version string + future := workflow.ExecuteLocalActivity(activityCtx, getVersionActivity) + err := future.Get(activityCtx, &version) + return version, err +} diff --git a/flow/workflows/qrep_flow.go b/flow/workflows/qrep_flow.go index c7348eefa..f862b4f3d 100644 --- a/flow/workflows/qrep_flow.go +++ b/flow/workflows/qrep_flow.go @@ -32,13 +32,15 @@ type QRepPartitionFlowExecution 
 	runUUID string
 }
 
+var InitialLastPartition = &protos.QRepPartition{
+	PartitionId: "not-applicable-partition",
+	Range:       nil,
+}
+
 // returns a new empty QRepFlowState
 func newQRepFlowState() *protos.QRepFlowState {
 	return &protos.QRepFlowState{
-		LastPartition: &protos.QRepPartition{
-			PartitionId: "not-applicable-partition",
-			Range:       nil,
-		},
+		LastPartition:          InitialLastPartition,
 		NumPartitionsProcessed: 0,
 		NeedsResync:            true,
 		CurrentFlowStatus:      protos.FlowStatus_STATUS_RUNNING,
@@ -461,8 +463,10 @@ func QRepWaitForNewRowsWorkflow(ctx workflow.Context, config *protos.QRepConfig,
 		return fmt.Errorf("error checking for new rows: %w", err)
 	}
 
+	optedForOverwrite := config.WriteMode.WriteType == protos.QRepWriteType_QREP_WRITE_MODE_OVERWRITE
+	fullRefresh := optedForOverwrite && getQRepOverwriteFullRefreshMode(ctx, logger, config.Env)
 	// If no new rows are found, continue as new
-	if !hasNewRows {
+	if !hasNewRows || fullRefresh {
 		waitBetweenBatches := 5 * time.Second
 		if config.WaitBetweenBatchesSeconds > 0 {
 			waitBetweenBatches = time.Duration(config.WaitBetweenBatchesSeconds) * time.Second
@@ -472,6 +476,9 @@ func QRepWaitForNewRowsWorkflow(ctx workflow.Context, config *protos.QRepConfig,
 			return sleepErr
 		}
 
+		if fullRefresh {
+			return nil
+		}
 		logger.Info("QRepWaitForNewRowsWorkflow: continuing the loop")
 		return workflow.NewContinueAsNewError(ctx, QRepWaitForNewRowsWorkflow, config, lastPartition)
 	}
@@ -545,8 +552,16 @@ func QRepFlowWorkflow(
 		return state, err
 	}
 
-	if !config.InitialCopyOnly && state.LastPartition != nil {
-		if err := q.waitForNewRows(ctx, signalChan, state.LastPartition); err != nil {
+	fullRefresh := false
+	lastPartition := state.LastPartition
+	if config.WriteMode.WriteType == protos.QRepWriteType_QREP_WRITE_MODE_OVERWRITE {
+		if fullRefresh = getQRepOverwriteFullRefreshMode(ctx, q.logger, config.Env); fullRefresh {
+			lastPartition = InitialLastPartition
+		}
+	}
+
+	if !config.InitialCopyOnly && lastPartition != nil {
+		if err := q.waitForNewRows(ctx, signalChan, lastPartition); err != nil {
 			return state, err
 		}
 	}
@@ -580,7 +595,7 @@
 		q.logger.Info(fmt.Sprintf("%d partitions processed", len(partitions.Partitions)))
 		state.NumPartitionsProcessed += uint64(len(partitions.Partitions))
 
-		if len(partitions.Partitions) > 0 {
+		if len(partitions.Partitions) > 0 && !fullRefresh {
 			state.LastPartition = partitions.Partitions[len(partitions.Partitions)-1]
 		}
 	}
diff --git a/flow/workflows/register.go b/flow/workflows/register.go
index 35adf135b..2c4b32ba3 100644
--- a/flow/workflows/register.go
+++ b/flow/workflows/register.go
@@ -18,4 +18,7 @@ func RegisterFlowWorkerWorkflows(w worker.WorkflowRegistry) {
 	w.RegisterWorkflow(GlobalScheduleManagerWorkflow)
 	w.RegisterWorkflow(HeartbeatFlowWorkflow)
 	w.RegisterWorkflow(RecordSlotSizeWorkflow)
+
+	w.RegisterWorkflow(StartMaintenanceWorkflow)
+	w.RegisterWorkflow(EndMaintenanceWorkflow)
 }
diff --git a/flow/workflows/snapshot_flow.go b/flow/workflows/snapshot_flow.go
index c8b6a3fd2..9b21b7b38 100644
--- a/flow/workflows/snapshot_flow.go
+++ b/flow/workflows/snapshot_flow.go
@@ -166,7 +166,7 @@ func (s *SnapshotFlowExecution) cloneTable(
 		numWorkers = s.config.SnapshotMaxParallelWorkers
 	}
 
-	numRowsPerPartition := uint32(500000)
+	numRowsPerPartition := uint32(250000)
 	if s.config.SnapshotNumRowsPerPartition > 0 {
 		numRowsPerPartition = s.config.SnapshotNumRowsPerPartition
 	}
@@ -274,6 +274,13 @@ func (s *SnapshotFlowExecution) cloneTablesWithSlot(
 	if err != nil {
 		return fmt.Errorf("failed to setup replication: %w", err)
 	}
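+	// Close the slot keep-alive even if cloning fails below: a disconnected
+	// context allows this cleanup to run after the parent context is canceled.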
+ defer func() { + dCtx, cancel := workflow.NewDisconnectedContext(sessionCtx) + defer cancel() + if err := s.closeSlotKeepAlive(dCtx); err != nil { + s.logger.Error("failed to close slot keep alive", slog.Any("error", err)) + } + }() s.logger.Info(fmt.Sprintf("cloning %d tables in parallel", numTablesInParallel)) if err := s.cloneTables(ctx, @@ -283,13 +290,10 @@ func (s *SnapshotFlowExecution) cloneTablesWithSlot( slotInfo.SupportsTidScans, numTablesInParallel, ); err != nil { + s.logger.Error("failed to clone tables", slog.Any("error", err)) return fmt.Errorf("failed to clone tables: %w", err) } - if err := s.closeSlotKeepAlive(sessionCtx); err != nil { - return fmt.Errorf("failed to close slot keep alive: %w", err) - } - return nil } diff --git a/nexus/Cargo.lock b/nexus/Cargo.lock index 8dd801e5e..f827849ff 100644 --- a/nexus/Cargo.lock +++ b/nexus/Cargo.lock @@ -95,9 +95,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.17" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a1e53f0f5d86382dafe1cf314783b2044280f406e7e1506368220ad11b1338" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -110,9 +110,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8365de52b16c035ff4fcafe0092ba9390540e3e352870ac09933bebcaa2c8c56" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" @@ -144,9 +144,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.91" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" +checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" [[package]] name = "ar" @@ -187,7 +187,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -209,7 +209,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -220,7 +220,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -237,9 +237,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-config" -version = "1.5.9" +version = "1.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d6448cfb224dd6a9b9ac734f58622dd0d4751f3589f3b777345745f46b2eb14" +checksum = "9b49afaa341e8dd8577e1a2200468f98956d6eda50bcf4a53246cc00174ba924" dependencies = [ "aws-credential-types", "aws-runtime", @@ -304,9 +304,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "1.48.0" +version = "1.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2afbd208dabc6785946d4ef2444eb1f54fe0aaf0f62f2a4f9a9e9c303aeff0be" +checksum = "bfd059dacda4dfd5b57f2bd453fc6555f9acb496cb77508d517da24cf5d73167" dependencies = [ "aws-credential-types", "aws-runtime", @@ -326,9 +326,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.47.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a8776850becacbd3a82a4737a9375ddb5c6832a51379f24443a98e61513f852c" +checksum = "09677244a9da92172c8dc60109b4a9658597d4d298b188dd0018b6a66b410ca4" dependencies = [ "aws-credential-types", "aws-runtime", @@ -348,9 +348,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.48.0" +version = "1.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0007b5b8004547133319b6c4e87193eee2a0bcb3e4c18c75d09febe9dab7b383" +checksum = "81fea2f3a8bb3bd10932ae7ad59cc59f65f270fc9183a7e91f501dc5efbef7ee" dependencies = [ "aws-credential-types", "aws-runtime", @@ -370,9 +370,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.47.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fffaa356e7f1c725908b75136d53207fa714e348f365671df14e95a60530ad3" +checksum = "53dcf5e7d9bd1517b8b998e170e650047cea8a2b85fe1835abe3210713e541b7" dependencies = [ "aws-credential-types", "aws-runtime", @@ -493,9 +493,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime-api" -version = "1.7.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e086682a53d3aa241192aa110fa8dfce98f2f5ac2ead0de84d41582c7e8fdb96" +checksum = "92165296a47a812b267b4f41032ff8069ab7ff783696d217f0994a0d7ab585cd" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -510,9 +510,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.2.8" +version = "1.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07c9cdc179e6afbf5d391ab08c85eac817b51c87e1892a5edb5f7bbdc64314b4" +checksum = "4fbd94a32b3a7d55d3806fe27d98d3ad393050439dd05eb53ece36ec5e3d3510" dependencies = [ "base64-simd", "bytes", @@ -663,7 +663,7 @@ version = "0.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" dependencies = [ - "bitflags", + "bitflags 2.6.0", "cexpr", "clang-sys", "itertools", @@ -672,9 +672,15 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.85", + "syn 2.0.87", ] +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.6.0" @@ -713,9 +719,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" +checksum = "f5327f6c99920069d1fe374aa743be1af0031dea9f250852cdf1ae6a0861ee24" dependencies = [ "borsh-derive", "cfg_aliases", @@ -723,16 +729,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" +checksum = "10aedd8f1a81a8aafbfde924b0e3061cd6fedd6f6bbcfc6a76e6fd426d7bfe26" dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.85", - "syn_derive", + "syn 2.0.87", ] [[package]] @@ -796,9 +801,9 @@ dependencies = [ [[package]] name = "cargo-deb" -version = "2.7.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4acc4fba3972a758843b4908ccc47e7b376a9c2d35f05f0d8ed54b6890d0647b" +checksum = 
"9103cb60c68ef7ce14a3d17c6d697e8b180356a447685784f7951074bce0b844" dependencies = [ "ar", "cargo_toml", @@ -814,7 +819,6 @@ dependencies = [ "regex", "serde", "serde_json", - "sha2", "tar", "tempfile", "toml", @@ -865,9 +869,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.31" +version = "1.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" +checksum = "baee610e9452a8f6f0a1b6194ec09ff9e2d85dea54432acdae41aa0761c95d70" dependencies = [ "jobserver", "libc", @@ -958,9 +962,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" +checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" dependencies = [ "clap_builder", "clap_derive", @@ -968,9 +972,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" +checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" dependencies = [ "anstream", "anstyle", @@ -987,7 +991,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1128,7 +1132,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core", + "parking_lot_core 0.9.10", ] [[package]] @@ -1186,7 +1190,7 @@ checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1201,13 +1205,13 @@ dependencies = [ [[package]] name = "derive-new" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad" +checksum = "2cdc8d50f426189eef89dac62fabfa0abb27d5cc008f25bf4156a0203325becc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1222,6 +1226,17 @@ dependencies = [ "subtle", ] +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "dotenvy" version = "0.15.7" @@ -1434,7 +1449,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1469,9 +1484,9 @@ dependencies = [ [[package]] name = "gcp-bigquery-client" -version = "0.23.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51480b6aca9d7997b8575b7e8b68441a847235673cdd739ae576bbfc708dbd3d" +checksum = "7642bdf60deda83c29b045188527fbc8b633636e0337a65c026436e7cdb26d3b" dependencies = [ "async-stream", "async-trait", @@ -1594,9 +1609,9 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hashbrown" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" dependencies = [ 
"allocator-api2", "equivalent", @@ -1775,7 +1790,7 @@ dependencies = [ "http 1.1.0", "hyper 1.5.0", "hyper-util", - "rustls 0.23.15", + "rustls 0.23.17", "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", @@ -1786,9 +1801,9 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ "hyper 1.5.0", "hyper-util", @@ -1799,9 +1814,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -1839,14 +1854,143 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + 
"icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "idna" -version = "0.5.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", ] [[package]] @@ -1866,7 +2010,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.15.1", ] [[package]] @@ -1879,6 +2023,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + [[package]] name = "ipnet" version = "2.10.1" @@ -1948,6 +2101,29 @@ dependencies = [ "indexmap 2.6.0", ] +[[package]] +name = "lazy-regex" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d8e41c97e6bc7ecb552016274b99fbb5d035e8de288c582d9b933af6677bfda" +dependencies = [ + "lazy-regex-proc_macros", + "once_cell", + "regex-lite", +] + +[[package]] +name = "lazy-regex-proc_macros" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76e1d8b05d672c53cb9c7b920bbba8783845ae4f0b076e02a3db1d02c81b4163" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "syn 2.0.87", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -1959,9 +2135,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.161" +version = "0.2.162" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" +checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" [[package]] name = "libloading" @@ -1970,14 +2146,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] name = "libm" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bda4c6077b0b08da2c48b172195795498381a7c8988c9e6212a6c55c5b9bd70" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libredox" @@ -1985,9 +2161,35 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags", + "bitflags 2.6.0", "libc", - "redox_syscall", + 
"redox_syscall 0.5.7", +] + +[[package]] +name = "libssh2-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dc8a030b787e2119a731f1951d6a773e2280c660f8ec4b0f5e1505a386e71ee" +dependencies = [ + "cc", + "libc", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", ] [[package]] @@ -1996,6 +2198,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" + [[package]] name = "lock_api" version = "0.4.12" @@ -2024,7 +2232,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.0", + "hashbrown 0.15.1", ] [[package]] @@ -2171,7 +2379,7 @@ checksum = "478b0ff3f7d67b79da2b96f56f334431aef65e15ba4b29dd74a4236e29582bdc" dependencies = [ "base64 0.21.7", "bindgen", - "bitflags", + "bitflags 2.6.0", "btoi", "byteorder", "bytes", @@ -2327,6 +2535,18 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-sys" +version = "0.9.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "outref" version = "0.5.1" @@ -2339,6 +2559,17 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "parking_lot" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core 0.8.6", +] + [[package]] name = "parking_lot" version = "0.12.3" @@ -2346,7 +2577,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", - "parking_lot_core", + "parking_lot_core 0.9.10", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +dependencies = [ + "cfg-if", + "instant", + "libc", + "redox_syscall 0.2.16", + "smallvec", + "winapi", ] [[package]] @@ -2357,7 +2602,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.7", "smallvec", "windows-targets 0.52.6", ] @@ -2517,6 +2762,7 @@ dependencies = [ "serde_bytes", "serde_json", "sqlparser", + "ssh2", "tokio", "tokio-postgres", "tracing", @@ -2647,9 +2893,9 @@ dependencies = [ [[package]] name = "pgwire" -version = "0.23.0" +version = "0.26.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "22fb7a8b4570b74080587c5f3e187553375d18e72a38c72ca7f70a065972c65d" +checksum = "99e0f273b9ffa92a06b0a900c012df432de901c1854b2411cd7b27e2db165cc8" dependencies = [ "async-trait", "base64 0.22.1", @@ -2658,10 +2904,12 @@ dependencies = [ "derive-new", "futures", "hex", + "lazy-regex", "md5", "postgres-types", "rand", "ring", + "rust_decimal", "stringprep", "thiserror", "tokio", @@ -2705,7 +2953,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2794,11 +3042,15 @@ name = "postgres-connection" version = "0.1.0" dependencies = [ "anyhow", + "futures-util", "pt", - "rustls 0.23.15", + "rustls 0.23.17", + "ssh2", "tokio", "tokio-postgres", "tokio-postgres-rustls", + "tokio-stream", + "tokio-util", "tracing", "urlencoding", ] @@ -2869,7 +3121,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2881,29 +3133,6 @@ dependencies = [ "toml_edit", ] -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - [[package]] name = "proc-macro2" version = "1.0.89" @@ -2930,7 +3159,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" dependencies = [ "bytes", - "heck 0.5.0", + "heck 0.4.1", "itertools", "log", "multimap", @@ -2942,7 +3171,7 @@ dependencies = [ "pulldown-cmark", "pulldown-cmark-to-cmark", "regex", - "syn 2.0.85", + "syn 2.0.87", "tempfile", ] @@ -2956,7 +3185,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -3010,7 +3239,7 @@ version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f86ba2052aebccc42cbbb3ed234b8b13ce76f75c3551a303cb2bcffcff12bb14" dependencies = [ - "bitflags", + "bitflags 2.6.0", "memchr", "unicase", ] @@ -3041,7 +3270,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.15", + "rustls 0.23.17", "socket2", "thiserror", "tokio", @@ -3058,7 +3287,7 @@ dependencies = [ "rand", "ring", "rustc-hash 2.0.0", - "rustls 0.23.15", + "rustls 0.23.17", "slab", "thiserror", "tinyvec", @@ -3067,15 +3296,16 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ + "cfg_aliases", "libc", "once_cell", "socket2", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -3143,13 +3373,22 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_syscall" version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags", + "bitflags 2.6.0", ] [[package]] @@ -3192,7 +3431,7 @@ dependencies = [ "quote", "refinery-core", "regex", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -3256,9 +3495,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.8" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "async-compression", "base64 0.22.1", @@ -3279,7 +3518,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.15", + "rustls 0.23.17", "rustls-pemfile 2.2.0", "rustls-pki-types", "serde", @@ -3408,11 +3647,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.38" +version = "0.38.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" +checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" dependencies = [ - "bitflags", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", @@ -3447,9 +3686,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.15" +version = "0.23.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993" +checksum = "7f1a745511c54ba6d4465e8d5dfbd81b45791756de28d4981af70d6dca128f1e" dependencies = [ "log", "once_cell", @@ -3623,7 +3862,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -3648,9 +3887,9 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.213" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] @@ -3666,20 +3905,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.213" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "itoa", "memchr", @@ -3863,9 +4102,27 @@ source = "git+https://github.com/peerdb-io/sqlparser-rs.git?branch=main#8c341b80 dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] 
+[[package]] +name = "ssh2" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7fe461910559f6d5604c3731d00d2aafc4a83d1665922e280f42f9a168d5455" +dependencies = [ + "bitflags 1.3.2", + "libc", + "libssh2-sys", + "parking_lot 0.11.2", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "static_assertions" version = "1.1.0" @@ -3918,27 +4175,15 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.85" +version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" +checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] -[[package]] -name = "syn_derive" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" -dependencies = [ - "proc-macro-error", - "proc-macro2", - "quote", - "syn 2.0.85", -] - [[package]] name = "sync_wrapper" version = "0.1.2" @@ -3954,6 +4199,17 @@ dependencies = [ "futures-core", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "tap" version = "1.0.1" @@ -3962,9 +4218,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tar" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ff6c40d3aedb5e06b57c6f669ad17ab063dd1e63d977c6a88e7f4dfa4f04020" +checksum = "c65998313f8e17d0d553d28f91a0df93e4dbbbf770279c7bc21ca0f09ea1a1f6" dependencies = [ "filetime", "libc", @@ -3985,22 +4241,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.65" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" +checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.65" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" +checksum = "a7c61ec9a6f64d2793d8a45faba21efbe3ced62a886d44c36a009b2b519b4c7e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4046,6 +4302,16 @@ dependencies = [ "time-core", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinyvec" version = "1.8.0" @@ -4079,20 +4345,20 @@ checksum = "8d9ef545650e79f30233c0003bcc2504d7efac6dad25fca40744de773fe2049c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] name = "tokio" -version = "1.41.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" +checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "bytes", "libc", "mio 1.0.2", - "parking_lot", + "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", "socket2", @@ -4108,7 +4374,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4124,7 +4390,7 @@ dependencies = [ "futures-channel", "futures-util", "log", - "parking_lot", + "parking_lot 0.12.3", "percent-encoding", "phf", "pin-project-lite", @@ -4145,7 +4411,7 @@ checksum = "27d684bad428a0f2481f42241f821db42c54e2dc81d8c00db8536c506b0a0144" dependencies = [ "const-oid", "ring", - "rustls 0.23.15", + "rustls 0.23.17", "tokio", "tokio-postgres", "tokio-rustls 0.26.0", @@ -4179,7 +4445,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.15", + "rustls 0.23.17", "rustls-pki-types", "tokio", ] @@ -4203,6 +4469,7 @@ checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", + "futures-io", "futures-sink", "pin-project-lite", "tokio", @@ -4286,7 +4553,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4392,7 +4659,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4523,7 +4790,7 @@ dependencies = [ "flate2", "log", "once_cell", - "rustls 0.23.15", + "rustls 0.23.17", "rustls-pki-types", "serde", "serde_json", @@ -4533,9 +4800,9 @@ dependencies = [ [[package]] name = "url" -version = "2.5.2" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" dependencies = [ "form_urlencoded", "idna", @@ -4548,6 +4815,18 @@ version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -4588,6 +4867,12 @@ dependencies = [ "uuid", ] +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "version_check" version = "0.9.5" @@ -4653,7 +4938,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", "wasm-bindgen-shared", ] @@ -4687,7 +4972,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4733,7 +5018,7 @@ version = "1.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall", + "redox_syscall 0.5.7", "wasite", "web-sys", ] @@ -4760,7 +5045,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -4965,6 +5250,18 @@ dependencies = [ "memchr", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "wyz" version = "0.5.1" @@ -5020,6 +5317,30 @@ dependencies = [ "lzma-sys", ] +[[package]] +name = "yoke" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", + "synstructure", +] + [[package]] name = "yup-oauth2" version = "11.0.0" @@ -5037,7 +5358,7 @@ dependencies = [ "hyper-util", "log", "percent-encoding", - "rustls 0.23.15", + "rustls 0.23.17", "rustls-pemfile 2.2.0", "seahash", "serde", @@ -5065,7 +5386,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", +] + +[[package]] +name = "zerofrom" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", + "synstructure", ] [[package]] @@ -5085,7 +5427,29 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", +] + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", ] [[package]] diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 081df6671..6efea5f4b 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -28,9 +28,11 @@ dashmap = "6" rust_decimal = { version = "1", default-features = false, features = [ "tokio-pg", ] } +ssh2 = "0.9" sqlparser = { git = 
"https://github.com/peerdb-io/sqlparser-rs.git", branch = "main" } +tokio = { version = "1", features = ["full"] } tracing = "0.1" -pgwire = { version = "0.23", default-features = false, features = [ +pgwire = { version = "0.26", default-features = false, features = [ "scram", "server-api-ring", ] } diff --git a/nexus/analyzer/src/lib.rs b/nexus/analyzer/src/lib.rs index 49c05a22b..830da627d 100644 --- a/nexus/analyzer/src/lib.rs +++ b/nexus/analyzer/src/lib.rs @@ -18,7 +18,7 @@ use qrep::process_options; use sqlparser::ast::{ self, visit_relations, visit_statements, CreateMirror::{Select, CDC}, - Expr, FetchDirection, SqlOption, Statement, + DollarQuotedString, Expr, FetchDirection, SqlOption, Statement, Value, }; mod qrep; @@ -116,6 +116,10 @@ pub enum PeerDDL { peer_name: String, if_exists: bool, }, + ExecutePeer { + peer_name: String, + query: String, + }, CreateMirrorForCDC { if_not_exists: bool, flow_job: Box, @@ -388,6 +392,30 @@ impl StatementAnalyzer for PeerDDLAnalyzer { } } } + Statement::Execute { + name, parameters, .. + } => { + if let Some(Expr::Value(query)) = parameters.first() { + if let Some(query) = match query { + Value::DoubleQuotedString(query) + | Value::SingleQuotedString(query) + | Value::EscapedStringLiteral(query) => Some(query.clone()), + Value::DollarQuotedString(DollarQuotedString { value, .. }) => { + Some(value.clone()) + } + _ => None, + } { + Ok(Some(PeerDDL::ExecutePeer { + peer_name: name.to_string().to_lowercase(), + query: query.to_string(), + })) + } else { + Ok(None) + } + } else { + Ok(None) + } + } Statement::ExecuteMirror { mirror_name } => Ok(Some(PeerDDL::ExecuteMirrorForSelect { flow_job_name: mirror_name.to_string().to_lowercase(), })), diff --git a/nexus/catalog/Cargo.toml b/nexus/catalog/Cargo.toml index 162df1da2..af7b79963 100644 --- a/nexus/catalog/Cargo.toml +++ b/nexus/catalog/Cargo.toml @@ -18,7 +18,7 @@ pt = { path = "../pt" } refinery = { version = "0.8", default-features = false, features = ["tokio-postgres"] } serde_json = "1.0" sqlparser.workspace = true -tokio = { version = "1.13.0", features = ["full"] } +tokio.workspace = true tokio-postgres = { version = "0.7.6", features = [ "with-chrono-0_4", "with-serde_json-1", diff --git a/nexus/catalog/migrations/V40__maintenance_flows.sql b/nexus/catalog/migrations/V40__maintenance_flows.sql new file mode 100644 index 000000000..e43e8eb92 --- /dev/null +++ b/nexus/catalog/migrations/V40__maintenance_flows.sql @@ -0,0 +1,29 @@ +CREATE SCHEMA IF NOT EXISTS maintenance; + +CREATE TABLE IF NOT EXISTS maintenance.maintenance_flows +( + id SERIAL PRIMARY KEY, + flow_id BIGINT NOT NULL, + flow_name TEXT NOT NULL, + workflow_id TEXT NOT NULL, + flow_created_at TIMESTAMP NOT NULL, + is_cdc BOOLEAN NOT NULL, + state TEXT NOT NULL, + restored_at TIMESTAMP, + from_version TEXT, + to_version TEXT +); + +CREATE INDEX IF NOT EXISTS idx_maintenance_flows_state ON maintenance.maintenance_flows (state); + +CREATE TABLE IF NOT EXISTS maintenance.start_maintenance_outputs +( + id SERIAL PRIMARY KEY, + api_version TEXT NOT NULL, + cli_version TEXT NOT NULL, + skipped BOOLEAN NOT NULL, + skipped_reason TEXT, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX IF NOT EXISTS idx_start_maintenance_outputs_created_at ON maintenance.start_maintenance_outputs (created_at DESC); diff --git a/nexus/catalog/src/lib.rs b/nexus/catalog/src/lib.rs index f103fce02..d5d023e57 100644 --- a/nexus/catalog/src/lib.rs +++ b/nexus/catalog/src/lib.rs @@ -72,8 +72,8 @@ impl<'a> CatalogConfig<'a> { impl 
Catalog { pub async fn new(pt_config: pt::peerdb_peers::PostgresConfig) -> anyhow::Result { - let client = connect_postgres(&pt_config).await?; - Ok(Self { pg: client }) + let (pg, _) = connect_postgres(&pt_config).await?; + Ok(Self { pg }) } pub async fn run_migrations(&mut self) -> anyhow::Result<()> { @@ -201,7 +201,7 @@ impl Catalog { let stmt = self .pg .prepare_typed( - "SELECT id, name, type, options, enc_key_id FROM public.peers WHERE name = $1", + "SELECT name, type, options, enc_key_id FROM public.peers WHERE name = $1", &[], ) .await?; @@ -516,6 +516,10 @@ impl Catalog { #[async_trait::async_trait] impl QueryExecutor for Catalog { + async fn execute_raw(&self, query: &str) -> PgWireResult { + peer_postgres::pg_execute_raw(&self.pg, query).await + } + #[tracing::instrument(skip(self, stmt), fields(stmt = %stmt))] async fn execute(&self, stmt: &Statement) -> PgWireResult { peer_postgres::pg_execute(&self.pg, ast::PostgresAst { peername: None }, stmt).await diff --git a/nexus/parser/Cargo.toml b/nexus/parser/Cargo.toml index b6aac7d88..45bdc558c 100644 --- a/nexus/parser/Cargo.toml +++ b/nexus/parser/Cargo.toml @@ -14,5 +14,5 @@ pgwire.workspace = true pt = { path = "../pt" } rand = "0.8" sqlparser.workspace = true -tokio = { version = "1", features = ["full"] } +tokio.workspace = true tracing.workspace = true diff --git a/nexus/peer-bigquery/Cargo.toml b/nexus/peer-bigquery/Cargo.toml index 8fdd68b5e..c3cf3c18b 100644 --- a/nexus/peer-bigquery/Cargo.toml +++ b/nexus/peer-bigquery/Cargo.toml @@ -21,7 +21,7 @@ serde_json = "1.0" serde_bytes = "0.11" sqlparser.workspace = true tracing.workspace = true -tokio = { version = "1.0", features = ["full"] } -gcp-bigquery-client = "0.23" +tokio.workspace = true +gcp-bigquery-client = "0.24" uuid = { version = "1.0", features = ["serde", "v4"] } value = { path = "../value" } diff --git a/nexus/peer-bigquery/src/lib.rs b/nexus/peer-bigquery/src/lib.rs index 9cdb01c0c..23437f622 100644 --- a/nexus/peer-bigquery/src/lib.rs +++ b/nexus/peer-bigquery/src/lib.rs @@ -2,7 +2,7 @@ use std::time::Duration; use anyhow::Context; use gcp_bigquery_client::{ - model::{query_request::QueryRequest, query_response::ResultSet}, + model::{query_request::QueryRequest, query_response::QueryResponse}, yup_oauth2, Client, }; use peer_connections::PeerConnectionTracker; @@ -62,7 +62,7 @@ impl BigQueryQueryExecutor { }) } - async fn run_tracked(&self, query: &str) -> PgWireResult { + async fn run_tracked(&self, query: &str) -> PgWireResult { let mut query_req = QueryRequest::new(query); query_req.timeout_ms = Some(Duration::from_secs(120).as_millis() as i32); @@ -91,6 +91,17 @@ impl BigQueryQueryExecutor { #[async_trait::async_trait] impl QueryExecutor for BigQueryQueryExecutor { + async fn execute_raw(&self, query: &str) -> PgWireResult { + let query_response = self.run_tracked(query).await?; + let cursor = BqRecordStream::from(query_response); + tracing::info!( + "retrieved {} rows for query {}", + cursor.get_num_records(), + query + ); + Ok(QueryOutput::Stream(Box::pin(cursor))) + } + #[tracing::instrument(skip(self, stmt), fields(stmt = %stmt))] async fn execute(&self, stmt: &Statement) -> PgWireResult { // only support SELECT statements @@ -105,15 +116,7 @@ impl QueryExecutor for BigQueryQueryExecutor { let query = query.to_string(); tracing::info!("bq rewritten query: {}", query); - let result_set = self.run_tracked(&query).await?; - - let cursor = BqRecordStream::new(result_set); - tracing::info!( - "retrieved {} rows for query {}", - cursor.get_num_records(), 
- query - ); - Ok(QueryOutput::Stream(Box::pin(cursor))) + self.execute_raw(&query).await } Statement::Declare { stmts } => { if stmts.len() != 1 { @@ -220,8 +223,8 @@ impl QueryExecutor for BigQueryQueryExecutor { query.limit = Some(Expr::Value(Value::Number("1".to_owned(), false))); let query = query.to_string(); - let result_set = self.run_tracked(&query).await?; - let schema = BqSchema::from_result_set(&result_set); + let query_response = self.run_tracked(&query).await?; + let schema = BqSchema::from(&query_response); // log the schema tracing::info!("[bigquery] schema: {:?}", schema); diff --git a/nexus/peer-bigquery/src/stream.rs b/nexus/peer-bigquery/src/stream.rs index d0f7e5b5b..76f2f7615 100644 --- a/nexus/peer-bigquery/src/stream.rs +++ b/nexus/peer-bigquery/src/stream.rs @@ -8,7 +8,9 @@ use std::{ use chrono::DateTime; use futures::Stream; use gcp_bigquery_client::model::{ - field_type::FieldType, query_response::ResultSet, table_field_schema::TableFieldSchema, + field_type::FieldType, + query_response::{QueryResponse, ResultSet}, + table_field_schema::TableFieldSchema, }; use peer_cursor::{Record, RecordStream, Schema}; use pgwire::{ @@ -57,10 +59,9 @@ fn convert_field_type(field_type: &FieldType) -> Type { } } -impl BqSchema { - pub fn from_result_set(result_set: &ResultSet) -> Self { - let bq_schema = result_set - .query_response() +impl From<&QueryResponse> for BqSchema { + fn from(query_response: &QueryResponse) -> Self { + let bq_schema = query_response .schema .as_ref() .expect("Schema is not present"); @@ -84,24 +85,29 @@ impl BqSchema { fields: fields.clone(), } } +} +impl BqSchema { pub fn schema(&self) -> Schema { self.schema.clone() } } -impl BqRecordStream { - pub fn new(result_set: ResultSet) -> Self { - let bq_schema = BqSchema::from_result_set(&result_set); +impl From for BqRecordStream { + fn from(query_response: QueryResponse) -> Self { + let schema = BqSchema::from(&query_response); + let result_set = ResultSet::new_from_query_response(query_response); let num_records = result_set.row_count(); Self { result_set, - schema: bq_schema, + schema, num_records, } } +} +impl BqRecordStream { pub fn get_num_records(&self) -> usize { self.num_records } diff --git a/nexus/peer-connections/Cargo.toml b/nexus/peer-connections/Cargo.toml index 8aa69c7f0..01b172c74 100644 --- a/nexus/peer-connections/Cargo.toml +++ b/nexus/peer-connections/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" anyhow = "1.0" chrono.workspace = true deadpool-postgres = { version = "0.14", features = ["rt_tokio_1"] } -tokio = { version = "1", features = ["full"] } +tokio.workspace = true tokio-postgres = { version = "0.7.6", features = [ "with-chrono-0_4", "with-serde_json-1", diff --git a/nexus/peer-cursor/Cargo.toml b/nexus/peer-cursor/Cargo.toml index 74a2fe9de..3db4eecba 100644 --- a/nexus/peer-cursor/Cargo.toml +++ b/nexus/peer-cursor/Cargo.toml @@ -12,6 +12,6 @@ dashmap.workspace = true futures = "0.3" pgwire.workspace = true sqlparser.workspace = true -tokio = { version = "1.0", features = ["full"] } +tokio.workspace = true tracing.workspace = true value = { path = "../value" } diff --git a/nexus/peer-cursor/src/lib.rs b/nexus/peer-cursor/src/lib.rs index 3a31531f4..306bf0dfc 100644 --- a/nexus/peer-cursor/src/lib.rs +++ b/nexus/peer-cursor/src/lib.rs @@ -46,6 +46,7 @@ pub enum QueryOutput { #[async_trait::async_trait] pub trait QueryExecutor: Send + Sync { + async fn execute_raw(&self, stmt: &str) -> PgWireResult; async fn execute(&self, stmt: &Statement) -> PgWireResult; async fn 
describe(&self, stmt: &Statement) -> PgWireResult>; } diff --git a/nexus/peer-mysql/Cargo.toml b/nexus/peer-mysql/Cargo.toml index 2fe32d845..a6fed50b8 100644 --- a/nexus/peer-mysql/Cargo.toml +++ b/nexus/peer-mysql/Cargo.toml @@ -22,6 +22,6 @@ serde_json = "1.0" serde_bytes = "0.11" sqlparser.workspace = true tracing.workspace = true -tokio = { version = "1.0", features = ["full"] } +tokio.workspace = true tokio-stream = "0.1" value = { path = "../value" } diff --git a/nexus/peer-mysql/src/lib.rs b/nexus/peer-mysql/src/lib.rs index 6868a53a1..831acd56d 100644 --- a/nexus/peer-mysql/src/lib.rs +++ b/nexus/peer-mysql/src/lib.rs @@ -59,7 +59,11 @@ impl MySqlQueryExecutor { #[async_trait::async_trait] impl QueryExecutor for MySqlQueryExecutor { - // #[tracing::instrument(skip(self, stmt), fields(stmt = %stmt))] + async fn execute_raw(&self, query: &str) -> PgWireResult { + let cursor = self.query(query.to_string()).await?; + Ok(QueryOutput::Stream(Box::pin(cursor))) + } + async fn execute(&self, stmt: &Statement) -> PgWireResult { // only support SELECT statements match stmt { diff --git a/nexus/peer-postgres/Cargo.toml b/nexus/peer-postgres/Cargo.toml index 873baa267..78b055500 100644 --- a/nexus/peer-postgres/Cargo.toml +++ b/nexus/peer-postgres/Cargo.toml @@ -8,7 +8,6 @@ edition = "2021" [dependencies] anyhow = "1.0" async-trait = "0.1" -rust_decimal.workspace = true bytes = "1.0" chrono.workspace = true futures = "0.3" @@ -16,13 +15,15 @@ peer-cursor = { path = "../peer-cursor" } peer-connections = { path = "../peer-connections" } pgwire.workspace = true postgres-connection = { path = "../postgres-connection" } +postgres-inet = "0.19.0" pt = { path = "../pt" } +rust_decimal.workspace = true serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" serde_bytes = "0.11" -postgres-inet = "0.19.0" +ssh2.workspace = true sqlparser.workspace = true -tokio = { version = "1.0", features = ["full"] } +tokio.workspace = true tokio-postgres = { version = "0.7.6", features = [ "with-chrono-0_4", "with-serde_json-1", diff --git a/nexus/peer-postgres/src/lib.rs b/nexus/peer-postgres/src/lib.rs index cb29104d8..36676618a 100644 --- a/nexus/peer-postgres/src/lib.rs +++ b/nexus/peer-postgres/src/lib.rs @@ -16,19 +16,29 @@ pub mod stream; // backing store. pub struct PostgresQueryExecutor { peername: String, - client: Box, + client: Client, + session: Option, } impl PostgresQueryExecutor { pub async fn new(peername: String, config: &PostgresConfig) -> anyhow::Result { - let client = postgres_connection::connect_postgres(config).await?; + let (client, session) = postgres_connection::connect_postgres(config).await?; Ok(Self { peername, - client: Box::new(client), + client, + session, }) } } +impl Drop for PostgresQueryExecutor { + fn drop(&mut self) { + if let Some(session) = &mut self.session { + session.disconnect(None, "", None).ok(); + } + } +} + async fn schema_from_query(client: &Client, query: &str) -> anyhow::Result { let prepared = client.prepare_typed(query, &[]).await?; @@ -44,6 +54,34 @@ async fn schema_from_query(client: &Client, query: &str) -> anyhow::Result PgWireResult { + // first fetch the schema as this connection will be + // short lived, only then run the query as the query + // could hold the pin on the connection for a long time. 
+    let schema = schema_from_query(client, query).await.map_err(|e| {
+        tracing::error!("error getting schema: {}", e);
+        PgWireError::ApiError(format!("error getting schema: {}", e).into())
+    })?;
+
+    tracing::info!("[peer-postgres] rewritten query: {}", query);
+    // given that there could be a lot of rows returned, we
+    // need to use a cursor to stream the rows back to the
+    // client.
+    let stream = client
+        .query_raw(query, std::iter::empty::<&str>())
+        .await
+        .map_err(|e| {
+            tracing::error!("error executing query: {}", e);
+            PgWireError::ApiError(format!("error executing query: {}", e).into())
+        })?;
+
+    // log that raw query execution has completed
+    tracing::info!("[peer-postgres] raw query execution completed");
+
+    let cursor = stream::PgRecordStream::new(stream, schema);
+    Ok(QueryOutput::Stream(Box::pin(cursor)))
+}
+
 pub async fn pg_execute(
     client: &Client,
     ast: ast::PostgresAst,
@@ -58,33 +96,7 @@ pub async fn pg_execute(
             ast.rewrite_query(&mut query);
             let rewritten_query = query.to_string();
 
-            // first fetch the schema as this connection will be
-            // short lived, only then run the query as the query
-            // could hold the pin on the connection for a long time.
-            let schema = schema_from_query(client, &rewritten_query)
-                .await
-                .map_err(|e| {
-                    tracing::error!("error getting schema: {}", e);
-                    PgWireError::ApiError(format!("error getting schema: {}", e).into())
-                })?;
-
-            tracing::info!("[peer-postgres] rewritten query: {}", rewritten_query);
-            // given that there could be a lot of rows returned, we
-            // need to use a cursor to stream the rows back to the
-            // client.
-            let stream = client
-                .query_raw(&rewritten_query, std::iter::empty::<&str>())
-                .await
-                .map_err(|e| {
-                    tracing::error!("error executing query: {}", e);
-                    PgWireError::ApiError(format!("error executing query: {}", e).into())
-                })?;
-
-            // log that raw query execution has completed
-            tracing::info!("[peer-postgres] raw query execution completed");
-
-            let cursor = stream::PgRecordStream::new(stream, schema);
-            Ok(QueryOutput::Stream(Box::pin(cursor)))
+            pg_execute_raw(client, &rewritten_query).await
         }
         _ => {
             let mut rewritten_stmt = stmt.clone();
@@ -120,6 +132,10 @@ pub async fn pg_describe(client: &Client, stmt: &Statement) -> PgWireResult<Option<Schema>> {
+    async fn execute_raw(&self, query: &str) -> PgWireResult<QueryOutput> {
+        pg_execute_raw(&self.client, query).await
+    }
+
     #[tracing::instrument(skip(self, stmt), fields(stmt = %stmt))]
     async fn execute(&self, stmt: &Statement) -> PgWireResult<QueryOutput> {
         pg_execute(
diff --git a/nexus/peer-snowflake/Cargo.toml b/nexus/peer-snowflake/Cargo.toml
index e74c54318..bb33eed01 100644
--- a/nexus/peer-snowflake/Cargo.toml
+++ b/nexus/peer-snowflake/Cargo.toml
@@ -25,7 +25,7 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 sha2 = "0.10"
 sqlparser.workspace = true
-tokio = { version = "1.21", features = ["full"] }
+tokio.workspace = true
 tracing.workspace = true
 ureq = { version = "2", features = ["json", "charset"] }
 value = { path = "../value" }
diff --git a/nexus/peer-snowflake/src/lib.rs b/nexus/peer-snowflake/src/lib.rs
index e0af65b58..7905be724 100644
--- a/nexus/peer-snowflake/src/lib.rs
+++ b/nexus/peer-snowflake/src/lib.rs
@@ -292,6 +292,22 @@ impl SnowflakeQueryExecutor {
 
 #[async_trait::async_trait]
 impl QueryExecutor for SnowflakeQueryExecutor {
+    async fn execute_raw(&self, query: &str) -> PgWireResult<QueryOutput> {
+        let result_set = self
+            .process_query(query)
+            .await
+            .map_err(|err| PgWireError::ApiError(err.into()))?;
+
+        let cursor = stream::SnowflakeRecordStream::new(
+            result_set,
+            self.partition_index,
self.partition_number, + self.endpoint_url.clone(), + self.auth.clone(), + ); + Ok(QueryOutput::Stream(Box::pin(cursor))) + } + #[tracing::instrument(skip(self, stmt), fields(stmt = %stmt))] async fn execute(&self, stmt: &Statement) -> PgWireResult { match stmt { diff --git a/nexus/postgres-connection/Cargo.toml b/nexus/postgres-connection/Cargo.toml index 93da58109..4d981c7c6 100644 --- a/nexus/postgres-connection/Cargo.toml +++ b/nexus/postgres-connection/Cargo.toml @@ -7,10 +7,14 @@ edition = "2021" [dependencies] anyhow = "1" +futures-util = { version = "0.3", default-features = false, features = ["io"] } pt = { path = "../pt" } rustls = { version = "0.23", default-features = false, features = ["ring"] } -urlencoding = "2" +ssh2.workspace = true +tokio.workspace = true tokio-postgres = "0.7.2" tokio-postgres-rustls = "0.13" -tokio = { version = "1", features = ["full"] } +tokio-util = { version = "0.7", features = ["compat"] } +tokio-stream = "0.1" tracing.workspace = true +urlencoding = "2" diff --git a/nexus/postgres-connection/src/lib.rs b/nexus/postgres-connection/src/lib.rs index 7b2591687..69dafbde6 100644 --- a/nexus/postgres-connection/src/lib.rs +++ b/nexus/postgres-connection/src/lib.rs @@ -1,9 +1,12 @@ -use pt::peerdb_peers::PostgresConfig; +use pt::peerdb_peers::{PostgresConfig, SshConfig}; use rustls::pki_types::{CertificateDer, ServerName, UnixTime}; use rustls::{ClientConfig, DigitallySignedStruct, RootCertStore, SignatureScheme}; use std::fmt::Write; +use std::io; use std::sync::Arc; +use tokio::net::UnixStream; use tokio_postgres_rustls::MakeRustlsConnect; +use tokio_util::compat::FuturesAsyncReadCompatExt; #[derive(Copy, Clone, Debug)] struct NoCertificateVerification; @@ -77,25 +80,99 @@ pub fn get_pg_connection_string(config: &PostgresConfig) -> String { connection_string } -pub async fn connect_postgres(config: &PostgresConfig) -> anyhow::Result { - let connection_string = get_pg_connection_string(config); - - let mut config = ClientConfig::builder() - .with_root_certificates(RootCertStore::empty()) - .with_no_client_auth(); - config - .dangerous() - .set_certificate_verifier(Arc::new(NoCertificateVerification)); - let tls_connector = MakeRustlsConnect::new(config); - let (client, connection) = tokio_postgres::connect(&connection_string, tls_connector) - .await - .map_err(|e| anyhow::anyhow!("error encountered while connecting to postgres {:?}", e))?; +pub async fn create_tunnel( + tcp: std::net::TcpStream, + ssh_config: &SshConfig, + remote_server: String, + remote_port: u16, +) -> io::Result<(ssh2::Session, UnixStream)> { + let mut session = ssh2::Session::new()?; + session.set_tcp_stream(tcp); + session.set_compress(true); + session.handshake()?; + if !ssh_config.password.is_empty() { + session.userauth_password(&ssh_config.user, &ssh_config.password)?; + } + if !ssh_config.private_key.is_empty() { + session.userauth_pubkey_memory(&ssh_config.user, None, &ssh_config.private_key, None)?; + } + if !ssh_config.host_key.is_empty() { + let mut known_hosts = session.known_hosts()?; + known_hosts.read_str(&ssh_config.host_key, ssh2::KnownHostFileKind::OpenSSH)?; + } + let (mut stream1, stream2) = tokio::net::UnixStream::pair()?; + let channel = session.channel_direct_tcpip(remote_server.as_str(), remote_port, None)?; + tracing::info!( + "tunnel to {:}:{:} opened", + remote_server.as_str(), + remote_port + ); - tokio::task::spawn(async move { - if let Err(e) = connection.await { - tracing::info!("connection error: {}", e) + session.set_blocking(false); + 
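+    // the session is left non-blocking so the forwarding task below can
+    // poll the channel: copy_bidirectional surfaces WouldBlock, which is
+    // treated as "no data yet" (sleep and retry) rather than as a failure.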
+    tokio::spawn(async move {
+        let mut channel_stream = futures_util::io::AllowStdIo::new(channel.stream(0)).compat();
+        loop {
+            if let Err(err) = tokio::io::copy_bidirectional(&mut stream1, &mut channel_stream).await
+            {
+                if err.kind() == io::ErrorKind::WouldBlock {
+                    tokio::time::sleep(std::time::Duration::new(0, 123456789)).await;
+                    continue;
+                }
+                tracing::error!(
+                    "tunnel to {:}:{:} failed: {:}",
+                    remote_server.as_str(),
+                    remote_port,
+                    err
+                );
+            }
+            break;
         }
     });
-    Ok(client)
+    Ok((session, stream2))
+}
+
+pub async fn connect_postgres(
+    config: &PostgresConfig,
+) -> anyhow::Result<(tokio_postgres::Client, Option<ssh2::Session>)> {
+    if let Some(ssh_config) = &config.ssh_config {
+        let tcp = std::net::TcpStream::connect((ssh_config.host.as_str(), ssh_config.port as u16))?;
+        tcp.set_nodelay(true)?;
+        let (session, stream) =
+            create_tunnel(tcp, ssh_config, config.host.clone(), config.port as u16).await?;
+        let (client, connection) = tokio_postgres::Config::default()
+            .user(&config.user)
+            .password(&config.password)
+            .dbname(&config.database)
+            .application_name("peerdb_nexus")
+            .connect_raw(stream, tokio_postgres::NoTls)
+            .await?;
+        tokio::task::spawn(async move {
+            if let Err(e) = connection.await {
+                tracing::info!("connection error: {}", e)
+            }
+        });
+        Ok((client, Some(session)))
+    } else {
+        let connection_string = get_pg_connection_string(config);
+
+        let mut tls_config = ClientConfig::builder()
+            .with_root_certificates(RootCertStore::empty())
+            .with_no_client_auth();
+        tls_config
+            .dangerous()
+            .set_certificate_verifier(Arc::new(NoCertificateVerification));
+        let tls_connector = MakeRustlsConnect::new(tls_config);
+        let (client, connection) = tokio_postgres::connect(&connection_string, tls_connector)
+            .await
+            .map_err(|e| {
+                anyhow::anyhow!("error encountered while connecting to postgres {:?}", e)
+            })?;
+        tokio::task::spawn(async move {
+            if let Err(e) = connection.await {
+                tracing::info!("connection error: {}", e)
+            }
+        });
+        Ok((client, None))
+    }
 }
diff --git a/nexus/server/Cargo.toml b/nexus/server/Cargo.toml
index bb0e181c4..b7f05da27 100644
--- a/nexus/server/Cargo.toml
+++ b/nexus/server/Cargo.toml
@@ -53,7 +53,7 @@ sqlparser = { workspace = true, features = ["visitor"] }
 serde_json = "1.0"
 rand = "0.8"
 time = "0.3"
-tokio = { version = "1", features = ["full"] }
+tokio.workspace = true
 tracing.workspace = true
 tracing-appender = "0.2"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
diff --git a/nexus/server/src/main.rs b/nexus/server/src/main.rs
index e1207066c..ca082d6de 100644
--- a/nexus/server/src/main.rs
+++ b/nexus/server/src/main.rs
@@ -105,14 +105,12 @@ impl NexusBackend {
     }
 
     // execute a statement on a peer
-    async fn execute_statement<'a>(
+    async fn process_execution<'a>(
         &self,
-        executor: &dyn QueryExecutor,
-        stmt: &sqlparser::ast::Statement,
+        result: QueryOutput,
         peer_holder: Option<Box<Peer>>,
     ) -> PgWireResult<Vec<Response<'a>>> {
-        let res = executor.execute(stmt).await?;
-        match res {
+        match result {
             QueryOutput::AffectedRows(rows) => {
                 Ok(vec![Response::Execution(Tag::new("OK").with_rows(rows))])
             }
@@ -413,6 +411,20 @@ impl NexusBackend {
                     ))))
                 }
             }
+            PeerDDL::ExecutePeer { peer_name, query } => {
+                let peer = self.catalog.get_peer(peer_name).await.map_err(|err| {
+                    PgWireError::ApiError(
+                        format!("unable to get peer config: {:?}", err).into(),
+                    )
+                })?;
+                let executor = self.get_peer_executor(&peer).await.map_err(|err| {
+                    PgWireError::ApiError(
+                        format!("unable to get peer executor: {:?}", err).into(),
+                    )
+                })?;
+                let res =
executor.execute_raw(query).await?; + self.process_execution(res, Some(Box::new(peer))).await + } PeerDDL::DropMirror { .. } => self.handle_drop_mirror(&nexus_stmt).await, PeerDDL::DropPeer { if_exists, @@ -578,14 +590,8 @@ impl NexusBackend { } }; - let res = self - .execute_statement(executor.as_ref(), &stmt, peer_holder) - .await; - // log the error if execution failed - if let Err(err) = &res { - tracing::error!("query execution failed: {:?}", err); - } - res + let res = executor.execute(&stmt).await?; + self.process_execution(res, peer_holder).await } NexusStatement::PeerCursor { stmt, cursor } => { @@ -606,12 +612,13 @@ impl NexusBackend { } }; - self.execute_statement(executor.as_ref(), &stmt, None).await + let res = executor.execute(&stmt).await?; + self.process_execution(res, None).await } NexusStatement::Rollback { stmt } => { - self.execute_statement(self.catalog.as_ref(), &stmt, None) - .await + let res = self.catalog.execute(&stmt).await?; + self.process_execution(res, None).await } NexusStatement::Empty => Ok(vec![Response::EmptyQuery]), @@ -1105,7 +1112,9 @@ pub async fn main() -> anyhow::Result<()> { let catalog_config = get_catalog_config(&args).await?; if args.migrations_disabled && args.migrations_only { - return Err(anyhow::anyhow!("Invalid configuration, migrations cannot be enabled and disabled at the same time")); + return Err(anyhow::anyhow!( + "Invalid configuration, migrations cannot be enabled and disabled at the same time" + )); } if !args.migrations_disabled { diff --git a/protos/flow.proto b/protos/flow.proto index d1681fd8d..42170a563 100644 --- a/protos/flow.proto +++ b/protos/flow.proto @@ -385,6 +385,7 @@ enum FlowStatus { STATUS_SNAPSHOT = 5; STATUS_TERMINATING = 6; STATUS_TERMINATED = 7; + STATUS_COMPLETED = 8; } message CDCFlowConfigUpdate { @@ -466,3 +467,28 @@ message DropFlowActivityInput { string peer_name = 2; } +message StartMaintenanceFlowInput { +} + +message StartMaintenanceFlowOutput { + string version = 1; +} + +message EndMaintenanceFlowInput { +} + +message EndMaintenanceFlowOutput { + string version = 1; +} + +message MaintenanceMirror { + int64 mirror_id = 1; + string mirror_name = 2; + string workflow_id = 3; + bool is_cdc = 4; + google.protobuf.Timestamp mirror_created_at = 5; +} + +message MaintenanceMirrors { + repeated MaintenanceMirror mirrors = 1; +} diff --git a/protos/route.proto b/protos/route.proto index 0265f221e..1c6d38ed6 100644 --- a/protos/route.proto +++ b/protos/route.proto @@ -441,6 +441,38 @@ message ResyncMirrorRequest { message ResyncMirrorResponse { } +message PeerDBStateRequest { +} + +enum InstanceStatus { + INSTANCE_STATUS_UNKNOWN = 0; + INSTANCE_STATUS_READY = 1; + INSTANCE_STATUS_MAINTENANCE = 3; +} + +message InstanceInfoRequest { +} + +message InstanceInfoResponse { + InstanceStatus status = 1; +} + +enum MaintenanceStatus { + MAINTENANCE_STATUS_UNKNOWN = 0; + MAINTENANCE_STATUS_START = 1; + MAINTENANCE_STATUS_END = 2; +} + +message MaintenanceRequest { + MaintenanceStatus status = 1; + bool use_peerflow_task_queue = 2; +} + +message MaintenanceResponse { + string workflow_id = 1; + string run_id = 2; +} + service FlowService { rpc ValidatePeer(ValidatePeerRequest) returns (ValidatePeerResponse) { option (google.api.http) = { @@ -595,4 +627,12 @@ service FlowService { rpc ResyncMirror(ResyncMirrorRequest) returns (ResyncMirrorResponse) { option (google.api.http) = { post: "/v1/mirrors/resync", body: "*" }; } + + rpc GetInstanceInfo(InstanceInfoRequest) returns (InstanceInfoResponse) { + option 
(google.api.http) = { get: "/v1/instance/info" }; + } + + rpc Maintenance(MaintenanceRequest) returns (MaintenanceResponse) { + option (google.api.http) = { post: "/v1/instance/maintenance", body: "*" }; + } } diff --git a/renovate.json b/renovate.json new file mode 100644 index 000000000..b9a8ffef6 --- /dev/null +++ b/renovate.json @@ -0,0 +1,44 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "local>PeerDB-io/.github:renovate-config" + ], + "packageRules": [ + { + "groupName": "{{manager}} dependencies", + "groupSlug": "{{manager}}", + "packageRules": [ + { + "groupName": "{{manager}} dependencies", + "groupSlug": "{{manager}}", + "matchPackagePatterns": [ + "*" + ] + } + ], + "separateMajorMinor": false, + "automerge": true + }, + { + "matchPackageNames": ["mysql_async"], + "matchManagers": ["cargo"], + "enabled": false + }, + { + "matchPackageNames": ["next"], + "matchManagers": ["npm"], + "matchUpdateTypes": ["major"], + "enabled": false + } + ], + "vulnerabilityAlerts": { + "enabled": true + }, + "timezone": "Etc/UTC", + "schedule": [ + "after 5pm on monday" + ], + "additionalReviewers": [ + "team:eng" + ] +} diff --git a/stacks/flow.Dockerfile b/stacks/flow.Dockerfile index de9dbb0d5..2c6f375d0 100644 --- a/stacks/flow.Dockerfile +++ b/stacks/flow.Dockerfile @@ -1,6 +1,6 @@ -# syntax=docker/dockerfile:1.2 +# syntax=docker/dockerfile:1.11@sha256:10c699f1b6c8bdc8f6b4ce8974855dd8542f1768c26eb240237b8f1c9c6c9976 -FROM golang:1.23-alpine AS builder +FROM golang:1.23-alpine@sha256:c694a4d291a13a9f9d94933395673494fc2cc9d4777b85df3a7e70b3492d3574 AS builder RUN apk add --no-cache gcc geos-dev musl-dev WORKDIR /root/flow @@ -18,7 +18,7 @@ WORKDIR /root/flow ENV CGO_ENABLED=1 RUN go build -ldflags="-s -w" -o /root/peer-flow -FROM alpine:3.20 AS flow-base +FROM alpine:3.20@sha256:1e42bbe2508154c9126d48c2b8a75420c3544343bf86fd041fb7527e017a4b4a AS flow-base RUN apk add --no-cache ca-certificates geos && \ adduser -s /bin/sh -D peerdb USER peerdb @@ -45,6 +45,8 @@ FROM flow-base AS flow-worker # Sane defaults for OpenTelemetry ENV OTEL_METRIC_EXPORT_INTERVAL=10000 ENV OTEL_EXPORTER_OTLP_COMPRESSION=gzip +ARG PEERDB_VERSION_SHA_SHORT +ENV PEERDB_VERSION_SHA_SHORT=${PEERDB_VERSION_SHA_SHORT} ENTRYPOINT [\ "./peer-flow",\ @@ -52,7 +54,20 @@ ENTRYPOINT [\ ] FROM flow-base AS flow-snapshot-worker + +ARG PEERDB_VERSION_SHA_SHORT +ENV PEERDB_VERSION_SHA_SHORT=${PEERDB_VERSION_SHA_SHORT} ENTRYPOINT [\ "./peer-flow",\ "snapshot-worker"\ ] + + +FROM flow-base AS flow-maintenance + +ARG PEERDB_VERSION_SHA_SHORT +ENV PEERDB_VERSION_SHA_SHORT=${PEERDB_VERSION_SHA_SHORT} +ENTRYPOINT [\ + "./peer-flow",\ + "maintenance"\ + ] diff --git a/stacks/peerdb-server.Dockerfile b/stacks/peerdb-server.Dockerfile index e30dd8bfc..50c69f807 100644 --- a/stacks/peerdb-server.Dockerfile +++ b/stacks/peerdb-server.Dockerfile @@ -1,6 +1,6 @@ -# syntax=docker/dockerfile:1 +# syntax=docker/dockerfile:1@sha256:865e5dd094beca432e8c0a1d5e1c465db5f998dca4e439981029b3b81fb39ed5 -FROM lukemathwalker/cargo-chef:latest-rust-alpine3.20 as chef +FROM lukemathwalker/cargo-chef:latest-rust-alpine3.20@sha256:75f772fe2d870acb77ffdb2206810cd694a6720263f94c74fcc75080963dbff5 as chef WORKDIR /root FROM chef as planner @@ -9,7 +9,8 @@ WORKDIR /root/nexus RUN cargo chef prepare --recipe-path recipe.json FROM chef as builder -RUN apk add --no-cache build-base pkgconfig curl unzip +ENV OPENSSL_STATIC=1 +RUN apk add --no-cache build-base pkgconfig curl unzip openssl-dev openssl-libs-static WORKDIR /root/nexus 
COPY scripts /root/scripts RUN /root/scripts/install-protobuf.sh @@ -21,11 +22,15 @@ COPY protos /root/protos WORKDIR /root/nexus RUN cargo build --release --bin peerdb-server -FROM alpine:3.20 +FROM alpine:3.20@sha256:1e42bbe2508154c9126d48c2b8a75420c3544343bf86fd041fb7527e017a4b4a RUN apk add --no-cache ca-certificates postgresql-client curl iputils && \ adduser -s /bin/sh -D peerdb && \ install -d -m 0755 -o peerdb /var/log/peerdb USER peerdb WORKDIR /home/peerdb COPY --from=builder --chown=peerdb /root/nexus/target/release/peerdb-server . + +ARG PEERDB_VERSION_SHA_SHORT +ENV PEERDB_VERSION_SHA_SHORT=${PEERDB_VERSION_SHA_SHORT} + ENTRYPOINT ["./peerdb-server"] diff --git a/stacks/peerdb-ui.Dockerfile b/stacks/peerdb-ui.Dockerfile index 8f281273b..42cedca11 100644 --- a/stacks/peerdb-ui.Dockerfile +++ b/stacks/peerdb-ui.Dockerfile @@ -1,7 +1,7 @@ -# syntax=docker/dockerfile:1.2 +# syntax=docker/dockerfile:1.11@sha256:10c699f1b6c8bdc8f6b4ce8974855dd8542f1768c26eb240237b8f1c9c6c9976 # Base stage -FROM node:22-alpine AS base +FROM node:22-alpine@sha256:b64ced2e7cd0a4816699fe308ce6e8a08ccba463c757c00c14cd372e3d2c763e AS base ENV NPM_CONFIG_UPDATE_NOTIFIER=false RUN apk add --no-cache openssl && \ mkdir /app && \ @@ -35,5 +35,8 @@ ENV PORT 3000 # set hostname to localhost ENV HOSTNAME "0.0.0.0" +ARG PEERDB_VERSION_SHA_SHORT +ENV PEERDB_VERSION_SHA_SHORT=${PEERDB_VERSION_SHA_SHORT} + ENTRYPOINT ["/app/entrypoint.sh"] CMD ["node", "server.js"] diff --git a/ui/app/mirrors/create/helpers/cdc.ts b/ui/app/mirrors/create/helpers/cdc.ts index 99dd229cb..957564d67 100644 --- a/ui/app/mirrors/create/helpers/cdc.ts +++ b/ui/app/mirrors/create/helpers/cdc.ts @@ -22,12 +22,12 @@ export const cdcSettings: MirrorSetting[] = [ setter( (curr: CDCConfig): CDCConfig => ({ ...curr, - maxBatchSize: (value as number) || 1000000, + maxBatchSize: (value as number) || 250000, }) ), - tips: 'The number of rows PeerDB will pull from source at a time. If left empty, the default value is 1,000,000 rows.', + tips: 'The number of rows PeerDB will pull from source at a time. If left empty, the default value is 250,000 rows.', type: 'number', - default: '1000000', + default: '250000', advanced: AdvancedSettingType.ALL, }, { @@ -78,11 +78,11 @@ export const cdcSettings: MirrorSetting[] = [ setter( (curr: CDCConfig): CDCConfig => ({ ...curr, - snapshotNumRowsPerPartition: parseInt(value as string, 10) || 1000000, + snapshotNumRowsPerPartition: parseInt(value as string, 10) || 250000, }) ), - tips: 'PeerDB splits up table data into partitions for increased performance. This setting controls the number of rows per partition. The default value is 1000000.', - default: '1000000', + tips: 'PeerDB splits up table data into partitions for increased performance. This setting controls the number of rows per partition. 
The default value is 250000.', + default: '250000', type: 'number', advanced: AdvancedSettingType.ALL, }, diff --git a/ui/app/mirrors/create/helpers/common.ts b/ui/app/mirrors/create/helpers/common.ts index d4ba5747a..f29a2376c 100644 --- a/ui/app/mirrors/create/helpers/common.ts +++ b/ui/app/mirrors/create/helpers/common.ts @@ -25,10 +25,10 @@ export const blankCDCSetting: CDCConfig = { destinationName: '', flowJobName: '', tableMappings: [], - maxBatchSize: 1000000, + maxBatchSize: 250000, doInitialSnapshot: true, publicationName: '', - snapshotNumRowsPerPartition: 1000000, + snapshotNumRowsPerPartition: 250000, snapshotMaxParallelWorkers: 4, snapshotNumTablesInParallel: 1, snapshotStagingPath: '', diff --git a/ui/app/peers/[peerName]/lagGraph.tsx b/ui/app/peers/[peerName]/lagGraph.tsx index 87b90fa8c..d971bee8f 100644 --- a/ui/app/peers/[peerName]/lagGraph.tsx +++ b/ui/app/peers/[peerName]/lagGraph.tsx @@ -21,9 +21,10 @@ type LagGraphProps = { function parseLSN(lsn: string): number { if (!lsn) return 0; const [lsn1, lsn2] = lsn.split('/'); - return Number( - (BigInt(parseInt(lsn1, 16)) << BigInt(32)) | BigInt(parseInt(lsn2, 16)) - ); + const parsedLsn1 = parseInt(lsn1, 16); + const parsedLsn2 = parseInt(lsn2, 16); + if (isNaN(parsedLsn1) || isNaN(parsedLsn2)) return 0; + return Number((BigInt(parsedLsn1) << BigInt(32)) | BigInt(parsedLsn2)); } export default function LagGraph({ peerName }: LagGraphProps) { diff --git a/ui/app/settings/page.tsx b/ui/app/settings/page.tsx index 7ebb1b4cd..c1d51a228 100644 --- a/ui/app/settings/page.tsx +++ b/ui/app/settings/page.tsx @@ -9,10 +9,7 @@ import { Button } from '@/lib/Button'; import { Icon } from '@/lib/Icon'; import { Label } from '@/lib/Label'; import { SearchField } from '@/lib/SearchField'; -import { Table, TableCell, TableRow } from '@/lib/Table'; import { TextField } from '@/lib/TextField'; -import { Tooltip } from '@/lib/Tooltip'; -import { MaterialSymbol } from 'material-symbols'; import { useEffect, useMemo, useState } from 'react'; import { ToastContainer } from 'react-toastify'; import 'react-toastify/dist/ReactToastify.css'; @@ -22,40 +19,32 @@ const ROWS_PER_PAGE = 7; const ApplyModeIconWithTooltip = ({ applyMode }: { applyMode: number }) => { let tooltipText = ''; - let iconName: MaterialSymbol = 'help'; + switch (applyMode.toString()) { case DynconfApplyMode[DynconfApplyMode.APPLY_MODE_IMMEDIATE].toString(): tooltipText = 'Changes to this configuration will apply immediately'; - iconName = 'bolt'; break; case DynconfApplyMode[DynconfApplyMode.APPLY_MODE_AFTER_RESUME].toString(): tooltipText = 'Changes to this configuration will apply after resume'; - iconName = 'cached'; break; case DynconfApplyMode[DynconfApplyMode.APPLY_MODE_RESTART].toString(): tooltipText = 'Changes to this configuration will apply after server restart.'; - iconName = 'restart_alt'; break; case DynconfApplyMode[DynconfApplyMode.APPLY_MODE_NEW_MIRROR].toString(): tooltipText = 'Changes to this configuration will apply only to new mirrors'; - iconName = 'new_window'; break; default: tooltipText = 'Unknown apply mode'; - iconName = 'help'; } return (
- - - +
); }; - const DynamicSettingItem = ({ setting, onSettingUpdate, @@ -65,7 +54,7 @@ const DynamicSettingItem = ({ }) => { const [editMode, setEditMode] = useState(false); const [newValue, setNewValue] = useState(setting.value); - + const [showDescription, setShowDescription] = useState(false); const handleEdit = () => { setEditMode(true); }; @@ -130,41 +119,80 @@ const DynamicSettingItem = ({ }; return ( - - - - - - {editMode ? ( -
- setNewValue(e.target.value)} - variant='simple' - /> - +
+
+ +
+
+
+
+
+ setNewValue(e.target.value)} + variant='simple' + readOnly={!editMode} + disabled={!editMode} + /> + +
+
+ +
+
- ) : ( -
- {setting.value || 'N/A'} - +
+
- )} - - - {setting.defaultValue || 'N/A'} - - - {setting.description || 'N/A'} - - - - - + + {showDescription && ( +
+ +
+ )} +
+
+
); }; @@ -172,10 +200,7 @@ const SettingsPage = () => { const [settings, setSettings] = useState({ settings: [], }); - const [currentPage, setCurrentPage] = useState(1); const [searchQuery, setSearchQuery] = useState(''); - const [sortDir, setSortDir] = useState<'asc' | 'dsc'>('asc'); - const sortField = 'name'; const fetchSettings = async () => { const response = await fetch('/api/v1/dynamic_settings'); @@ -189,101 +214,44 @@ const SettingsPage = () => { const filteredSettings = useMemo( () => - settings.settings - .filter((setting) => - setting.name.toLowerCase().includes(searchQuery.toLowerCase()) - ) - .sort((a, b) => { - const aValue = a[sortField]; - const bValue = b[sortField]; - if (aValue < bValue) return sortDir === 'dsc' ? 1 : -1; - if (aValue > bValue) return sortDir === 'dsc' ? -1 : 1; - return 0; - }), - [settings, searchQuery, sortDir] + settings.settings.filter((setting) => + setting.name.toLowerCase().includes(searchQuery.toLowerCase()) + ), + [settings, searchQuery] ); - const totalPages = Math.ceil(filteredSettings.length / ROWS_PER_PAGE); - const displayedSettings = useMemo(() => { - const startRow = (currentPage - 1) * ROWS_PER_PAGE; - const endRow = startRow + ROWS_PER_PAGE; - return filteredSettings.slice(startRow, endRow); - }, [filteredSettings, currentPage]); - - const handlePrevPage = () => { - if (currentPage > 1) setCurrentPage(currentPage - 1); - }; - - const handleNextPage = () => { - if (currentPage < totalPages) setCurrentPage(currentPage + 1); - }; return ( -
- Settings List} - toolbar={{ - left: ( -
- - - - - - -
- ), - right: ( - setSearchQuery(e.target.value)} - /> - ), +
+ + setSearchQuery(e.target.value)} + style={{ fontSize: 13 }} + /> +
- {[ - { header: 'Configuration Name', width: '35%' }, - { header: 'Current Value', width: '10%' }, - { header: 'Default Value', width: '10%' }, - { header: 'Description', width: '35%' }, - { header: 'Apply Mode', width: '10%' }, - ].map(({ header, width }) => ( - - {header} - - ))} - - } > - {displayedSettings.map((setting) => ( + {filteredSettings.map((setting) => ( ))} -
- + +
); }; diff --git a/ui/package-lock.json b/ui/package-lock.json index 636f50209..f233b02b9 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -29,8 +29,8 @@ "@types/react": "^18.3.11", "@types/react-dom": "^18.3.0", "classnames": "^2.5.1", - "lucide-react": "^0.453.0", - "material-symbols": "^0.25.0", + "lucide-react": "^0.460.0", + "material-symbols": "^0.27.0", "moment": "^2.30.1", "moment-timezone": "^0.5.46", "next": "^14.2.14", @@ -48,20 +48,20 @@ "zod": "^3.23.8" }, "devDependencies": { - "autoprefixer": "^10.4.20", - "copy-webpack-plugin": "^12.0.2", - "eslint": "^8.57.1", - "eslint-config-next": "^14.2.14", - "eslint-config-prettier": "^9.1.0", - "less": "^4.2.0", - "postcss": "^8.4.47", - "prettier": "^3.3.3", - "prettier-plugin-organize-imports": "^4.1.0", - "string-width": "^7.2.0", - "tailwindcss": "^3.4.13", - "tailwindcss-animate": "^1.0.7", - "typescript": "^5.6.2", - "webpack": "^5.95.0" + "autoprefixer": "10.4.20", + "copy-webpack-plugin": "12.0.2", + "eslint": "8.57.1", + "eslint-config-next": "14.2.17", + "eslint-config-prettier": "9.1.0", + "less": "4.2.0", + "postcss": "8.4.47", + "prettier": "3.3.3", + "prettier-plugin-organize-imports": "4.1.0", + "string-width": "7.2.0", + "tailwindcss": "3.4.14", + "tailwindcss-animate": "1.0.7", + "typescript": "5.6.3", + "webpack": "5.96.1" } }, "node_modules/@alloc/quick-lru": { @@ -77,9 +77,9 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.0.tgz", - "integrity": "sha512-INCKxTtbXtcNbUZ3YXutwMpEleqttcswhAdee7dhuoVrD2cnuc3PqtERBtxkX5nziX9vnBL8WXmSGwv8CuPV6g==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", + "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==", "license": "MIT", "dependencies": { "@babel/helper-validator-identifier": "^7.25.9", @@ -91,12 +91,12 @@ } }, "node_modules/@babel/generator": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.0.tgz", - "integrity": "sha512-/AIkAmInnWwgEAJGQr9vY0c66Mj6kjkE2ZPB1PurTRaRAh3U+J45sAQMjQDJdh4WbR3l0x5xkimXBKyBXXAu2w==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.2.tgz", + "integrity": "sha512-zevQbhbau95nkoxSq3f/DC/SC+EEOUZd3DYqfSkMhY2/wfSeaHV1Ew4vk8e+x8lja31IbyuUa2uQ3JONqKbysw==", "license": "MIT", "dependencies": { - "@babel/parser": "^7.26.0", + "@babel/parser": "^7.26.2", "@babel/types": "^7.26.0", "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.25", @@ -138,9 +138,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.26.1", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.1.tgz", - "integrity": "sha512-reoQYNiAJreZNsJzyrDNzFQ+IQ5JFiIzAHJg9bn94S3l+4++J7RsIhNMoB+lgP/9tpmiAQqspv+xfdxTSzREOw==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.2.tgz", + "integrity": "sha512-DWMCZH9WA4Maitz2q21SRKHo9QXZxkDsbNZoVD62gusNtNBBqDg9i7uOhASfTfIGNzW+O+r7+jAlM8dwphcJKQ==", "license": "MIT", "dependencies": { "@babel/types": "^7.26.0" @@ -376,9 +376,9 @@ } }, "node_modules/@eslint-community/regexpp": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.0.tgz", - "integrity": "sha512-gh7PdNombP8ftL8TinYC8Xd7WEypB8EKV4PI2h0eMzndKjPCXuo2zUiZtD2Hu+MSPt02Ty2MdS788ADl9ai1rA==", + "version": "4.12.1", + "resolved": 
"https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz", + "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==", "dev": true, "license": "MIT", "engines": { @@ -429,9 +429,9 @@ } }, "node_modules/@floating-ui/dom": { - "version": "1.6.11", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.11.tgz", - "integrity": "sha512-qkMCxSR24v2vGkhYDo/UzxfJN3D4syqSjyuTFz6C7XcpU1pASPRieNI0Kj5VP3/503mOfYiGY891ugBX1GlABQ==", + "version": "1.6.12", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.12.tgz", + "integrity": "sha512-NP83c0HjokcGVEMeoStg317VD9W7eDlGK7457dMBANbKA6GJZdc7rjujdgqzTaz93jkGgc5P/jeWbaCHnMNc+w==", "license": "MIT", "dependencies": { "@floating-ui/core": "^1.6.0", @@ -699,15 +699,15 @@ } }, "node_modules/@next/env": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.16.tgz", - "integrity": "sha512-fLrX5TfJzHCbnZ9YUSnGW63tMV3L4nSfhgOQ0iCcX21Pt+VSTDuaLsSuL8J/2XAiVA5AnzvXDpf6pMs60QxOag==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.17.tgz", + "integrity": "sha512-MCgO7VHxXo8sYR/0z+sk9fGyJJU636JyRmkjc7ZJY8Hurl8df35qG5hoAh5KMs75FLjhlEo9bb2LGe89Y/scDA==", "license": "MIT" }, "node_modules/@next/eslint-plugin-next": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-14.2.16.tgz", - "integrity": "sha512-noORwKUMkKc96MWjTOwrsUCjky0oFegHbeJ1yEnQBGbMHAaTEIgLZIIfsYF0x3a06PiS+2TXppfifR+O6VWslg==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-14.2.17.tgz", + "integrity": "sha512-fW6/u1jjlBQrMs1ExyINehaK3B+LEW5UqdF6QYL07QK+SECkX0hnEyPMaNKj0ZFzirQ9D8jLWQ00P8oua4yx9g==", "dev": true, "license": "MIT", "dependencies": { @@ -715,9 +715,9 @@ } }, "node_modules/@next/swc-darwin-arm64": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.16.tgz", - "integrity": "sha512-uFT34QojYkf0+nn6MEZ4gIWQ5aqGF11uIZ1HSxG+cSbj+Mg3+tYm8qXYd3dKN5jqKUm5rBVvf1PBRO/MeQ6rxw==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.17.tgz", + "integrity": "sha512-WiOf5nElPknrhRMTipXYTJcUz7+8IAjOYw3vXzj3BYRcVY0hRHKWgTgQ5439EvzQyHEko77XK+yN9x9OJ0oOog==", "cpu": [ "arm64" ], @@ -731,9 +731,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.16.tgz", - "integrity": "sha512-mCecsFkYezem0QiZlg2bau3Xul77VxUD38b/auAjohMA22G9KTJneUYMv78vWoCCFkleFAhY1NIvbyjj1ncG9g==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.17.tgz", + "integrity": "sha512-29y425wYnL17cvtxrDQWC3CkXe/oRrdt8ie61S03VrpwpPRI0XsnTvtKO06XCisK4alaMnZlf8riwZIbJTaSHQ==", "cpu": [ "x64" ], @@ -747,9 +747,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.16.tgz", - "integrity": "sha512-yhkNA36+ECTC91KSyZcgWgKrYIyDnXZj8PqtJ+c2pMvj45xf7y/HrgI17hLdrcYamLfVt7pBaJUMxADtPaczHA==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.17.tgz", + "integrity": "sha512-SSHLZls3ZwNEHsc+d0ynKS+7Af0Nr8+KTUBAy9pm6xz9SHkJ/TeuEg6W3cbbcMSh6j4ITvrjv3Oi8n27VR+IPw==", "cpu": [ "arm64" ], @@ -763,9 +763,9 
@@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.16.tgz", - "integrity": "sha512-X2YSyu5RMys8R2lA0yLMCOCtqFOoLxrq2YbazFvcPOE4i/isubYjkh+JCpRmqYfEuCVltvlo+oGfj/b5T2pKUA==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.17.tgz", + "integrity": "sha512-VFge37us5LNPatB4F7iYeuGs9Dprqe4ZkW7lOEJM91r+Wf8EIdViWHLpIwfdDXinvCdLl6b4VyLpEBwpkctJHA==", "cpu": [ "arm64" ], @@ -779,9 +779,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.16.tgz", - "integrity": "sha512-9AGcX7VAkGbc5zTSa+bjQ757tkjr6C/pKS7OK8cX7QEiK6MHIIezBLcQ7gQqbDW2k5yaqba2aDtaBeyyZh1i6Q==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.17.tgz", + "integrity": "sha512-aaQlpxUVb9RZ41adlTYVQ3xvYEfBPUC8+6rDgmQ/0l7SvK8S1YNJzPmDPX6a4t0jLtIoNk7j+nroS/pB4nx7vQ==", "cpu": [ "x64" ], @@ -795,9 +795,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.16.tgz", - "integrity": "sha512-Klgeagrdun4WWDaOizdbtIIm8khUDQJ/5cRzdpXHfkbY91LxBXeejL4kbZBrpR/nmgRrQvmz4l3OtttNVkz2Sg==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.17.tgz", + "integrity": "sha512-HSyEiFaEY3ay5iATDqEup5WAfrhMATNJm8dYx3ZxL+e9eKv10XKZCwtZByDoLST7CyBmyDz+OFJL1wigyXeaoA==", "cpu": [ "x64" ], @@ -811,9 +811,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.16.tgz", - "integrity": "sha512-PwW8A1UC1Y0xIm83G3yFGPiOBftJK4zukTmk7DI1CebyMOoaVpd8aSy7K6GhobzhkjYvqS/QmzcfsWG2Dwizdg==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.17.tgz", + "integrity": "sha512-h5qM9Btqv87eYH8ArrnLoAHLyi79oPTP2vlGNSg4CDvUiXgi7l0+5KuEGp5pJoMhjuv9ChRdm7mRlUUACeBt4w==", "cpu": [ "arm64" ], @@ -827,9 +827,9 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.16.tgz", - "integrity": "sha512-jhPl3nN0oKEshJBNDAo0etGMzv0j3q3VYorTSFqH1o3rwv1MQRdor27u1zhkgsHPNeY1jxcgyx1ZsCkDD1IHgg==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.17.tgz", + "integrity": "sha512-BD/G++GKSLexQjdyoEUgyo5nClU7er5rK0sE+HlEqnldJSm96CIr/+YOTT063LVTT/dUOeQsNgp5DXr86/K7/A==", "cpu": [ "ia32" ], @@ -843,9 +843,9 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.16.tgz", - "integrity": "sha512-OA7NtfxgirCjfqt+02BqxC3MIgM/JaGjw9tOe4fyZgPsqfseNiMPnCRP44Pfs+Gpo9zPN+SXaFsgP6vk8d571A==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.17.tgz", + "integrity": "sha512-vkQfN1+4V4KqDibkW2q0sJ6CxQuXq5l2ma3z0BRcfIqkAMZiiW67T9yCpwqJKP68QghBtPEFjPAlaqe38O6frw==", "cpu": [ "x64" ], @@ -1337,12 +1337,12 @@ } }, "node_modules/@radix-ui/react-icons": { - "version": "1.3.0", - "resolved": 
"https://registry.npmjs.org/@radix-ui/react-icons/-/react-icons-1.3.0.tgz", - "integrity": "sha512-jQxj/0LKgp+j9BiTXz3O3sgs26RNet2iLWmsPyRz2SIcR4q/4SbazXfnYwbAr+vLYKSfc7qxzyGQA1HLlYiuNw==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-icons/-/react-icons-1.3.2.tgz", + "integrity": "sha512-fyQIhGDhzfc9pK2kH6Pl9c4BDJGfMkPqkyIgYDthyNYoNg3wVhoJMMh19WS4Up/1KMPFVpNsT2q3WmXn2N1m6g==", "license": "MIT", "peerDependencies": { - "react": "^16.x || ^17.x || ^18.x" + "react": "^16.x || ^17.x || ^18.x || ^19.0.0 || ^19.0.0-rc" } }, "node_modules/@radix-ui/react-id": { @@ -1879,9 +1879,9 @@ } }, "node_modules/@radix-ui/react-tooltip": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.1.3.tgz", - "integrity": "sha512-Z4w1FIS0BqVFI2c1jZvb/uDVJijJjJ2ZMuPV81oVgTZ7g3BZxobplnMVvXtFWgtozdvYJ+MFWtwkM5S2HnAong==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.1.4.tgz", + "integrity": "sha512-QpObUH/ZlpaO4YgHSaYzrLO2VuO+ZBFFgGzjMUPwtiYnAzzNNDPJeEGRrT7qNOrWm/Jr08M1vlp+vTHtnSQ0Uw==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.0", @@ -2129,9 +2129,9 @@ } }, "node_modules/@tremor/react": { - "version": "3.18.3", - "resolved": "https://registry.npmjs.org/@tremor/react/-/react-3.18.3.tgz", - "integrity": "sha512-7QyGE2W9f2FpwH24TKy3/mqBgLl4sHZeQcXP3rxXZ8W2AUq7AVaG1+vIT3xXxISrkh7zknjWlZsuhoF8NWNVDw==", + "version": "3.18.4", + "resolved": "https://registry.npmjs.org/@tremor/react/-/react-3.18.4.tgz", + "integrity": "sha512-HDjYbuzxQIZvosGzB1j1nCSuLLRdKRHPfRmoGUyI57cesbThFzWuFHz07Sio9Vhk/ew3TKJUZPy+ljfZ3u1M4g==", "license": "Apache 2.0", "dependencies": { "@floating-ui/react": "^0.19.2", @@ -2140,7 +2140,7 @@ "date-fns": "^3.6.0", "react-day-picker": "^8.10.1", "react-transition-state": "^2.1.2", - "recharts": "^2.12.7", + "recharts": "^2.13.3", "tailwind-merge": "^2.5.2" }, "peerDependencies": { @@ -2211,6 +2211,28 @@ "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==", "license": "MIT" }, + "node_modules/@types/eslint": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-9.6.1.tgz", + "integrity": "sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.7", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", + "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, "node_modules/@types/estree": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", @@ -2233,9 +2255,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "22.8.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.8.1.tgz", - "integrity": "sha512-k6Gi8Yyo8EtrNtkHXutUu2corfDf9su95VYVP10aGYMMROM6SAItZi0w1XszA6RtWTHSVp5OeFof37w0IEqCQg==", + "version": "22.9.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.9.0.tgz", + "integrity": "sha512-vuyHg81vvWA1Z1ELfvLko2c8f34gyA0zaic0+Rllc5lbCnbSyuvb2Oxpm6TAUAC/2xZN3QGqxBNggD1nNR2AfQ==", "license": "MIT", "dependencies": { "undici-types": "~6.19.8" @@ 
-2288,17 +2310,17 @@ "license": "MIT" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.11.0.tgz", - "integrity": "sha512-KhGn2LjW1PJT2A/GfDpiyOfS4a8xHQv2myUagTM5+zsormOmBlYsnQ6pobJ8XxJmh6hnHwa2Mbe3fPrDJoDhbA==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.13.0.tgz", + "integrity": "sha512-nQtBLiZYMUPkclSeC3id+x4uVd1SGtHuElTxL++SfP47jR0zfkZBJHc+gL4qPsgTuypz0k8Y2GheaDYn6Gy3rg==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "8.11.0", - "@typescript-eslint/type-utils": "8.11.0", - "@typescript-eslint/utils": "8.11.0", - "@typescript-eslint/visitor-keys": "8.11.0", + "@typescript-eslint/scope-manager": "8.13.0", + "@typescript-eslint/type-utils": "8.13.0", + "@typescript-eslint/utils": "8.13.0", + "@typescript-eslint/visitor-keys": "8.13.0", "graphemer": "^1.4.0", "ignore": "^5.3.1", "natural-compare": "^1.4.0", @@ -2322,16 +2344,16 @@ } }, "node_modules/@typescript-eslint/parser": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.11.0.tgz", - "integrity": "sha512-lmt73NeHdy1Q/2ul295Qy3uninSqi6wQI18XwSpm8w0ZbQXUpjCAWP1Vlv/obudoBiIjJVjlztjQ+d/Md98Yxg==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.13.0.tgz", + "integrity": "sha512-w0xp+xGg8u/nONcGw1UXAr6cjCPU1w0XVyBs6Zqaj5eLmxkKQAByTdV/uGgNN5tVvN/kKpoQlP2cL7R+ajZZIQ==", "dev": true, "license": "BSD-2-Clause", "dependencies": { - "@typescript-eslint/scope-manager": "8.11.0", - "@typescript-eslint/types": "8.11.0", - "@typescript-eslint/typescript-estree": "8.11.0", - "@typescript-eslint/visitor-keys": "8.11.0", + "@typescript-eslint/scope-manager": "8.13.0", + "@typescript-eslint/types": "8.13.0", + "@typescript-eslint/typescript-estree": "8.13.0", + "@typescript-eslint/visitor-keys": "8.13.0", "debug": "^4.3.4" }, "engines": { @@ -2351,14 +2373,14 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.11.0.tgz", - "integrity": "sha512-Uholz7tWhXmA4r6epo+vaeV7yjdKy5QFCERMjs1kMVsLRKIrSdM6o21W2He9ftp5PP6aWOVpD5zvrvuHZC0bMQ==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.13.0.tgz", + "integrity": "sha512-XsGWww0odcUT0gJoBZ1DeulY1+jkaHUciUq4jKNv4cpInbvvrtDoyBH9rE/n2V29wQJPk8iCH1wipra9BhmiMA==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.11.0", - "@typescript-eslint/visitor-keys": "8.11.0" + "@typescript-eslint/types": "8.13.0", + "@typescript-eslint/visitor-keys": "8.13.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2369,14 +2391,14 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.11.0.tgz", - "integrity": "sha512-ItiMfJS6pQU0NIKAaybBKkuVzo6IdnAhPFZA/2Mba/uBjuPQPet/8+zh5GtLHwmuFRShZx+8lhIs7/QeDHflOg==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.13.0.tgz", + "integrity": "sha512-Rqnn6xXTR316fP4D2pohZenJnp+NwQ1mo7/JM+J1LWZENSLkJI8ID8QNtlvFeb0HnFSK94D6q0cnMX6SbE5/vA==", "dev": true, "license": "MIT", "dependencies": { - 
"@typescript-eslint/typescript-estree": "8.11.0", - "@typescript-eslint/utils": "8.11.0", + "@typescript-eslint/typescript-estree": "8.13.0", + "@typescript-eslint/utils": "8.13.0", "debug": "^4.3.4", "ts-api-utils": "^1.3.0" }, @@ -2394,9 +2416,9 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.11.0.tgz", - "integrity": "sha512-tn6sNMHf6EBAYMvmPUaKaVeYvhUsrE6x+bXQTxjQRp360h1giATU0WvgeEys1spbvb5R+VpNOZ+XJmjD8wOUHw==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.13.0.tgz", + "integrity": "sha512-4cyFErJetFLckcThRUFdReWJjVsPCqyBlJTi6IDEpc1GWCIIZRFxVppjWLIMcQhNGhdWJJRYFHpHoDWvMlDzng==", "dev": true, "license": "MIT", "engines": { @@ -2408,14 +2430,14 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.11.0.tgz", - "integrity": "sha512-yHC3s1z1RCHoCz5t06gf7jH24rr3vns08XXhfEqzYpd6Hll3z/3g23JRi0jM8A47UFKNc3u/y5KIMx8Ynbjohg==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.13.0.tgz", + "integrity": "sha512-v7SCIGmVsRK2Cy/LTLGN22uea6SaUIlpBcO/gnMGT/7zPtxp90bphcGf4fyrCQl3ZtiBKqVTG32hb668oIYy1g==", "dev": true, "license": "BSD-2-Clause", "dependencies": { - "@typescript-eslint/types": "8.11.0", - "@typescript-eslint/visitor-keys": "8.11.0", + "@typescript-eslint/types": "8.13.0", + "@typescript-eslint/visitor-keys": "8.13.0", "debug": "^4.3.4", "fast-glob": "^3.3.2", "is-glob": "^4.0.3", @@ -2463,16 +2485,16 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.11.0.tgz", - "integrity": "sha512-CYiX6WZcbXNJV7UNB4PLDIBtSdRmRI/nb0FMyqHPTQD1rMjA0foPLaPUV39C/MxkTd/QKSeX+Gb34PPsDVC35g==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.13.0.tgz", + "integrity": "sha512-A1EeYOND6Uv250nybnLZapeXpYMl8tkzYUxqmoKAWnI4sei3ihf2XdZVd+vVOmHGcp3t+P7yRrNsyyiXTvShFQ==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "8.11.0", - "@typescript-eslint/types": "8.11.0", - "@typescript-eslint/typescript-estree": "8.11.0" + "@typescript-eslint/scope-manager": "8.13.0", + "@typescript-eslint/types": "8.13.0", + "@typescript-eslint/typescript-estree": "8.13.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2486,13 +2508,13 @@ } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.11.0.tgz", - "integrity": "sha512-EaewX6lxSjRJnc+99+dqzTeoDZUfyrA52d2/HRrkI830kgovWsmIiTfmr0NZorzqic7ga+1bS60lRBUgR3n/Bw==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.13.0.tgz", + "integrity": "sha512-7N/+lztJqH4Mrf0lb10R/CbI1EaAMMGyF5y0oJvFoAhafwgiRA7TXyd8TFn8FC8k5y2dTsYogg238qavRGNnlw==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.11.0", + "@typescript-eslint/types": "8.13.0", "eslint-visitor-keys": "^3.4.3" }, "engines": { @@ -2698,16 +2720,6 @@ "node": ">=0.4.0" } }, - "node_modules/acorn-import-attributes": { - "version": "1.9.5", - "resolved": 
"https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz", - "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==", - "dev": true, - "license": "MIT", - "peerDependencies": { - "acorn": "^8" - } - }, "node_modules/acorn-jsx": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", @@ -3259,9 +3271,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001672", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001672.tgz", - "integrity": "sha512-XhW1vRo1ob6aeK2w3rTohwTPBLse/rvjq+s3RTSBwnlZqoFFjx9cHsShJjAIbLsLjyoacaTxpLZy9v3gg6zypw==", + "version": "1.0.30001677", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001677.tgz", + "integrity": "sha512-fmfjsOlJUpMWu+mAAtZZZHz7UEwsUxIIvu1TJfO1HqFQvB/B+ii0xr9B5HpbZY/mC4XZ8SvjHJqtAY6pDPQEog==", "funding": [ { "type": "opencollective", @@ -3832,9 +3844,9 @@ "license": "MIT" }, "node_modules/electron-to-chromium": { - "version": "1.5.47", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.47.tgz", - "integrity": "sha512-zS5Yer0MOYw4rtK2iq43cJagHZ8sXN0jDHDKzB+86gSBSAI4v07S97mcq+Gs2vclAxSh1j7vOAHxSVgduiiuVQ==", + "version": "1.5.52", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.52.tgz", + "integrity": "sha512-xtoijJTZ+qeucLBDNztDOuQBE1ksqjvNjvqFoST3nGC7fSpqJ+X6BdTBaY5BHG+IhWWmpc6b/KfpeuEDupEPOQ==", "dev": true, "license": "ISC" }, @@ -3966,9 +3978,9 @@ } }, "node_modules/es-iterator-helpers": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.1.0.tgz", - "integrity": "sha512-/SurEfycdyssORP/E+bj4sEu1CWw4EmLDsHynHwSXQ7utgbrMRWW195pTrCjFgFCddf/UkYm3oqKPRq5i8bJbw==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.2.0.tgz", + "integrity": "sha512-tpxqxncxnpw3c93u8n3VOzACmRFoVmWJqbWXvX/JfKbkhBw1oslgPrUfeSt2psuqyEJFD6N/9lg5i7bsKpoq+Q==", "dev": true, "license": "MIT", "dependencies": { @@ -3980,6 +3992,7 @@ "function-bind": "^1.1.2", "get-intrinsic": "^1.2.4", "globalthis": "^1.0.4", + "gopd": "^1.0.1", "has-property-descriptors": "^1.0.2", "has-proto": "^1.0.3", "has-symbols": "^1.0.3", @@ -4134,13 +4147,13 @@ } }, "node_modules/eslint-config-next": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-14.2.16.tgz", - "integrity": "sha512-HOcnCJsyLXR7B8wmjaCgkTSpz+ijgOyAkP8OlvANvciP8PspBYFEBTmakNMxOf71fY0aKOm/blFIiKnrM4K03Q==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-14.2.17.tgz", + "integrity": "sha512-5tVFG/BoJ4xZwMmumBe3xcDXb2dvVEvy4BeBCXTxrl+DTHjHv687FN2qBjYx6xVH/Se7YRhsH0KoxvZkJOGRVA==", "dev": true, "license": "MIT", "dependencies": { - "@next/eslint-plugin-next": "14.2.16", + "@next/eslint-plugin-next": "14.2.17", "@rushstack/eslint-patch": "^1.3.3", "@typescript-eslint/eslint-plugin": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", "@typescript-eslint/parser": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", @@ -5988,9 +6001,9 @@ } }, "node_modules/lucide-react": { - "version": "0.453.0", - "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.453.0.tgz", - "integrity": "sha512-kL+RGZCcJi9BvJtzg2kshO192Ddy9hv3ij+cPrVPWSRzgCWCVazoQJxOjAwgK53NomL07HB7GPHW120FimjNhQ==", + "version": "0.460.0", + "resolved": 
"https://registry.npmjs.org/lucide-react/-/lucide-react-0.460.0.tgz", + "integrity": "sha512-BVtq/DykVeIvRTJvRAgCsOwaGL8Un3Bxh8MbDxMhEWlZay3T4IpEKDEpwt5KZ0KJMHzgm6jrltxlT5eXOWXDHg==", "license": "ISC", "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0-rc" @@ -6023,9 +6036,9 @@ } }, "node_modules/material-symbols": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/material-symbols/-/material-symbols-0.25.1.tgz", - "integrity": "sha512-0HopmXLjRs4H99LWajFWIXAt8DpaVMf9lyhKp35HQ+ocb7JJ3eXJTJNkOwccfbJ34qIuwYDwLJQtlzheMFmizw==", + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/material-symbols/-/material-symbols-0.27.0.tgz", + "integrity": "sha512-nRHpnw2Cz7eNl6GptgHHhkjauL0zvkYsuiqy1HBifOYCY4fdbZ/PwtdZN4RNmwA+9jQPoymvlArVPPX5nYTdZg==", "license": "Apache-2.0" }, "node_modules/memoize-one": { @@ -6228,12 +6241,12 @@ "license": "MIT" }, "node_modules/next": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/next/-/next-14.2.16.tgz", - "integrity": "sha512-LcO7WnFu6lYSvCzZoo1dB+IO0xXz5uEv52HF1IUN0IqVTUIZGHuuR10I5efiLadGt+4oZqTcNZyVVEem/TM5nA==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.17.tgz", + "integrity": "sha512-hNo/Zy701DDO3nzKkPmsLRlDfNCtb1OJxFUvjGEl04u7SFa3zwC6hqsOUzMajcaEOEV8ey1GjvByvrg0Qr5AiQ==", "license": "MIT", "dependencies": { - "@next/env": "14.2.16", + "@next/env": "14.2.17", "@swc/helpers": "0.5.5", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", @@ -6248,15 +6261,15 @@ "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.2.16", - "@next/swc-darwin-x64": "14.2.16", - "@next/swc-linux-arm64-gnu": "14.2.16", - "@next/swc-linux-arm64-musl": "14.2.16", - "@next/swc-linux-x64-gnu": "14.2.16", - "@next/swc-linux-x64-musl": "14.2.16", - "@next/swc-win32-arm64-msvc": "14.2.16", - "@next/swc-win32-ia32-msvc": "14.2.16", - "@next/swc-win32-x64-msvc": "14.2.16" + "@next/swc-darwin-arm64": "14.2.17", + "@next/swc-darwin-x64": "14.2.17", + "@next/swc-linux-arm64-gnu": "14.2.17", + "@next/swc-linux-arm64-musl": "14.2.17", + "@next/swc-linux-x64-gnu": "14.2.17", + "@next/swc-linux-x64-musl": "14.2.17", + "@next/swc-win32-arm64-msvc": "14.2.17", + "@next/swc-win32-ia32-msvc": "14.2.17", + "@next/swc-win32-x64-msvc": "14.2.17" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", @@ -7152,9 +7165,9 @@ } }, "node_modules/react-select": { - "version": "5.8.2", - "resolved": "https://registry.npmjs.org/react-select/-/react-select-5.8.2.tgz", - "integrity": "sha512-a/LkOckoI62710gGPQSQqUp7A10fGbH/ya3/IR49qaq3XoBvwymgD5mJgtiHxBDsutyEQfdKNycWVh8Cg8UCjw==", + "version": "5.8.3", + "resolved": "https://registry.npmjs.org/react-select/-/react-select-5.8.3.tgz", + "integrity": "sha512-lVswnIq8/iTj1db7XCG74M/3fbGB6ZaluCzvwPGT5ZOjCdL/k0CLWhEK0vCBLuU5bHTEf6Gj8jtSvi+3v+tO1w==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.12.0", @@ -7290,9 +7303,9 @@ } }, "node_modules/recharts": { - "version": "2.13.0", - "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.13.0.tgz", - "integrity": "sha512-sbfxjWQ+oLWSZEWmvbq/DFVdeRLqqA6d0CDjKx2PkxVVdoXo16jvENCE+u/x7HxOO+/fwx//nYRwb8p8X6s/lQ==", + "version": "2.13.3", + "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.13.3.tgz", + "integrity": "sha512-YDZ9dOfK9t3ycwxgKbrnDlRC4BHdjlY73fet3a0C1+qGMjXVZe6+VXmpOIIhzkje5MMEL8AN4hLIe4AMskBzlA==", "license": "MIT", "dependencies": { "clsx": "^2.0.0", @@ -8404,9 +8417,9 @@ } }, "node_modules/ts-api-utils": { - "version": "1.3.0", - "resolved": 
"https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.3.0.tgz", - "integrity": "sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.4.0.tgz", + "integrity": "sha512-032cPxaEKwM+GT3vA5JXNzIaizx388rhsSW79vGRNGXfRRAdEAn2mvk36PvK5HnOchyWZ7afLEXqYCvPCrzuzQ==", "dev": true, "license": "MIT", "engines": { @@ -8436,9 +8449,9 @@ } }, "node_modules/tslib": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.0.tgz", - "integrity": "sha512-jWVzBLplnCmoaTr13V9dYbiQ99wvZRd0vNWaDRg+aVYRcjDF3nDksxFDE/+fkXnKhpnUUkmx5pK/v8mCtLVqZA==", + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", "license": "0BSD" }, "node_modules/type-check": { @@ -8767,19 +8780,19 @@ } }, "node_modules/webpack": { - "version": "5.95.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.95.0.tgz", - "integrity": "sha512-2t3XstrKULz41MNMBF+cJ97TyHdyQ8HCt//pqErqDvNjU9YQBnZxIHa11VXsi7F3mb5/aO2tuDxdeTPdU7xu9Q==", + "version": "5.96.1", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.96.1.tgz", + "integrity": "sha512-l2LlBSvVZGhL4ZrPwyr8+37AunkcYj5qh8o6u2/2rzoPc8gxFJkLj1WxNgooi9pnoc06jh0BjuXnamM4qlujZA==", "dev": true, "license": "MIT", "dependencies": { - "@types/estree": "^1.0.5", + "@types/eslint-scope": "^3.7.7", + "@types/estree": "^1.0.6", "@webassemblyjs/ast": "^1.12.1", "@webassemblyjs/wasm-edit": "^1.12.1", "@webassemblyjs/wasm-parser": "^1.12.1", - "acorn": "^8.7.1", - "acorn-import-attributes": "^1.9.5", - "browserslist": "^4.21.10", + "acorn": "^8.14.0", + "browserslist": "^4.24.0", "chrome-trace-event": "^1.0.2", "enhanced-resolve": "^5.17.1", "es-module-lexer": "^1.2.1", diff --git a/ui/package.json b/ui/package.json index 816cbac3a..d755ce864 100644 --- a/ui/package.json +++ b/ui/package.json @@ -31,8 +31,8 @@ "@types/react": "^18.3.11", "@types/react-dom": "^18.3.0", "classnames": "^2.5.1", - "lucide-react": "^0.453.0", - "material-symbols": "^0.25.0", + "lucide-react": "^0.460.0", + "material-symbols": "^0.27.0", "moment": "^2.30.1", "moment-timezone": "^0.5.46", "next": "^14.2.14", @@ -50,19 +50,19 @@ "zod": "^3.23.8" }, "devDependencies": { - "autoprefixer": "^10.4.20", - "copy-webpack-plugin": "^12.0.2", - "eslint": "^8.57.1", - "eslint-config-next": "^14.2.14", - "eslint-config-prettier": "^9.1.0", - "less": "^4.2.0", - "postcss": "^8.4.47", - "prettier": "^3.3.3", - "prettier-plugin-organize-imports": "^4.1.0", - "string-width": "^7.2.0", - "tailwindcss": "^3.4.13", - "tailwindcss-animate": "^1.0.7", - "typescript": "^5.6.2", - "webpack": "^5.95.0" + "autoprefixer": "10.4.20", + "copy-webpack-plugin": "12.0.2", + "eslint": "8.57.1", + "eslint-config-next": "14.2.17", + "eslint-config-prettier": "9.1.0", + "less": "4.2.0", + "postcss": "8.4.47", + "prettier": "3.3.3", + "prettier-plugin-organize-imports": "4.1.0", + "string-width": "7.2.0", + "tailwindcss": "3.4.14", + "tailwindcss-animate": "1.0.7", + "typescript": "5.6.3", + "webpack": "5.96.1" } }