From 0d52d7f8a31fd6d3c0b5c689380aa7e5cec2c3b9 Mon Sep 17 00:00:00 2001
From: Abdul Basit
Date: Mon, 18 Nov 2024 04:23:17 +0500
Subject: [PATCH] add process-compose for sqlite, fix migration, update query-service

---
 .github/workflows/lint.yml                    |   5 +-
 .github/workflows/slowtest.yaml               |  40 +-
 Cargo.lock                                    |  41 +-
 justfile                                      |   4 +
 process-compose-sqlite.yaml                   | 531 ++++++++++++++++++
 .../sqlite/V16__merkle_root_columns.sql       |  11 +-
 sequencer/src/api.rs                          |   2 +-
 sequencer/src/lib.rs                          |   6 +-
 sequencer/src/main.rs                         |   9 +-
 sequencer/src/persistence/fs.rs               |  20 +-
 sequencer/src/persistence/no_storage.rs       |   4 +-
 sequencer/src/persistence/sql.rs              |  96 ++--
 types/src/v0/traits.rs                        |   5 +-
 13 files changed, 704 insertions(+), 70 deletions(-)
 create mode 100644 process-compose-sqlite.yaml

diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index f9daac209..b9421e791 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -32,5 +32,8 @@ jobs:
       - name: Format Check
         run: cargo fmt -- --check
 
-      - name: Check
+      - name: Check (embedded-db)
         run: cargo clippy --workspace --all-features --all-targets -- -D warnings
+
+      - name: Check (postgres)
+        run: cargo clippy --workspace --features testing --all-targets -- -D warnings
diff --git a/.github/workflows/slowtest.yaml b/.github/workflows/slowtest.yaml
index 38b364109..cc19d8b2d 100644
--- a/.github/workflows/slowtest.yaml
+++ b/.github/workflows/slowtest.yaml
@@ -23,7 +23,7 @@ env:
   RUST_LOG: info,libp2p=off,node=error
 
 jobs:
-  slow-tests:
+  slow-tests-sqlite:
     runs-on: ubuntu-latest
     steps:
       - name: Fix submodule permissions check
@@ -61,3 +61,41 @@ jobs:
           NEXTEST_PROFILE: slow
         run: cargo nextest run --locked --release --workspace --all-features --verbose --no-fail-fast --nocapture
         timeout-minutes: 40
+  slow-tests-postgres:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Fix submodule permissions check
+        run: |
+          git config --global --add safe.directory '*'
+
+      - name: Install Foundry
+        uses: foundry-rs/foundry-toolchain@v1
+        with:
+          version: nightly
+
+      - uses: taiki-e/install-action@nextest
+
+      - name: Checkout Repository
+        uses: actions/checkout@v4
+        with:
+          submodules: recursive
+
+      - name: Configure Environment
+        run: PATH="$PWD/target/release:$PATH"
+
+      - name: Enable Rust Caching
+        uses: Swatinem/rust-cache@v2
+        with:
+          cache-all-crates: true
+
+      - name: Build
+        run: |
+          cargo build --locked --bin diff-test --release
+          cargo nextest run --locked --release --workspace --features testing --no-run
+        timeout-minutes: 90
+
+      - name: Slow Test
+        env:
+          NEXTEST_PROFILE: slow
+        run: cargo nextest run --locked --release --workspace --features testing --verbose --no-fail-fast --nocapture
+        timeout-minutes: 40
\ No newline at end of file
diff --git a/Cargo.lock b/Cargo.lock
index a39d6a3b3..98ef92405 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14,13 +14,19 @@ dependencies = [
 
 [[package]]
 name = "addr2line"
-version = "0.24.2"
+version = "0.21.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
+checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
 dependencies = [
  "gimli",
 ]
 
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+
 [[package]]
 name = "adler2"
 version = "2.0.0"
@@ -1048,17 +1054,17 @@ dependencies = [
 
 [[package]]
 name = "backtrace"
-version = "0.3.74"
+version = "0.3.71"
 source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", + "cc", "cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.7.4", "object", "rustc-demangle", - "windows-targets 0.52.6", ] [[package]] @@ -3317,7 +3323,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", - "miniz_oxide", + "miniz_oxide 0.8.0", ] [[package]] @@ -3653,9 +3659,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.1" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "glob" @@ -4242,7 +4248,7 @@ dependencies = [ [[package]] name = "hotshot-query-service" version = "0.1.62" -source = "git+https://github.com/EspressoSystems/hotshot-query-service?branch=ab/sqlite-support#8889ea92e9437af221ecc25165183b6502b18057" +source = "git+https://github.com/EspressoSystems/hotshot-query-service?branch=ab/sqlite-support#4833f7f19c33d760611cf5aa895f591a3f7f99cf" dependencies = [ "anyhow", "ark-serialize", @@ -6461,6 +6467,15 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" +[[package]] +name = "miniz_oxide" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +dependencies = [ + "adler", +] + [[package]] name = "miniz_oxide" version = "0.8.0" @@ -6857,9 +6872,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.5" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -7579,7 +7594,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.12.1", "proc-macro2", "quote", "syn 2.0.87", diff --git a/justfile b/justfile index 716410e47..db18fa8b7 100644 --- a/justfile +++ b/justfile @@ -11,6 +11,10 @@ demo-native: cargo build --profile test scripts/demo-native +demo-native-sqlite: + cargo build --profile test --features embedded-db + scripts/demo-native -f process-compose-sqlite.yaml + demo-native-mp: cargo build --release scripts/demo-native -f process-compose.yaml -f process-compose-mp.yml diff --git a/process-compose-sqlite.yaml b/process-compose-sqlite.yaml new file mode 100644 index 000000000..ab8521415 --- /dev/null +++ b/process-compose-sqlite.yaml @@ -0,0 +1,531 @@ +version: "3" + +environment: + - ESPRESSO_SEQUENCER_CDN_ENDPOINT=127.0.0.1:$ESPRESSO_CDN_SERVER_PORT + - ESPRESSO_SEQUENCER_ORCHESTRATOR_URL=http://localhost:$ESPRESSO_ORCHESTRATOR_PORT + - ESPRESSO_SEQUENCER_URL=http://localhost:$ESPRESSO_SEQUENCER_API_PORT + - ESPRESSO_SEQUENCER_L1_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER_L1_PORT + - ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo.toml + - 
ESPRESSO_BUILDER_GENESIS_FILE=data/genesis/demo.toml + - ESPRESSO_STATE_RELAY_SERVER_URL=http://localhost:$ESPRESSO_STATE_RELAY_SERVER_PORT + - QUERY_SERVICE_URI=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT/v0/ + - NODE_VALIDATOR_URI=ws://localhost:$ESPRESSO_NODE_VALIDATOR_PORT/v0/ +processes: + # Cheating a bit here but since we don't usually have to debug go-ethereum + # it's using the docker compose service which is a bit easier. + demo-l1-network: + command: docker compose up demo-l1-network --force-recreate --renew-anon-volumes + readiness_probe: + exec: + command: "[ $(docker inspect -f '{{.State.Health.Status}}' espresso-sequencer-demo-l1-network-1) = 'healthy' ]" + initial_delay_seconds: 5 + period_seconds: 6 + timeout_seconds: 5 + success_threshold: 1 + failure_threshold: 20 + + deploy-sequencer-contracts: + # The contract addresses are implicitly inherited from .env. We need to unset these or else the + # script will think they're already deployed. + command: + unset ESPRESSO_SEQUENCER_HOTSHOT_ADDRESS ESPRESSO_SEQUENCER_LIGHT_CLIENT_PROXY_ADDRESS && deploy --only + fee-contract + namespace: setup + depends_on: + demo-l1-network: + condition: process_healthy + + deploy-prover-contracts: + # The contract addresses are implicitly inherited from .env. We need to unset these or else the + # script will think they're already deployed. + command: + unset ESPRESSO_SEQUENCER_HOTSHOT_ADDRESS ESPRESSO_SEQUENCER_LIGHT_CLIENT_PROXY_ADDRESS && deploy + --use-mock-contract --only light-client + namespace: setup + depends_on: + demo-l1-network: + condition: process_healthy + sequencer0: + condition: process_healthy + # Make sure this doesn't start until the other contracts have been deployed, since we use the same mnemonic. + deploy-sequencer-contracts: + condition: process_completed + + fund-builder: + command: espresso-bridge deposit + namespace: setup + environment: + - L1_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER_L1_PORT + - ESPRESSO_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT + - CONTRACT_ADDRESS=0xa15bb66138824a1c7167f5e85b957d04dd34e468 + - MNEMONIC=$ESPRESSO_BUILDER_ETH_MNEMONIC + - ACCOUNT_INDEX=$ESPRESSO_BUILDER_ETH_ACCOUNT_INDEX + - AMOUNT=1000000000000000000 + - CONFIRMATIONS=1 + depends_on: + deploy-sequencer-contracts: + condition: process_completed + sequencer1: + condition: process_healthy + + orchestrator: + command: orchestrator + environment: + - ESPRESSO_ORCHESTRATOR_BUILDER_URLS=http://localhost:$ESPRESSO_BUILDER_SERVER_PORT + readiness_probe: + http_get: + scheme: http + host: localhost + port: $ESPRESSO_ORCHESTRATOR_PORT + path: /healthcheck + initial_delay_seconds: 0 + period_seconds: 1 + timeout_seconds: 1 + success_threshold: 1 + failure_threshold: 100 + availability: + restart: "exit_on_failure" + + state-relay-server: + command: state-relay-server + readiness_probe: + http_get: + scheme: http + host: localhost + port: $ESPRESSO_STATE_RELAY_SERVER_PORT + path: /healthcheck + + prover-service: + command: state-prover -d + environment: + - MNEMONIC=$ESPRESSO_SEQUENCER_ETH_MNEMONIC + - RAYON_NUM_THREADS=$PROVER_RAYON_NUM_THREADS + depends_on: + sequencer0: + condition: process_healthy + state-relay-server: + condition: process_healthy + demo-l1-network: + condition: process_healthy + deploy-prover-contracts: + condition: process_completed + + sequencer0: + command: sequencer -- storage-sql -- http -- query -- catchup -- status -- submit -- hotshot-events -- config + environment: + - ESPRESSO_SEQUENCER_API_PORT=$ESPRESSO_SEQUENCER_API_PORT + - 
ESPRESSO_SEQUENCER_LIBP2P_BIND_ADDRESS=0.0.0.0:$ESPRESSO_DEMO_SEQUENCER_LIBP2P_PORT_0 + - ESPRESSO_SEQUENCER_LIBP2P_ADVERTISE_ADDRESS=localhost:$ESPRESSO_DEMO_SEQUENCER_LIBP2P_PORT_0 + - ESPRESSO_SEQUENCER_API_PEERS=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT + - ESPRESSO_SEQUENCER_STATE_PEERS=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT + - ESPRESSO_SEQUENCER_SQLITE_PATH=seq0 + - ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_0 + - ESPRESSO_SEQUENCER_PRIVATE_STATE_KEY=$ESPRESSO_DEMO_SEQUENCER_STATE_PRIVATE_KEY_0 + - ESPRESSO_SEQUENCER_IS_DA=true + - ESPRESSO_SEQUENCER_IDENTITY_NODE_NAME=sequencer0 + - ESPRESSO_SEQUENCER_IDENTITY_WALLET_ADDRESS=0x0000000000000000000000000000000000000000 + - ESPRESSO_SEQUENCER_IDENTITY_COMPANY_NAME=Espresso Systems + - ESPRESSO_SEQUENCER_IDENTITY_COMPANY_WEBSITE=https://www.espressosys.com/ + - ESPRESSO_SEQUENCER_IDENTITY_OPERATING_SYSTEM=Linux 5.15.153.1 + - ESPRESSO_SEQUENCER_IDENTITY_NETWORK_TYPE=local + - ESPRESSO_SEQUENCER_IDENTITY_COUNTRY_CODE=US + - ESPRESSO_SEQUENCER_IDENTITY_LATITUDE=40.7128 + - ESPRESSO_SEQUENCER_IDENTITY_LONGITUDE=-74.0060 + - ESPRESSO_SEQUENCER_PUBLIC_API_URL=http://localhost:$ESPRESSO_SEQUENCER_API_PORT/ + depends_on: + orchestrator: + condition: process_healthy + demo-l1-network: + condition: process_healthy + state-relay-server: + condition: process_healthy + broker_0: + condition: process_healthy + broker_1: + condition: process_healthy + marshal_0: + condition: process_healthy + deploy-sequencer-contracts: + condition: process_completed + availability: + exit_on_skipped: true + + readiness_probe: + http_get: + scheme: http + host: localhost + port: $ESPRESSO_SEQUENCER_API_PORT + path: /healthcheck + failure_threshold: 100 + + sequencer1: + command: sequencer -- storage-sql -- http -- query -- catchup -- status -- state -- explorer + environment: + - ESPRESSO_SEQUENCER_API_PORT=$ESPRESSO_SEQUENCER1_API_PORT + - ESPRESSO_SEQUENCER_LIBP2P_BIND_ADDRESS=0.0.0.0:$ESPRESSO_DEMO_SEQUENCER_LIBP2P_PORT_1 + - ESPRESSO_SEQUENCER_LIBP2P_ADVERTISE_ADDRESS=localhost:$ESPRESSO_DEMO_SEQUENCER_LIBP2P_PORT_1 + - ESPRESSO_SEQUENCER_API_PEERS=http://localhost:$ESPRESSO_SEQUENCER_API_PORT + - ESPRESSO_SEQUENCER_STATE_PEERS=http://localhost:$ESPRESSO_SEQUENCER2_API_PORT + - ESPRESSO_SEQUENCER_SQLITE_PATH=seq1 + - ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_1 + - ESPRESSO_SEQUENCER_PRIVATE_STATE_KEY=$ESPRESSO_DEMO_SEQUENCER_STATE_PRIVATE_KEY_1 + - ESPRESSO_SEQUENCER_IS_DA=true + - ESPRESSO_SEQUENCER_IDENTITY_NODE_NAME=sequencer1 + - ESPRESSO_SEQUENCER_IDENTITY_WALLET_ADDRESS=0x0000000000000000000000000000000000000001 + - ESPRESSO_SEQUENCER_IDENTITY_COMPANY_NAME=Espresso Systems + - ESPRESSO_SEQUENCER_IDENTITY_COMPANY_WEBSITE=https://www.espressosys.com/ + - ESPRESSO_SEQUENCER_IDENTITY_OPERATING_SYSTEM=Darwin 23.5.0 + - ESPRESSO_SEQUENCER_IDENTITY_NETWORK_TYPE=local + - ESPRESSO_SEQUENCER_IDENTITY_COUNTRY_CODE=GR + - ESPRESSO_SEQUENCER_IDENTITY_LATITUDE=39.0742 + - ESPRESSO_SEQUENCER_IDENTITY_LONGITUDE=21.8243 + - ESPRESSO_SEQUENCER_PUBLIC_API_URL=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT/ + - ESPRESSO_SEQUENCER_L1_PROVIDER=ws://localhost:$ESPRESSO_SEQUENCER_L1_WS_PORT + depends_on: + orchestrator: + condition: process_healthy + demo-l1-network: + condition: process_healthy + state-relay-server: + condition: process_healthy + broker_0: + condition: process_healthy + broker_1: + condition: process_healthy + marshal_0: + condition: process_healthy + 
deploy-sequencer-contracts: + condition: process_completed + readiness_probe: + http_get: + scheme: http + host: localhost + port: $ESPRESSO_SEQUENCER1_API_PORT + path: /healthcheck + failure_threshold: 100 + availability: + exit_on_skipped: true + + sequencer2: + command: sequencer -- http -- catchup -- status + environment: + - ESPRESSO_SEQUENCER_API_PORT=$ESPRESSO_SEQUENCER2_API_PORT + - ESPRESSO_SEQUENCER_LIBP2P_BIND_ADDRESS=0.0.0.0:$ESPRESSO_DEMO_SEQUENCER_LIBP2P_PORT_2 + - ESPRESSO_SEQUENCER_LIBP2P_ADVERTISE_ADDRESS=localhost:$ESPRESSO_DEMO_SEQUENCER_LIBP2P_PORT_2 + - ESPRESSO_SEQUENCER_STATE_PEERS=http://localhost:$ESPRESSO_SEQUENCER3_API_PORT + - ESPRESSO_SEQUENCER_STORAGE_PATH=$ESPRESSO_BASE_STORAGE_PATH/seq2 + - ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_2 + - ESPRESSO_SEQUENCER_PRIVATE_STATE_KEY=$ESPRESSO_DEMO_SEQUENCER_STATE_PRIVATE_KEY_2 + - ESPRESSO_SEQUENCER_IS_DA=true + - ESPRESSO_SEQUENCER_IDENTITY_NODE_NAME=sequencer2 + - ESPRESSO_SEQUENCER_IDENTITY_WALLET_ADDRESS=0x0000000000000000000000000000000000000002 + - ESPRESSO_SEQUENCER_IDENTITY_COMPANY_NAME=Espresso Systems + - ESPRESSO_SEQUENCER_IDENTITY_COMPANY_WEBSITE=https://www.espressosys.com/ + - ESPRESSO_SEQUENCER_IDENTITY_OPERATING_SYSTEM=Darwin 23.5.0 + - ESPRESSO_SEQUENCER_IDENTITY_NETWORK_TYPE=local + - ESPRESSO_SEQUENCER_IDENTITY_COUNTRY_CODE=CN + - ESPRESSO_SEQUENCER_IDENTITY_LATITUDE=35.8617 + - ESPRESSO_SEQUENCER_IDENTITY_LONGITUDE=104.1954 + - ESPRESSO_SEQUENCER_PUBLIC_API_URL=http://localhost:$ESPRESSO_SEQUENCER2_API_PORT/ + depends_on: + orchestrator: + condition: process_healthy + demo-l1-network: + condition: process_healthy + state-relay-server: + condition: process_healthy + broker_0: + condition: process_healthy + broker_1: + condition: process_healthy + marshal_0: + condition: process_healthy + deploy-sequencer-contracts: + condition: process_completed + readiness_probe: + http_get: + scheme: http + host: localhost + port: $ESPRESSO_SEQUENCER2_API_PORT + path: /healthcheck + failure_threshold: 100 + availability: + exit_on_skipped: true + + sequencer3: + command: sequencer -- http -- catchup -- status + environment: + - ESPRESSO_SEQUENCER_API_PORT=$ESPRESSO_SEQUENCER3_API_PORT + - ESPRESSO_SEQUENCER_LIBP2P_BIND_ADDRESS=0.0.0.0:$ESPRESSO_DEMO_SEQUENCER_LIBP2P_PORT_3 + - ESPRESSO_SEQUENCER_LIBP2P_ADVERTISE_ADDRESS=localhost:$ESPRESSO_DEMO_SEQUENCER_LIBP2P_PORT_3 + - ESPRESSO_SEQUENCER_STATE_PEERS=http://localhost:$ESPRESSO_SEQUENCER4_API_PORT + - ESPRESSO_SEQUENCER_STORAGE_PATH=$ESPRESSO_BASE_STORAGE_PATH/seq3 + - ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_3 + - ESPRESSO_SEQUENCER_PRIVATE_STATE_KEY=$ESPRESSO_DEMO_SEQUENCER_STATE_PRIVATE_KEY_3 + - ESPRESSO_SEQUENCER_IDENTITY_NODE_NAME=sequencer3 + - ESPRESSO_SEQUENCER_IDENTITY_WALLET_ADDRESS=0x0000000000000000000000000000000000000003 + - ESPRESSO_SEQUENCER_IDENTITY_COMPANY_NAME=Espresso Systems + - ESPRESSO_SEQUENCER_IDENTITY_COMPANY_WEBSITE=https://www.espressosys.com/ + - ESPRESSO_SEQUENCER_IDENTITY_OPERATING_SYSTEM=Microsoft Windows NT 10.0.22621.0 + - ESPRESSO_SEQUENCER_IDENTITY_E=espresso-sequencer@0.1.0 + - ESPRESSO_SEQUENCER_IDENTITY_NETWORK_TYPE=local + - ESPRESSO_SEQUENCER_IDENTITY_COUNTRY_CODE=CN + - ESPRESSO_SEQUENCER_IDENTITY_LATITUDE=35.8617 + - ESPRESSO_SEQUENCER_IDENTITY_LONGITUDE=104.1954 + - ESPRESSO_SEQUENCER_PUBLIC_API_URL=http://localhost:$ESPRESSO_SEQUENCER3_API_PORT/ + - ESPRESSO_SEQUENCER_L1_PROVIDER=ws://localhost:$ESPRESSO_SEQUENCER_L1_WS_PORT + 
depends_on: + orchestrator: + condition: process_healthy + demo-l1-network: + condition: process_healthy + state-relay-server: + condition: process_healthy + broker_0: + condition: process_healthy + broker_1: + condition: process_healthy + marshal_0: + condition: process_healthy + deploy-sequencer-contracts: + condition: process_completed + readiness_probe: + http_get: + scheme: http + host: localhost + port: $ESPRESSO_SEQUENCER3_API_PORT + path: /healthcheck + failure_threshold: 100 + availability: + exit_on_skipped: true + + sequencer4: + command: sequencer -- http -- catchup -- status + environment: + - ESPRESSO_SEQUENCER_API_PORT=$ESPRESSO_SEQUENCER4_API_PORT + - ESPRESSO_SEQUENCER_STATE_PEERS=http://localhost:$ESPRESSO_SEQUENCER_API_PORT + - ESPRESSO_SEQUENCER_STORAGE_PATH=$ESPRESSO_BASE_STORAGE_PATH/seq4 + - ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_4 + - ESPRESSO_SEQUENCER_PRIVATE_STATE_KEY=$ESPRESSO_DEMO_SEQUENCER_STATE_PRIVATE_KEY_4 + - ESPRESSO_SEQUENCER_IDENTITY_NODE_NAME=sequencer4 + - ESPRESSO_SEQUENCER_IDENTITY_WALLET_ADDRESS=0x0000000000000000000000000000000000000004 + - ESPRESSO_SEQUENCER_IDENTITY_COMPANY_NAME=Espresso Systems + - ESPRESSO_SEQUENCER_IDENTITY_COMPANY_WEBSITE=https://www.espressosys.com/ + - ESPRESSO_SEQUENCER_IDENTITY_OPERATING_SYSTEM=TempleOS 5.03 + - ESPRESSO_SEQUENCER_IDENTITY_NETWORK_TYPE=local + - ESPRESSO_SEQUENCER_IDENTITY_COUNTRY_CODE=AU + - ESPRESSO_SEQUENCER_IDENTITY_LATITUDE=-25.2744 + - ESPRESSO_SEQUENCER_IDENTITY_LONGITUDE=133.7751 + - ESPRESSO_SEQUENCER_PUBLIC_API_URL=http://localhost:$ESPRESSO_SEQUENCER4_API_PORT/ + depends_on: + orchestrator: + condition: process_healthy + demo-l1-network: + condition: process_healthy + state-relay-server: + condition: process_healthy + broker_0: + condition: process_healthy + broker_1: + condition: process_healthy + marshal_0: + condition: process_healthy + deploy-sequencer-contracts: + condition: process_completed + readiness_probe: + http_get: + scheme: http + host: localhost + port: $ESPRESSO_SEQUENCER4_API_PORT + path: /healthcheck + failure_threshold: 100 + availability: + exit_on_skipped: true + + node_validator: + command: RUST_LOG=debug node-metrics -- + environment: + - ESPRESSO_NODE_VALIDATOR_STAKE_TABLE_SOURCE_BASE_URL=http://localhost:$ESPRESSO_SEQUENCER_API_PORT/v0/ + - ESPRESSO_NODE_VALIDATOR_LEAF_STREAM_SOURCE_BASE_URL=http://localhost:$ESPRESSO_SEQUENCER_API_PORT/v0/ + - ESPRESSO_NODE_VALIDATOR_INITIAL_NODE_PUBLIC_BASE_URLS=http://localhost:$ESPRESSO_SEQUENCER_API_PORT/,http://localhost:$ESPRESSO_SEQUENCER1_API_PORT/,http://localhost:$ESPRESSO_SEQUENCER2_API_PORT/,http://localhost:$ESPRESSO_SEQUENCER3_API_PORT/,http://localhost:$ESPRESSO_SEQUENCER4_API_PORT/ + depends_on: + broker_0: + condition: process_healthy + broker_1: + condition: process_healthy + sequencer0: + condition: process_healthy + sequencer1: + condition: process_healthy + sequencer2: + condition: process_healthy + sequencer3: + condition: process_healthy + sequencer4: + condition: process_healthy + readiness_probe: + http_get: + scheme: http + host: localhost + port: $ESPRESSO_SEQUENCER1_API_PORT + path: /healthcheck + failure_threshold: 100 + + + # We use KeyDB (a Redis variant) to maintain consistency between + # different parts of the CDN + # Cheating a bit here too, but KeyDB is not available as a Nix package. + # Could do local (SQLite) discovery, but removes some of the spirit + # from the local demo. 
+ keydb: + command: docker run --rm -p 0.0.0.0:6379:6379 eqalpha/keydb --requirepass changeme! + readiness_probe: + exec: + command: nc -zv localhost 6379 + period_seconds: 5 + timeout_seconds: 4 + failure_threshold: 20 + + # The CDN system's main entry point; where users contact first. + marshal_0: + command: cdn-marshal -d "redis://:changeme!@localhost:6379" -b $ESPRESSO_CDN_SERVER_PORT -m 127.0.0.1:9093 + depends_on: + keydb: + condition: process_healthy + readiness_probe: + http_get: + scheme: http + host: 127.0.0.1 + port: 9093 + path: /metrics + failure_threshold: 100 + + # A broker is the main message-routing unit of the CDN + broker_0: + command: RUST_LOG=debug cdn-broker -d "redis://:changeme!@localhost:6379" -m 127.0.0.1:9091 + depends_on: + keydb: + condition: process_healthy + readiness_probe: + http_get: + scheme: http + host: 127.0.0.1 + port: 9091 + path: /metrics + failure_threshold: 100 + + # A broker is the main message-routing unit of the CDN + # as we're running a local demo, we need to operate on different ports + broker_1: + command: + RUST_LOG=debug cdn-broker --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 + --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 -d + "redis://:changeme!@localhost:6379" -m 127.0.0.1:9092 + depends_on: + keydb: + condition: process_healthy + readiness_probe: + http_get: + scheme: http + host: 127.0.0.1 + port: 9092 + path: /metrics + failure_threshold: 100 + + cdn-whitelist: + command: cdn-whitelist -d redis://:changeme!@localhost:6379 -o http://localhost:$ESPRESSO_ORCHESTRATOR_PORT + namespace: setup + depends_on: + orchestrator: + condition: process_healthy + keydb: + condition: process_healthy + + submit-transactions-public: + command: submit-transactions + environment: + - ESPRESSO_SUBMIT_TRANSACTIONS_PORT=$ESPRESSO_SUBMIT_TRANSACTIONS_PUBLIC_PORT + depends_on: + sequencer0: + condition: process_healthy + # We don't directly depend on the builder, but transactions will not be finalized until it has + # started, so there is no point in starting before then. 
+ permissionless-builder: + condition: process_healthy + readiness_probe: + http_get: + scheme: http + host: localhost + port: $ESPRESSO_SUBMIT_TRANSACTIONS_PUBLIC_PORT + path: /healthcheck + failure_threshold: 100 + availability: + exit_on_skipped: true + + submit-transactions-private: + command: submit-transactions + environment: + - ESPRESSO_SUBMIT_TRANSACTIONS_PORT=$ESPRESSO_SUBMIT_TRANSACTIONS_PRIVATE_PORT + - ESPRESSO_SUBMIT_TRANSACTIONS_SUBMIT_URL=http://localhost:$ESPRESSO_BUILDER_SERVER_PORT/txn_submit + depends_on: + sequencer0: + condition: process_healthy + permissionless-builder: + condition: process_healthy + readiness_probe: + http_get: + scheme: http + host: localhost + port: $ESPRESSO_SUBMIT_TRANSACTIONS_PRIVATE_PORT + path: /healthcheck + failure_threshold: 100 + availability: + exit_on_skipped: true + + permissionless-builder: + command: permissionless-builder + ports: + - "$ESPRESSO_BUILDER_SERVER_PORT:$ESPRESSO_BUILDER_SERVER_PORT" + environment: + - ESPRESSO_SEQUENCER_STATE_PEERS=http://localhost:$ESPRESSO_SEQUENCER_API_PORT + - ESPRESSO_SEQUENCER_HOTSHOT_EVENT_STREAMING_API_URL=http://localhost:$ESPRESSO_SEQUENCER_HOTSHOT_EVENT_STREAMING_API_PORT + - ESPRESSO_BUILDER_GENESIS_FILE + depends_on: + sequencer0: + condition: process_healthy + fund-builder: + condition: process_completed + readiness_probe: + http_get: + scheme: http + host: localhost + port: $ESPRESSO_BUILDER_SERVER_PORT + path: /healthcheck + failure_threshold: 100 + availability: + restart: "exit_on_failure" + + nasty-client: + command: nasty-client + environment: + # Point the nasty client at sequencer1, the only one running the state API. + - ESPRESSO_SEQUENCER_URL=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT + depends_on: + sequencer0: + condition: process_healthy + readiness_probe: + http_get: + scheme: http + host: localhost + port: $ESPRESSO_NASTY_CLIENT_PORT + path: /healthcheck + failure_threshold: 100 + + block-explorer: + command: + docker run --rm -p $ESPRESSO_BLOCK_EXPLORER_PORT:3000 -e QUERY_SERVICE_URI -e NODE_VALIDATOR_URI + ghcr.io/espressosystems/espresso-block-explorer:main + depends_on: + sequencer1: + condition: process_healthy + diff --git a/sequencer/api/migrations/sqlite/V16__merkle_root_columns.sql b/sequencer/api/migrations/sqlite/V16__merkle_root_columns.sql index 11735f1ba..6d84f38e5 100644 --- a/sequencer/api/migrations/sqlite/V16__merkle_root_columns.sql +++ b/sequencer/api/migrations/sqlite/V16__merkle_root_columns.sql @@ -1,10 +1,11 @@ +-- Add block_merkle_tree_root column as a generated column ALTER TABLE header -ADD column block_merkle_tree_root text -GENERATED ALWAYS AS (data->>'block_merkle_tree_root') STORED NOT NULL; +ADD COLUMN block_merkle_tree_root TEXT +GENERATED ALWAYS AS (json_extract(data, '$.fields.block_merkle_tree_root')) STORED NOT NULL; +-- Add fee_merkle_tree_root column as a generated column ALTER TABLE header -ADD column fee_merkle_tree_root text -GENERATED ALWAYS as (data->>'fee_merkle_tree_root') STORED NOT NULL; - +ADD COLUMN fee_merkle_tree_root TEXT +GENERATED ALWAYS AS (json_extract(data, '$.fields.fee_merkle_tree_root')) STORED NOT NULL; CREATE INDEX header_block_merkle_tree_root_idx ON header (block_merkle_tree_root); CREATE INDEX header_fee_merkle_tree_root_idx ON header (fee_merkle_tree_root); \ No newline at end of file diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 8faecc8ff..fa48b2d65 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1198,7 +1198,7 @@ mod api_tests { setup_test(); let storage = 
D::create_storage().await; - let persistence = D::persistence_options(&storage).create().await.unwrap(); + let (_, persistence) = D::persistence_options(&storage).create().await.unwrap(); let data_source: Arc> = Arc::new(StorageState::new( D::create(D::persistence_options(&storage), Default::default(), false) diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index a4e7ed4fc..b7da3aa2f 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -301,7 +301,7 @@ pub async fn init_node( ) .with_context(|| "Failed to derive Libp2p peer ID")?; - let persistence = persistence_opt.clone().create().await?; + let (_, persistence) = persistence_opt.clone().create().await?; let (mut network_config, wait_for_orchestrator) = match ( persistence.load_config().await?, network_params.config_peers, @@ -1044,6 +1044,8 @@ pub mod testing { state_key = %my_peer_config.state_ver_key, "starting node", ); + + let (_, persistence) = persistence_opt.create().await.unwrap(); SequencerContext::init( NetworkConfig { config, @@ -1054,7 +1056,7 @@ pub mod testing { validator_config, memberships, node_state, - persistence_opt.create().await.unwrap(), + persistence, network, self.state_relay_url.clone(), metrics, diff --git a/sequencer/src/main.rs b/sequencer/src/main.rs index 4ddf31fff..c55ae1dd0 100644 --- a/sequencer/src/main.rs +++ b/sequencer/src/main.rs @@ -180,6 +180,9 @@ where fallback_builder_url: opt.fallback_builder_url, }; + // TODO: cleanup this by using the persistence and passing it to init_node() + let (persistence_opt, _) = storage_opt.create().await?; + // Initialize HotShot. If the user requested the HTTP module, we must initialize the handle in // a special way, in order to populate the API with consensus metrics. Otherwise, we initialize // the handle directly, with no metrics. @@ -188,7 +191,7 @@ where // Add optional API modules as requested. 
     let mut http_opt = api::Options::from(http_opt);
     if let Some(query) = modules.query {
-        http_opt = storage_opt.enable_query_module(http_opt, query);
+        http_opt = persistence_opt.enable_query_module(http_opt, query);
     }
     if let Some(submit) = modules.submit {
         http_opt = http_opt.submit(submit);
@@ -220,7 +223,7 @@ where
             genesis,
             network_params,
             &*metrics,
-            storage_opt,
+            persistence_opt,
             l1_params,
             versions,
             consumer,
@@ -239,7 +242,7 @@ where
             genesis,
             network_params,
             &NoMetrics,
-            storage_opt,
+            persistence_opt,
             l1_params,
             versions,
             NullEventConsumer,
diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs
index 8a492d2ff..7997956e3 100644
--- a/sequencer/src/persistence/fs.rs
+++ b/sequencer/src/persistence/fs.rs
@@ -62,11 +62,17 @@ impl Options {
 impl PersistenceOptions for Options {
     type Persistence = Persistence;
 
-    async fn create(self) -> anyhow::Result<Self::Persistence> {
-        Ok(Persistence {
-            store_undecided_state: self.store_undecided_state,
-            inner: Arc::new(RwLock::new(Inner { path: self.path })),
-        })
+    async fn create(self) -> anyhow::Result<(Self, Self::Persistence)> {
+        let path = self.path.clone();
+        let store_undecided_state = self.store_undecided_state;
+
+        Ok((
+            self,
+            Persistence {
+                store_undecided_state,
+                inner: Arc::new(RwLock::new(Inner { path })),
+            },
+        ))
     }
 
     async fn reset(self) -> anyhow::Result<()> {
@@ -827,7 +833,9 @@ mod testing {
     }
 
     async fn connect(storage: &Self::Storage) -> Self {
-        Options::new(storage.path().into()).create().await.unwrap()
+        let (_, storage) = Options::new(storage.path().into()).create().await.unwrap();
+
+        storage
     }
 }
}
diff --git a/sequencer/src/persistence/no_storage.rs b/sequencer/src/persistence/no_storage.rs
index cf6394eed..d3167c0bf 100644
--- a/sequencer/src/persistence/no_storage.rs
+++ b/sequencer/src/persistence/no_storage.rs
@@ -29,8 +29,8 @@ pub struct Options;
 impl PersistenceOptions for Options {
     type Persistence = NoStorage;
 
-    async fn create(self) -> anyhow::Result<Self::Persistence> {
-        Ok(NoStorage)
+    async fn create(self) -> anyhow::Result<(Self, Self::Persistence)> {
+        Ok((self, NoStorage))
     }
 
     async fn reset(self) -> anyhow::Result<()> {
diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs
index e59b19393..fb322b1ad 100644
--- a/sequencer/src/persistence/sql.rs
+++ b/sequencer/src/persistence/sql.rs
@@ -10,11 +10,11 @@ use espresso_types::{
     BackoffParams, Leaf, NetworkConfig, Payload,
 };
 use futures::stream::StreamExt;
-use hotshot_query_service::data_source::storage::sql::Write;
+use hotshot_query_service::data_source::storage::sql::Db;
 use hotshot_query_service::data_source::{
     storage::{
         pruning::PrunerCfg,
-        sql::{include_migrations, query_as, Config, SqlStorage, Transaction},
+        sql::{include_migrations, query_as, Config, SqlStorage},
     },
     Transaction as _, VersionedDataSource,
 };
@@ -147,6 +147,16 @@ pub struct Options {
     /// fetching from peers.
     #[clap(long, env = "ESPRESSO_SEQUENCER_ARCHIVE", conflicts_with = "prune")]
    pub(crate) archive: bool,
+
+    // Keep the database connection pool that is created with the persistence,
+    // so it can be reused across multiple instances (e.g. for the API and
+    // consensus storage) instead of creating a new pool each time.
+    // This also ensures all storage instances adhere to the MAX_CONNECTIONS limit, if set.
+    //
+    // Note: Cloning the `Pool` is lightweight and efficient because it simply
+    // creates a new reference-counted handle to the underlying pool state.
+    #[clap(skip)]
+    pub(crate) pool: Option<Pool<Db>>,
 }
 
 #[cfg(not(feature = "embedded-db"))]
@@ -220,6 +230,10 @@ impl TryFrom<Options> for Config {
             None => Self::default(),
         };
 
+        if let Some(pool) = opt.pool {
+            cfg = cfg.pool(pool);
+        }
+
         #[cfg(not(feature = "embedded-db"))]
         {
             cfg = cfg.migrations(include_migrations!(
@@ -351,13 +365,16 @@ impl From for PrunerCfg {
 impl PersistenceOptions for Options {
     type Persistence = Persistence;
 
-    async fn create(self) -> anyhow::Result<Self::Persistence> {
+    async fn create(mut self) -> anyhow::Result<(Self, Self::Persistence)> {
+        let store_undecided_state = self.store_undecided_state;
+        let config = self.clone().try_into()?;
         let persistence = Persistence {
-            store_undecided_state: self.store_undecided_state,
-            db: SqlStorage::connect(self.try_into()?).await?,
+            store_undecided_state,
+            db: SqlStorage::connect(config).await?,
         };
         persistence.migrate_quorum_proposal_leaf_hashes().await?;
-        Ok(persistence)
+        self.pool = Some(persistence.db.pool());
+        Ok((self, persistence))
     }
 
     async fn reset(self) -> anyhow::Result<()> {
@@ -473,8 +490,8 @@ impl SequencerPersistence for Persistence {
         // Generate an event for the new leaves and, only if it succeeds, clean up data we no longer
         // need.
         let consumer = dyn_clone::clone(consumer);
-        let tx = self.db.write().await?;
-        if let Err(err) = collect_garbage(tx, view, consumer).await {
+
+        if let Err(err) = collect_garbage(self, view, consumer).await {
             // GC/event processing failure is not an error, since by this point we have at least
             // managed to persist the decided leaves successfully, and GC will just run again at the
             // next decide. Log an error but do not return it.
@@ -767,17 +784,21 @@ impl SequencerPersistence for Persistence {
 }
 
 async fn collect_garbage(
-    mut tx: Transaction<Write>,
+    storage: &Persistence,
     view: ViewNumber,
     consumer: impl EventConsumer,
 ) -> anyhow::Result<()> {
-    // Clean up and collect VID shares.
-
+    // In SQLite, overlapping read and write transactions can lead to database errors.
+    // To avoid this:
+    // - start a read transaction to query and collect all the necessary data,
+    // - commit (or implicitly drop) the read transaction once the data is fetched,
+    // - use the collected data to generate a "decide" event for the consumer,
+    // - begin a write transaction to delete the data and update the event stream.
+    let mut tx = storage.db.read().await?;
+
+    // Collect VID shares.
     let mut vid_shares = tx
-        .fetch_all(
-            query("DELETE FROM vid_share where view <= $1 RETURNING view, data")
-                .bind(view.u64() as i64),
-        )
+        .fetch_all(query("SELECT * FROM vid_share where view <= $1").bind(view.u64() as i64))
         .await?
         .into_iter()
         .map(|row| {
@@ -789,12 +810,9 @@
         })
         .collect::<anyhow::Result<BTreeMap<_, _>>>()?;
 
-    // Clean up and collect DA proposals.
+    // Collect DA proposals.
     let mut da_proposals = tx
-        .fetch_all(
-            query("DELETE FROM da_proposal where view <= $1 RETURNING view, data")
-                .bind(view.u64() as i64),
-        )
+        .fetch_all(query("SELECT * FROM da_proposal where view <= $1").bind(view.u64() as i64))
         .await?
         .into_iter()
         .map(|row| {
@@ -806,10 +824,7 @@
         })
         .collect::<anyhow::Result<BTreeMap<_, _>>>()?;
 
-    // Clean up and collect leaves, except do not delete the most recent leaf: we need to remember
-    // this so that in case we restart, we can pick up from the last decided leaf. We still do
-    // include this leaf in the query results (the `UNION` clause) so we can include it in the
-    // decide event we send to the consumer.
+    // Collect leaves.
     let mut leaves = tx
         .fetch_all(
             query("SELECT view, leaf, qc FROM anchor_leaf WHERE view <= $1")
                 .bind(view.u64() as i64),
         )
@@ -827,14 +842,6 @@
         })
         .collect::<anyhow::Result<BTreeMap<_, _>>>()?;
 
-    tx.execute(query("DELETE FROM anchor_leaf WHERE view < $1").bind(view.u64() as i64))
-        .await?;
-
-    // Clean up old proposals. These are not part of the decide event we generate for the consumer,
-    // so we don't need to return them.
-    tx.execute(query("DELETE FROM quorum_proposals where view <= $1").bind(view.u64() as i64))
-        .await?;
-
     // Exclude from the decide event any leaves which have definitely already been processed. We may
     // have selected an already-processed leaf because the oldest leaf -- the last leaf processed in
     // the previous decide event -- remained in the database to serve as the anchor leaf, so our
@@ -870,6 +877,8 @@
         leaves
     };
 
+    drop(tx);
+
     // Generate a decide event for each leaf, to be processed by the event consumer. We make a
     // separate event for each leaf because it is possible we have non-consecutive leaves in our
     // storage, which would not be valid as a single decide with a single leaf chain.
@@ -910,6 +919,7 @@
             .await?;
     }
 
+    let mut tx = storage.db.write().await?;
     // Now that we have definitely processed leaves up to `view`, we can update
     // `last_processed_view` so we don't process these leaves again. We may still fail at this
     // point, or shut down, and fail to complete this update. At worst this will lead to us sending
@@ -923,6 +933,22 @@
     )
     .await?;
 
+    tx.execute(query("DELETE FROM vid_share where view <= $1").bind(view.u64() as i64))
+        .await?;
+
+    tx.execute(query("DELETE FROM da_proposal where view <= $1").bind(view.u64() as i64))
+        .await?;
+
+    // Clean up leaves, but do not delete the most recent one: only leaves with a view number
+    // strictly less than `view` are removed, so that in case of a restart we can resume from
+    // the last decided leaf.
+    tx.execute(query("DELETE FROM anchor_leaf WHERE view < $1").bind(view.u64() as i64))
+        .await?;
+
+    // Clean up old proposals. These are not part of the decide event we generate for the consumer,
+    // so we don't need to return them.
+    tx.execute(query("DELETE FROM quorum_proposals where view <= $1").bind(view.u64() as i64))
+        .await?;
+
     tx.commit().await
 }
 
@@ -951,13 +977,15 @@ mod testing {
             ..Default::default()
         }
         .into();
-        opt.create().await.unwrap()
+        let (_, storage) = opt.create().await.unwrap();
+        storage
     }
 
     #[cfg(feature = "embedded-db")]
     {
         let opt: Options = SqliteOptions { path: db.path() }.into();
-        opt.create().await.unwrap()
+        let (_, storage) = opt.create().await.unwrap();
+        storage
     }
 }
}
diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs
index 02b2b6140..8afc651ef 100644
--- a/types/src/v0/traits.rs
+++ b/types/src/v0/traits.rs
@@ -371,14 +371,15 @@ impl StateCatchup for Vec {
 pub trait PersistenceOptions: Clone + Send + Sync + 'static {
     type Persistence: SequencerPersistence;
 
-    async fn create(self) -> anyhow::Result<Self::Persistence>;
+    async fn create(self) -> anyhow::Result<(Self, Self::Persistence)>;
 
     async fn reset(self) -> anyhow::Result<()>;
 
     async fn create_catchup_provider(
         self,
         backoff: BackoffParams,
     ) -> anyhow::Result<Arc<dyn StateCatchup>> {
-        self.create().await?.into_catchup_provider(backoff)
+        let (_, persistence) = self.create().await?;
+        persistence.into_catchup_provider(backoff)
     }
 }
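
A few notes on the changes above, with illustrative sketches. All of the following code is hypothetical and not part of the patch.

The core API change is that `PersistenceOptions::create` now returns the options value back alongside the persistence handle, so anything cached during creation (for the SQL backend, the connection pool) survives into later `create` calls. A minimal sketch of the new contract, mirroring the `no_storage.rs` implementation; bare `async fn` in traits (stable since Rust 1.75) is assumed here for brevity:

```rust
use anyhow::Result;

// Sketch of the revised trait from types/src/v0/traits.rs: `create` hands the
// (possibly updated) options back to the caller together with the persistence.
#[allow(async_fn_in_trait)]
pub trait PersistenceOptions: Clone + Send + Sync + 'static {
    type Persistence;
    async fn create(self) -> Result<(Self, Self::Persistence)>;
}

#[derive(Clone)]
pub struct NoStorageOptions;
pub struct NoStorage;

impl PersistenceOptions for NoStorageOptions {
    type Persistence = NoStorage;

    // Returning `self` lets a second `create` call observe anything the first
    // call cached; the SQL backend stores its connection pool this way.
    async fn create(self) -> Result<(Self, Self::Persistence)> {
        Ok((self, NoStorage))
    }
}

// Mirrors the flow in main.rs: the first `create` produces the persistence for
// the query module, and the returned options (now carrying any cached state)
// are what get passed on to `init_node`, not the pristine options.
async fn demo(opt: NoStorageOptions) -> Result<()> {
    let (opt, _query_persistence) = opt.create().await?;
    let (_opt, _consensus_persistence) = opt.create().await?;
    Ok(())
}
```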
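The reason the options need to round-trip is the new `pool: Option<Pool<Db>>` field on the SQL `Options`: `create` runs once for the query module and again for consensus storage, and caching the pool keeps every consumer inside a single `MAX_CONNECTIONS` budget. Cloning an sqlx pool is cheap because a `Pool` is a reference-counted handle. A stand-alone sketch in plain sqlx rather than the repo's `SqlStorage` wrapper (assumes sqlx with the `sqlite` feature and an async runtime):

```rust
use sqlx::sqlite::{SqlitePool, SqlitePoolOptions};

// One pool, built once, then cloned: every clone shares the same pool state,
// so all consumers stay under a single connection cap.
async fn shared_pool_demo() -> Result<(), sqlx::Error> {
    let pool: SqlitePool = SqlitePoolOptions::new()
        .max_connections(5) // the one cap that every clone respects
        .connect("sqlite::memory:")
        .await?;

    // Stand-ins for the API module and consensus storage: handing out clones
    // creates new reference-counted handles, not new pools or connections.
    let api_pool = pool.clone();
    let consensus_pool = pool.clone();

    sqlx::query("CREATE TABLE t (x INTEGER)")
        .execute(&api_pool)
        .await?;
    sqlx::query("INSERT INTO t (x) VALUES (1)")
        .execute(&consensus_pool)
        .await?;
    Ok(())
}
```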
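`collect_garbage` now takes the whole `Persistence` instead of an open write transaction because SQLite misbehaves when read and write transactions overlap. The rework fetches everything in a read transaction, closes it, emits the decide events, and only then opens a write transaction for the deletes. A runnable sketch of the same pattern in plain sqlx (the `vid_share` table and its columns follow the sequencer schema; error handling is simplified):

```rust
use sqlx::{Row, SqlitePool};

// The read-then-write shape of the reworked GC: never hold a read transaction
// open while writing, which SQLite punishes with locking errors.
async fn gc_pattern(pool: &SqlitePool, view: i64) -> Result<(), sqlx::Error> {
    // 1. Read transaction: collect everything the decide event needs.
    let mut read_tx = pool.begin().await?;
    let rows = sqlx::query("SELECT view, data FROM vid_share WHERE view <= $1")
        .bind(view)
        .fetch_all(&mut *read_tx)
        .await?;
    read_tx.commit().await?; // release it before doing anything else

    // 2. No transaction open: process the rows (in the sequencer this is
    //    where the decide event is handed to the consumer).
    for row in &rows {
        let _view: i64 = row.get("view");
    }

    // 3. Write transaction: only after the event is handled, delete the
    //    consumed rows and commit.
    let mut write_tx = pool.begin().await?;
    sqlx::query("DELETE FROM vid_share WHERE view <= $1")
        .bind(view)
        .execute(&mut *write_tx)
        .await?;
    write_tx.commit().await
}
```

A useful side effect of the split is crash safety: if the process dies between emitting the event and the deletes, the data is still there and GC simply runs again at the next decide, which the patch's comments already rely on.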
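Finally, the V16 sqlite migration computes the merkle roots with `json_extract` over a `$.fields.…` path, because the serialized header nests its fields one level down, and then indexes the generated columns. One caveat, assuming stock SQLite semantics: SQLite documents that `ALTER TABLE … ADD COLUMN` can only add VIRTUAL generated columns, not STORED ones, so if the migration runs as a plain ALTER against an existing table it may need `VIRTUAL` instead. The sketch below sidesteps that by declaring the column in `CREATE TABLE`, where STORED is allowed (the JSON shape and the index name are taken from the migration; the root value is a placeholder):

```rust
use sqlx::{Row, SqlitePool};

// Demonstrates the idea behind V16: a generated column extracts the merkle
// root from the header JSON (note the `$.fields.` nesting) and is indexed.
async fn merkle_root_column_demo(pool: &SqlitePool) -> Result<(), sqlx::Error> {
    sqlx::query(
        "CREATE TABLE header (
            data TEXT NOT NULL,
            block_merkle_tree_root TEXT GENERATED ALWAYS AS
                (json_extract(data, '$.fields.block_merkle_tree_root')) STORED
        )",
    )
    .execute(pool)
    .await?;
    sqlx::query("CREATE INDEX header_block_merkle_tree_root_idx ON header (block_merkle_tree_root)")
        .execute(pool)
        .await?;

    // Insert a header with the assumed serialization shape; the generated
    // column is populated automatically and picked up by the index.
    sqlx::query(r#"INSERT INTO header (data) VALUES ('{"fields":{"block_merkle_tree_root":"MERKLE_COMM~demo"}}')"#)
        .execute(pool)
        .await?;

    let root: String = sqlx::query("SELECT block_merkle_tree_root FROM header")
        .fetch_one(pool)
        .await?
        .get(0);
    assert_eq!(root, "MERKLE_COMM~demo");
    Ok(())
}
```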