diff --git a/.dockerignore b/.dockerignore index 05cec369d9..763aeda1be 100644 --- a/.dockerignore +++ b/.dockerignore @@ -13,6 +13,9 @@ solgen/go **/node_modules target/**/* +!target/machines +!target/machines/* +!target/machines/**/* brotli/buildfiles/**/* # these are used by environment outside the docker: diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index f2b141fb46..807d165760 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -22,7 +22,7 @@ env: jobs: coverage: name: Run Arbitrator tests - runs-on: ubuntu-8 + runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v3 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0734aecfd0..f406c6a209 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,9 @@ on: jobs: test: name: Go Tests - runs-on: ubuntu-8 + container: + image: ghcr.io/catthehacker/ubuntu:js-22.04 + runs-on: [self-hosted, X64] # Creates a redis container for redis tests services: @@ -34,7 +36,9 @@ jobs: submodules: true - name: Install dependencies - run: sudo apt update && sudo apt install -y wabt gotestsum + run: > + sudo apt update && sudo apt install -y wabt gotestsum + cmake build-essential bison golang clang make wabt - name: Setup nodejs uses: actions/setup-node@v3 @@ -75,13 +79,16 @@ jobs: toolchain: "stable" override: true + - name: Install cbindgen + run: cargo install cbindgen + - name: Cache Build Products uses: actions/cache@v3 with: path: | ~/go/pkg/mod ~/.cache/go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}-${{ matrix.test-mode }} + key: ${{ runner.os }}-go-${{ hashFiles('go.sum') }}-${{ matrix.test-mode }} restore-keys: ${{ runner.os }}-go- - name: Cache Rust Build Products @@ -137,8 +144,8 @@ jobs: - name: Set environment variables run: | - mkdir -p target/tmp/deadbeefbee - echo "TMPDIR=$(pwd)/target/tmp/deadbeefbee" >> "$GITHUB_ENV" + mkdir -p target/tmp/x + echo "TMPDIR=$(pwd)/target/tmp/x" >> "$GITHUB_ENV" echo "GOMEMLIMIT=6GiB" >> "$GITHUB_ENV" echo "GOGC=80" >> "$GITHUB_ENV" @@ -156,7 +163,7 @@ jobs: - name: run redis tests if: matrix.test-mode == 'defaults' - run: TEST_REDIS=redis://localhost:6379/0 gotestsum --format short-verbose -- -p 1 -run TestRedis ./arbnode/... ./system_tests/... -coverprofile=coverage-redis.txt -covermode=atomic -coverpkg=./... + run: TEST_REDIS=redis://redis:6379/0 gotestsum --format short-verbose -- -p 1 -run TestRedis ./arbnode/... ./system_tests/... -coverprofile=coverage-redis.txt -covermode=atomic -coverpkg=./... 
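Reviewer note on the `TEST_REDIS` change above: the Go test job now runs inside a container on the runner's Docker network, so Redis is reached via the `redis` service hostname rather than `localhost`. A minimal sketch, assuming the tests resolve the endpoint from `TEST_REDIS` using the go-redis/v8 client the repo already imports (the fallback URL and the ping check are illustrative, not the repo's actual test harness):

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/go-redis/redis/v8"
)

func main() {
	// Inside the CI container the Redis service is addressed by its service name,
	// hence redis://redis:6379/0 instead of redis://localhost:6379/0.
	url := os.Getenv("TEST_REDIS")
	if url == "" {
		url = "redis://redis:6379/0" // illustrative default matching the workflow
	}
	opts, err := redis.ParseURL(url)
	if err != nil {
		panic(err)
	}
	client := redis.NewClient(opts)
	if err := client.Ping(context.Background()).Err(); err != nil {
		panic(fmt.Errorf("redis not reachable at %q: %w", url, err))
	}
	fmt.Println("redis reachable at", url)
}
```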
- name: run challenge tests if: matrix.test-mode == 'challenge' @@ -172,4 +179,3 @@ jobs: files: ./coverage.txt,./coverage-redis.txt verbose: false token: ${{ secrets.CODECOV_TOKEN }} - diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index cfb5b6eda6..b20abd5da9 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -26,7 +26,7 @@ jobs: analyze: name: Analyze if: github.repository == 'OffchainLabs/nitro' # don't run in any forks without "Advanced Security" enabled - runs-on: ubuntu-8 + runs-on: ubuntu-latest permissions: actions: read contents: read diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 6192f65a4e..21a7048cf1 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -13,7 +13,7 @@ on: jobs: docker: name: Docker build - runs-on: ubuntu-8 + runs-on: ubuntu-latest services: # local registery registry: @@ -59,6 +59,17 @@ jobs: cache-from: type=local,src=/tmp/.buildx-cache cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + - name: Start background nitro-testnode + shell: bash + run: | + cd nitro-testnode + ./test-node.bash --init --dev & + + - name: Wait for rpc to come up + shell: bash + run: | + ${{ github.workspace }}/.github/workflows/waitForNitro.sh + - name: Print WAVM module root id: module-root run: | diff --git a/.github/workflows/waitForNitro.sh b/.github/workflows/waitForNitro.sh new file mode 100755 index 0000000000..e196b38d88 --- /dev/null +++ b/.github/workflows/waitForNitro.sh @@ -0,0 +1,10 @@ +# poll the nitro endpoint until we get a 0 return code +while true +do + curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}' 'http://localhost:8547' + if [ "$?" -eq "0" ]; then + exit 0 + else + sleep 20 + fi +done \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml index e794cdb844..2828582486 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -45,6 +45,7 @@ linters-settings: gosec: excludes: - G404 # checks that random numbers are securely generated + - G114 govet: enable-all: true diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000000..72a0a83034 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,5 @@ +# These owners will be the default owners for everything in the repo. Unless a +# later match takes precedence, they will be requested for review when someone +# opens a pull request. 
+ +* @ImJeremyHe @nomaxg @philippecamacho @sveitser @jbearer diff --git a/arbitrator/prover/src/main.rs b/arbitrator/prover/src/main.rs index f820b03f4f..2c72d0b577 100644 --- a/arbitrator/prover/src/main.rs +++ b/arbitrator/prover/src/main.rs @@ -383,9 +383,7 @@ fn main() -> Result<()> { while let Some((module, func, profile)) = func_stack.pop() { sum.total_cycles += profile.total_cycles; sum.count += profile.count; - let entry = func_profile - .entry((module, func)) - .or_insert_with(SimpleProfile::default); + let entry = func_profile.entry((module, func)).or_default(); entry.count += sum.count; entry.total_cycles += sum.total_cycles; entry.local_cycles += profile.local_cycles; diff --git a/arbitrator/prover/test-cases/go/main.go b/arbitrator/prover/test-cases/go/main.go index 79541e48b0..afed870fea 100644 --- a/arbitrator/prover/test-cases/go/main.go +++ b/arbitrator/prover/test-cases/go/main.go @@ -89,7 +89,7 @@ func main() { verified, err = MerkleSample(data, -1) if err != nil { if verified { - panic("succeded to verify proof invalid") + panic("succeeded to verify proof invalid") } } diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index c848099513..77a839b70a 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" + "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbnode/redislock" @@ -47,6 +48,7 @@ import ( var ( batchPosterWalletBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/wallet/balanceether", nil) batchPosterGasRefunderBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/gasrefunder/balanceether", nil) + batchPosterSimpleRedisLockKey = "node.batch-poster.redis-lock.simple-lock-key" ) type batchPosterPosition struct { @@ -66,6 +68,8 @@ type BatchPoster struct { syncMonitor *SyncMonitor seqInboxABI *abi.ABI seqInboxAddr common.Address + bridgeAddr common.Address + gasRefunderAddr common.Address building *buildingBatch daWriter das.DataAvailabilityServiceWriter dataPoster *dataposter.DataPoster @@ -78,6 +82,8 @@ type BatchPoster struct { batchReverted atomic.Bool // indicates whether data poster batch was reverted nextRevertCheckBlock int64 // the last parent block scanned for reverting batches + + accessList func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList } type l1BlockBound int @@ -162,7 +168,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".l1-block-bound", DefaultBatchPosterConfig.L1BlockBound, "only post messages to batches when they're within the max future block/timestamp as of this L1 block tag (\"safe\", \"finalized\", \"latest\", or \"ignore\" to ignore this check)") f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay") redislock.AddConfigOptions(prefix+".redis-lock", f) - dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f) + dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfig) genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultBatchPosterConfig.ParentChainWallet.Pathname) } @@ -183,6 +189,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ ParentChainWallet: DefaultBatchPosterL1WalletConfig, 
L1BlockBound: "", L1BlockBoundBypass: time.Hour, + RedisLock: redislock.DefaultCfg, } var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{ @@ -210,67 +217,168 @@ var TestBatchPosterConfig = BatchPosterConfig{ L1BlockBoundBypass: time.Hour, } -func NewBatchPoster(ctx context.Context, dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderReader, inbox *InboxTracker, streamer *TransactionStreamer, syncMonitor *SyncMonitor, config BatchPosterConfigFetcher, deployInfo *chaininfo.RollupAddresses, transactOpts *bind.TransactOpts, daWriter das.DataAvailabilityServiceWriter) (*BatchPoster, error) { - seqInbox, err := bridgegen.NewSequencerInbox(deployInfo.SequencerInbox, l1Reader.Client()) +type BatchPosterOpts struct { + DataPosterDB ethdb.Database + L1Reader *headerreader.HeaderReader + Inbox *InboxTracker + Streamer *TransactionStreamer + SyncMonitor *SyncMonitor + Config BatchPosterConfigFetcher + DeployInfo *chaininfo.RollupAddresses + TransactOpts *bind.TransactOpts + DAWriter das.DataAvailabilityServiceWriter +} + +func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, error) { + seqInbox, err := bridgegen.NewSequencerInbox(opts.DeployInfo.SequencerInbox, opts.L1Reader.Client()) if err != nil { return nil, err } - bridge, err := bridgegen.NewBridge(deployInfo.Bridge, l1Reader.Client()) + bridge, err := bridgegen.NewBridge(opts.DeployInfo.Bridge, opts.L1Reader.Client()) if err != nil { return nil, err } - if err = config().Validate(); err != nil { + if err = opts.Config().Validate(); err != nil { return nil, err } seqInboxABI, err := bridgegen.SequencerInboxMetaData.GetAbi() if err != nil { return nil, err } - redisClient, err := redisutil.RedisClientFromURL(config().RedisUrl) + redisClient, err := redisutil.RedisClientFromURL(opts.Config().RedisUrl) if err != nil { return nil, err } redisLockConfigFetcher := func() *redislock.SimpleCfg { - return &config().RedisLock + simpleRedisLockConfig := opts.Config().RedisLock + simpleRedisLockConfig.Key = batchPosterSimpleRedisLockKey + return &simpleRedisLockConfig } - redisLock, err := redislock.NewSimple(redisClient, redisLockConfigFetcher, func() bool { return syncMonitor.Synced() }) + redisLock, err := redislock.NewSimple(redisClient, redisLockConfigFetcher, func() bool { return opts.SyncMonitor.Synced() }) if err != nil { return nil, err } b := &BatchPoster{ - l1Reader: l1Reader, - inbox: inbox, - streamer: streamer, - syncMonitor: syncMonitor, - config: config, - bridge: bridge, - seqInbox: seqInbox, - seqInboxABI: seqInboxABI, - seqInboxAddr: deployInfo.SequencerInbox, - daWriter: daWriter, - redisLock: redisLock, + l1Reader: opts.L1Reader, + inbox: opts.Inbox, + streamer: opts.Streamer, + syncMonitor: opts.SyncMonitor, + config: opts.Config, + bridge: bridge, + seqInbox: seqInbox, + seqInboxABI: seqInboxABI, + seqInboxAddr: opts.DeployInfo.SequencerInbox, + gasRefunderAddr: opts.Config().gasRefunder, + bridgeAddr: opts.DeployInfo.Bridge, + daWriter: opts.DAWriter, + redisLock: redisLock, + accessList: func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList { + return AccessList(&AccessListOpts{ + SequencerInboxAddr: opts.DeployInfo.SequencerInbox, + DataPosterAddr: opts.TransactOpts.From, + BridgeAddr: opts.DeployInfo.Bridge, + GasRefunderAddr: opts.Config().gasRefunder, + SequencerInboxAccs: SequencerInboxAccs, + AfterDelayedMessagesRead: AfterDelayedMessagesRead, + }) + }, } dataPosterConfigFetcher := func() *dataposter.DataPosterConfig { - return &config().DataPoster + return 
&(opts.Config().DataPoster) } b.dataPoster, err = dataposter.NewDataPoster(ctx, &dataposter.DataPosterOpts{ - Database: dataPosterDB, - HeaderReader: l1Reader, - Auth: transactOpts, + Database: opts.DataPosterDB, + HeaderReader: opts.L1Reader, + Auth: opts.TransactOpts, RedisClient: redisClient, RedisLock: redisLock, Config: dataPosterConfigFetcher, MetadataRetriever: b.getBatchPosterPosition, RedisKey: "data-poster.queue", - }, - ) + }) if err != nil { return nil, err } return b, nil } +type AccessListOpts struct { + SequencerInboxAddr common.Address + BridgeAddr common.Address + DataPosterAddr common.Address + GasRefunderAddr common.Address + SequencerInboxAccs int + AfterDelayedMessagesRead int +} + +// AccessList returns access list (contracts, storage slots) for batchposter. +func AccessList(opts *AccessListOpts) types.AccessList { + l := types.AccessList{ + types.AccessTuple{ + Address: opts.SequencerInboxAddr, + StorageKeys: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), // totalDelayedMessagesRead + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), // bridge + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"), // maxTimeVariation.delayBlocks + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"), // maxTimeVariation.futureBlocks + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000006"), // maxTimeVariation.delaySeconds + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000007"), // maxTimeVariation.futureSeconds + // ADMIN_SLOT from OpenZeppelin, keccak-256 hash of + // "eip1967.proxy.admin" subtracted by 1. + common.HexToHash("0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103"), + // IMPLEMENTATION_SLOT from OpenZeppelin, keccak-256 hash + // of "eip1967.proxy.implementation" subtracted by 1. + common.HexToHash("0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc"), + // isBatchPoster[batchPosterAddr]; for mainnnet it's: "0xa10aa54071443520884ed767b0684edf43acec528b7da83ab38ce60126562660". + common.Hash(arbutil.PaddedKeccak256(opts.DataPosterAddr.Bytes(), []byte{3})), + }, + }, + types.AccessTuple{ + Address: opts.BridgeAddr, + StorageKeys: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000006"), // delayedInboxAccs.length + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000007"), // sequencerInboxAccs.length + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000009"), // sequencerInbox + common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000000a"), // sequencerReportedSubMessageCount + // ADMIN_SLOT from OpenZeppelin, keccak-256 hash of + // "eip1967.proxy.admin" subtracted by 1. + common.HexToHash("0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103"), + // IMPLEMENTATION_SLOT from OpenZeppelin, keccak-256 hash + // of "eip1967.proxy.implementation" subtracted by 1. 
+ common.HexToHash("0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc"), + // These below may change when transaction is actually executed: + // - delayedInboxAccs[delayedInboxAccs.length - 1] + // - delayedInboxAccs.push(...); + }, + }, + } + + for _, v := range []struct{ slotIdx, val int }{ + {7, opts.SequencerInboxAccs - 1}, // - sequencerInboxAccs[sequencerInboxAccs.length - 1]; (keccak256(7, sequencerInboxAccs.length - 1)) + {7, opts.SequencerInboxAccs}, // - sequencerInboxAccs.push(...); (keccak256(7, sequencerInboxAccs.length)) + {6, opts.AfterDelayedMessagesRead - 1}, // - delayedInboxAccs[afterDelayedMessagesRead - 1]; (keccak256(6, afterDelayedMessagesRead - 1)) + } { + sb := arbutil.SumBytes(arbutil.PaddedKeccak256([]byte{byte(v.slotIdx)}), big.NewInt(int64(v.val)).Bytes()) + l[1].StorageKeys = append(l[1].StorageKeys, common.Hash(sb)) + } + + if (opts.GasRefunderAddr != common.Address{}) { + l = append(l, types.AccessTuple{ + Address: opts.GasRefunderAddr, + StorageKeys: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"), // CommonParameters.{maxRefundeeBalance, extraGasMargin, calldataCost, maxGasTip} + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"), // CommonParameters.{maxGasCost, maxSingleGasUsage} + // allowedContracts[msg.sender]; for mainnet it's: "0x7686888b19bb7b75e46bb1aa328b65150743f4899443d722f0adf8e252ccda41". + common.Hash(arbutil.PaddedKeccak256(opts.SequencerInboxAddr.Bytes(), []byte{1})), + // allowedRefundees[refundee]; for mainnet it's: "0xe85fd79f89ff278fc57d40aecb7947873df9f0beac531c8f71a98f630e1eab62". + common.Hash(arbutil.PaddedKeccak256(opts.DataPosterAddr.Bytes(), []byte{2})), + }, + }) + } + return l +} + // checkRevert checks blocks with number in range [from, to] whether they // contain reverted batch_poster transaction. // It returns true if it finds batch posting needs to halt, which is true if a batch reverts @@ -909,7 +1017,18 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if err != nil { return false, err } - tx, err := b.dataPoster.PostTransaction(ctx, firstMsgTime, nonce, newMeta, b.seqInboxAddr, data, gasLimit, new(big.Int)) + tx, err := b.dataPoster.PostTransaction(ctx, + firstMsgTime, + nonce, + newMeta, + b.seqInboxAddr, + data, + gasLimit, + new(big.Int), + b.accessList( + int(batchPosition.NextSeqNum), + int(b.building.segments.delayedMsg)), + ) if err != nil { return false, err } diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 3b563e9658..f98f0e51cf 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -6,19 +6,25 @@ package dataposter import ( "context" + "crypto/tls" + "crypto/x509" "errors" "fmt" "math/big" + "net/http" + "os" "strings" "sync" "time" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/arbnode/dataposter/dbstorage" @@ -39,7 +45,7 @@ import ( // is initialized with specified sender/signer and keeps nonce of that address // as it posts transactions. 
// Transactions are also saved in the queue when it's being sent, and when -// persistant storage is used for the queue, after restarting the node +// persistent storage is used for the queue, after restarting the node // dataposter will pick up where it left. // DataPoster must be RLP serializable and deserializable type DataPoster struct { @@ -47,7 +53,7 @@ type DataPoster struct { headerReader *headerreader.HeaderReader client arbutil.L1Interface sender common.Address - signer bind.SignerFn + signer signerFn redisLock AttemptLocker config ConfigFetcher replacementTimes []time.Duration @@ -66,6 +72,11 @@ type DataPoster struct { errorCount map[uint64]int // number of consecutive intermittent errors rbf-ing or sending, per nonce } +// signerFn is a signer function callback when a contract requires a method to +// sign the transaction before submission. +// This can be local or external, hence the context parameter. +type signerFn func(context.Context, common.Address, *types.Transaction) (*types.Transaction, error) + type AttemptLocker interface { AttemptLock(context.Context) bool } @@ -85,7 +96,7 @@ func parseReplacementTimes(val string) ([]time.Duration, error) { lastReplacementTime = t } if len(res) == 0 { - log.Warn("disabling replace-by-fee for data poster") + log.Warn("Disabling replace-by-fee for data poster") } // To avoid special casing "don't replace again", replace in 10 years. return append(res, time.Hour*24*365*10), nil @@ -103,13 +114,13 @@ type DataPosterOpts struct { } func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, error) { - initConfig := opts.Config() - replacementTimes, err := parseReplacementTimes(initConfig.ReplacementTimes) + cfg := opts.Config() + replacementTimes, err := parseReplacementTimes(cfg.ReplacementTimes) if err != nil { return nil, err } - if opts.HeaderReader.IsParentChainArbitrum() && !initConfig.UseNoOpStorage { - initConfig.UseNoOpStorage = true + if opts.HeaderReader.IsParentChainArbitrum() && !cfg.UseNoOpStorage { + cfg.UseNoOpStorage = true log.Info("Disabling data poster storage, as parent chain appears to be an Arbitrum chain without a mempool") } encF := func() storage.EncoderDecoderInterface { @@ -120,17 +131,17 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro } var queue QueueStorage switch { - case initConfig.UseNoOpStorage: + case cfg.UseNoOpStorage: queue = &noop.Storage{} case opts.RedisClient != nil: var err error - queue, err = redisstorage.NewStorage(opts.RedisClient, opts.RedisKey, &initConfig.RedisSigner, encF) + queue, err = redisstorage.NewStorage(opts.RedisClient, opts.RedisKey, &cfg.RedisSigner, encF) if err != nil { return nil, err } - case initConfig.UseDBStorage: + case cfg.UseDBStorage: storage := dbstorage.New(opts.Database, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} }) - if initConfig.Dangerous.ClearDBStorage { + if cfg.Dangerous.ClearDBStorage { if err := storage.PruneAll(ctx); err != nil { return nil, err } @@ -139,18 +150,88 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro default: queue = slice.NewStorage(func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} }) } - return &DataPoster{ - headerReader: opts.HeaderReader, - client: opts.HeaderReader.Client(), - sender: opts.Auth.From, - signer: opts.Auth.Signer, + dp := &DataPoster{ + headerReader: opts.HeaderReader, + client: opts.HeaderReader.Client(), + sender: opts.Auth.From, + signer: func(_ context.Context, addr 
common.Address, tx *types.Transaction) (*types.Transaction, error) { + return opts.Auth.Signer(addr, tx) + }, config: opts.Config, replacementTimes: replacementTimes, metadataRetriever: opts.MetadataRetriever, queue: queue, redisLock: opts.RedisLock, errorCount: make(map[uint64]int), - }, nil + } + if cfg.ExternalSigner.URL != "" { + signer, sender, err := externalSigner(ctx, &cfg.ExternalSigner) + if err != nil { + return nil, err + } + dp.signer, dp.sender = signer, sender + } + return dp, nil +} + +func rpcClient(ctx context.Context, opts *ExternalSignerCfg) (*rpc.Client, error) { + rootCrt, err := os.ReadFile(opts.RootCA) + if err != nil { + return nil, fmt.Errorf("error reading external signer root CA: %w", err) + } + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(rootCrt) + return rpc.DialOptions( + ctx, + opts.URL, + rpc.WithHTTPClient( + &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + RootCAs: pool, + }, + }, + }, + ), + ) +} + +// externalSigner returns a signer function and the Ethereum address of the signer. +// Returns an error if the address isn't specified or if it can't connect to the +// signer RPC server. +func externalSigner(ctx context.Context, opts *ExternalSignerCfg) (signerFn, common.Address, error) { + if opts.Address == "" { + return nil, common.Address{}, errors.New("external signer (From) address not specified") + } + + client, err := rpcClient(ctx, opts) + if err != nil { + return nil, common.Address{}, fmt.Errorf("error connecting external signer: %w", err) + } + sender := common.HexToAddress(opts.Address) + + var hasher types.Signer + return func(ctx context.Context, addr common.Address, tx *types.Transaction) (*types.Transaction, error) { + // According to the "eth_signTransaction" API definition, this should be + // RLP encoded transaction object.
+ // https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_signtransaction + var data hexutil.Bytes + if err := client.CallContext(ctx, &data, opts.Method, tx); err != nil { + return nil, fmt.Errorf("signing transaction: %w", err) + } + var signedTx types.Transaction + if err := rlp.DecodeBytes(data, &signedTx); err != nil { + return nil, fmt.Errorf("error decoding signed transaction: %w", err) + } + if hasher == nil { + hasher = types.LatestSignerForChainID(tx.ChainId()) + } + if hasher.Hash(tx) != hasher.Hash(&signedTx) { + return nil, fmt.Errorf("transaction: %x from external signer differs from request: %x", hasher.Hash(&signedTx), hasher.Hash(tx)) + } + return &signedTx, nil + }, sender, nil } func (p *DataPoster) Sender() common.Address { @@ -340,7 +421,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u return newFeeCap, newTipCap, nil } -func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int) (*types.Transaction, error) { +func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int, accessList types.AccessList) (*types.Transaction, error) { p.mutex.Lock() defer p.mutex.Unlock() @@ -362,15 +443,16 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim return nil, err } inner := types.DynamicFeeTx{ - Nonce: nonce, - GasTipCap: tipCap, - GasFeeCap: feeCap, - Gas: gasLimit, - To: &to, - Value: value, - Data: calldata, - } - fullTx, err := p.signer(p.sender, types.NewTx(&inner)) + Nonce: nonce, + GasTipCap: tipCap, + GasFeeCap: feeCap, + Gas: gasLimit, + To: &to, + Value: value, + Data: calldata, + AccessList: accessList, + } + fullTx, err := p.signer(ctx, p.sender, types.NewTx(&inner)) if err != nil { return nil, fmt.Errorf("signing transaction: %w", err) } @@ -449,7 +531,7 @@ func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransa newTx.Sent = false newTx.Data.GasFeeCap = newFeeCap newTx.Data.GasTipCap = newTipCap - newTx.FullTx, err = p.signer(p.sender, types.NewTx(&newTx.Data)) + newTx.FullTx, err = p.signer(ctx, p.sender, types.NewTx(&newTx.Data)) if err != nil { return err } @@ -635,20 +717,35 @@ type DataPosterConfig struct { ReplacementTimes string `koanf:"replacement-times"` // This is forcibly disabled if the parent chain is an Arbitrum chain, // so you should probably use DataPoster's waitForL1Finality method instead of reading this field directly. 
- WaitForL1Finality bool `koanf:"wait-for-l1-finality" reload:"hot"` - MaxMempoolTransactions uint64 `koanf:"max-mempool-transactions" reload:"hot"` - MaxQueuedTransactions int `koanf:"max-queued-transactions" reload:"hot"` - TargetPriceGwei float64 `koanf:"target-price-gwei" reload:"hot"` - UrgencyGwei float64 `koanf:"urgency-gwei" reload:"hot"` - MinFeeCapGwei float64 `koanf:"min-fee-cap-gwei" reload:"hot"` - MinTipCapGwei float64 `koanf:"min-tip-cap-gwei" reload:"hot"` - MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"` - NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"` - AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"` - UseDBStorage bool `koanf:"use-db-storage"` - UseNoOpStorage bool `koanf:"use-noop-storage"` - LegacyStorageEncoding bool `koanf:"legacy-storage-encoding" reload:"hot"` - Dangerous DangerousConfig `koanf:"dangerous"` + WaitForL1Finality bool `koanf:"wait-for-l1-finality" reload:"hot"` + MaxMempoolTransactions uint64 `koanf:"max-mempool-transactions" reload:"hot"` + MaxQueuedTransactions int `koanf:"max-queued-transactions" reload:"hot"` + TargetPriceGwei float64 `koanf:"target-price-gwei" reload:"hot"` + UrgencyGwei float64 `koanf:"urgency-gwei" reload:"hot"` + MinFeeCapGwei float64 `koanf:"min-fee-cap-gwei" reload:"hot"` + MinTipCapGwei float64 `koanf:"min-tip-cap-gwei" reload:"hot"` + MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"` + NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"` + AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"` + UseDBStorage bool `koanf:"use-db-storage"` + UseNoOpStorage bool `koanf:"use-noop-storage"` + LegacyStorageEncoding bool `koanf:"legacy-storage-encoding" reload:"hot"` + Dangerous DangerousConfig `koanf:"dangerous"` + ExternalSigner ExternalSignerCfg `koanf:"external-signer"` +} + +type ExternalSignerCfg struct { + // URL of the external signer rpc server, if set this overrides transaction + // options and uses external signer + // for signing transactions. + URL string `koanf:"url"` + // Hex encoded ethereum address of the external signer. + Address string `koanf:"address"` + // API method name (e.g. eth_signTransaction). + Method string `koanf:"method"` + // Path to the external signer root CA certificate. + // This allows us to use self-signed certificats on the external signer. + RootCA string `koanf:"root-ca"` } type DangerousConfig struct { @@ -661,30 +758,38 @@ type DangerousConfig struct { // that flags can be reloaded dynamically. 
type ConfigFetcher func() *DataPosterConfig -func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { - f.String(prefix+".replacement-times", DefaultDataPosterConfig.ReplacementTimes, "comma-separated list of durations since first posting to attempt a replace-by-fee") - f.Bool(prefix+".wait-for-l1-finality", DefaultDataPosterConfig.WaitForL1Finality, "only treat a transaction as confirmed after L1 finality has been achieved (recommended)") - f.Uint64(prefix+".max-mempool-transactions", DefaultDataPosterConfig.MaxMempoolTransactions, "the maximum number of transactions to have queued in the mempool at once (0 = unlimited)") - f.Int(prefix+".max-queued-transactions", DefaultDataPosterConfig.MaxQueuedTransactions, "the maximum number of unconfirmed transactions to track at once (0 = unlimited)") - f.Float64(prefix+".target-price-gwei", DefaultDataPosterConfig.TargetPriceGwei, "the target price to use for maximum fee cap calculation") - f.Float64(prefix+".urgency-gwei", DefaultDataPosterConfig.UrgencyGwei, "the urgency to use for maximum fee cap calculation") - f.Float64(prefix+".min-fee-cap-gwei", DefaultDataPosterConfig.MinFeeCapGwei, "the minimum fee cap to post transactions at") - f.Float64(prefix+".min-tip-cap-gwei", DefaultDataPosterConfig.MinTipCapGwei, "the minimum tip cap to post transactions at") - f.Float64(prefix+".max-tip-cap-gwei", DefaultDataPosterConfig.MaxTipCapGwei, "the maximum tip cap to post transactions at") - f.Uint64(prefix+".nonce-rbf-soft-confs", DefaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee") - f.Bool(prefix+".allocate-mempool-balance", DefaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance") - f.Bool(prefix+".use-db-storage", DefaultDataPosterConfig.UseDBStorage, "uses database storage when enabled") - f.Bool(prefix+".use-noop-storage", DefaultDataPosterConfig.UseNoOpStorage, "uses noop storage, it doesn't store anything") - f.Bool(prefix+".legacy-storage-encoding", DefaultDataPosterConfig.LegacyStorageEncoding, "encodes items in a legacy way (as it was before dropping generics)") +func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet, defaultDataPosterConfig DataPosterConfig) { + f.String(prefix+".replacement-times", defaultDataPosterConfig.ReplacementTimes, "comma-separated list of durations since first posting to attempt a replace-by-fee") + f.Bool(prefix+".wait-for-l1-finality", defaultDataPosterConfig.WaitForL1Finality, "only treat a transaction as confirmed after L1 finality has been achieved (recommended)") + f.Uint64(prefix+".max-mempool-transactions", defaultDataPosterConfig.MaxMempoolTransactions, "the maximum number of transactions to have queued in the mempool at once (0 = unlimited)") + f.Int(prefix+".max-queued-transactions", defaultDataPosterConfig.MaxQueuedTransactions, "the maximum number of unconfirmed transactions to track at once (0 = unlimited)") + f.Float64(prefix+".target-price-gwei", defaultDataPosterConfig.TargetPriceGwei, "the target price to use for maximum fee cap calculation") + f.Float64(prefix+".urgency-gwei", defaultDataPosterConfig.UrgencyGwei, "the urgency to use for maximum fee cap calculation") + f.Float64(prefix+".min-fee-cap-gwei", defaultDataPosterConfig.MinFeeCapGwei, "the minimum fee cap to post transactions at") + f.Float64(prefix+".min-tip-cap-gwei", defaultDataPosterConfig.MinTipCapGwei, "the 
minimum tip cap to post transactions at") + f.Float64(prefix+".max-tip-cap-gwei", defaultDataPosterConfig.MaxTipCapGwei, "the maximum tip cap to post transactions at") + f.Uint64(prefix+".nonce-rbf-soft-confs", defaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee") + f.Bool(prefix+".allocate-mempool-balance", defaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance") + f.Bool(prefix+".use-db-storage", defaultDataPosterConfig.UseDBStorage, "uses database storage when enabled") + f.Bool(prefix+".use-noop-storage", defaultDataPosterConfig.UseNoOpStorage, "uses noop storage, it doesn't store anything") + f.Bool(prefix+".legacy-storage-encoding", defaultDataPosterConfig.LegacyStorageEncoding, "encodes items in a legacy way (as it was before dropping generics)") signature.SimpleHmacConfigAddOptions(prefix+".redis-signer", f) addDangerousOptions(prefix+".dangerous", f) + addExternalSignerOptions(prefix+".external-signer", f) } func addDangerousOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".clear-dbstorage", DefaultDataPosterConfig.Dangerous.ClearDBStorage, "clear database storage") } +func addExternalSignerOptions(prefix string, f *pflag.FlagSet) { + f.String(prefix+".url", DefaultDataPosterConfig.ExternalSigner.URL, "external signer url") + f.String(prefix+".address", DefaultDataPosterConfig.ExternalSigner.Address, "external signer address") + f.String(prefix+".method", DefaultDataPosterConfig.ExternalSigner.Method, "external signer method") + f.String(prefix+".root-ca", DefaultDataPosterConfig.ExternalSigner.RootCA, "external signer root CA") +} + var DefaultDataPosterConfig = DataPosterConfig{ ReplacementTimes: "5m,10m,20m,30m,1h,2h,4h,6h,8h,12h,16h,18h,20h,22h", WaitForL1Finality: true, @@ -699,6 +804,7 @@ var DefaultDataPosterConfig = DataPosterConfig{ UseNoOpStorage: false, LegacyStorageEncoding: true, Dangerous: DangerousConfig{ClearDBStorage: false}, + ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction"}, } var DefaultDataPosterConfigForValidator = func() DataPosterConfig { @@ -720,6 +826,7 @@ var TestDataPosterConfig = DataPosterConfig{ AllocateMempoolBalance: true, UseDBStorage: false, UseNoOpStorage: false, + ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction"}, } var TestDataPosterConfigForValidator = func() DataPosterConfig { diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go index b8a9c3e499..519f5f49a2 100644 --- a/arbnode/dataposter/dataposter_test.go +++ b/arbnode/dataposter/dataposter_test.go @@ -1,9 +1,23 @@ package dataposter import ( + "context" + "encoding/json" + "fmt" + "io" + "math/big" + "net/http" + "os" "testing" "time" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/google/go-cmp/cmp" ) @@ -41,3 +55,169 @@ func TestParseReplacementTimes(t *testing.T) { }) } } + +func TestExternalSigner(t *testing.T) { + ctx := context.Background() + httpSrv, srv := newServer(ctx, t) + t.Cleanup(func() { + if err := httpSrv.Shutdown(ctx); err != nil { + t.Fatalf("Error shutting down http server: %v", err) + 
} + }) + cert, key := "./testdata/localhost.crt", "./testdata/localhost.key" + go func() { + fmt.Println("Server is listening on port 1234...") + if err := httpSrv.ListenAndServeTLS(cert, key); err != nil && err != http.ErrServerClosed { + t.Errorf("ListenAndServeTLS() unexpected error: %v", err) + return + } + }() + signer, addr, err := externalSigner(ctx, + &ExternalSignerCfg{ + Address: srv.address.Hex(), + URL: "https://localhost:1234", + Method: "test_signTransaction", + RootCA: cert, + }) + if err != nil { + t.Fatalf("Error getting external signer: %v", err) + } + tx := types.NewTransaction(13, common.HexToAddress("0x01"), big.NewInt(1), 2, big.NewInt(3), []byte{0x01, 0x02, 0x03}) + got, err := signer(ctx, addr, tx) + if err != nil { + t.Fatalf("Error signing transaction with external signer: %v", err) + } + want, err := srv.signerFn(addr, tx) + if err != nil { + t.Fatalf("Error signing transaction: %v", err) + } + if diff := cmp.Diff(want.Hash(), got.Hash()); diff != "" { + t.Errorf("Signing transaction: unexpected diff: %v\n", diff) + } +} + +type server struct { + handlers map[string]func(*json.RawMessage) (string, error) + signerFn bind.SignerFn + address common.Address +} + +type request struct { + ID *json.RawMessage `json:"id"` + Method string `json:"method"` + Params *json.RawMessage `json:"params"` +} + +type response struct { + ID *json.RawMessage `json:"id"` + Result string `json:"result,omitempty"` +} + +// newServer returns http server and server struct that implements RPC methods. +// It sets up an account in temporary directory and cleans up after test is +// done. +func newServer(ctx context.Context, t *testing.T) (*http.Server, *server) { + t.Helper() + signer, address, err := setupAccount("/tmp/keystore") + if err != nil { + t.Fatalf("Error setting up account: %v", err) + } + t.Cleanup(func() { os.RemoveAll("/tmp/keystore") }) + + s := &server{signerFn: signer, address: address} + s.handlers = map[string]func(*json.RawMessage) (string, error){ + "test_signTransaction": s.signTransaction, + } + m := http.NewServeMux() + httpSrv := &http.Server{Addr: ":1234", Handler: m, ReadTimeout: 5 * time.Second} + m.HandleFunc("/", s.mux) + return httpSrv, s +} + +// setupAccount creates a new account in a given directory, unlocks it, creates +// signer with that account and returns it along with account address. +func setupAccount(dir string) (bind.SignerFn, common.Address, error) { + ks := keystore.NewKeyStore( + dir, + keystore.StandardScryptN, + keystore.StandardScryptP, + ) + a, err := ks.NewAccount("password") + if err != nil { + return nil, common.Address{}, fmt.Errorf("creating account account: %w", err) + } + if err := ks.Unlock(a, "password"); err != nil { + return nil, common.Address{}, fmt.Errorf("unlocking account: %w", err) + } + txOpts, err := bind.NewKeyStoreTransactorWithChainID(ks, a, big.NewInt(1)) + if err != nil { + return nil, common.Address{}, fmt.Errorf("creating transactor: %w", err) + } + return txOpts.Signer, a.Address, nil +} + +// UnmarshallFirst unmarshalls slice of params and returns the first one. +// Parameters in Go ethereum RPC calls are marashalled as slices. E.g. 
+// eth_sendRawTransaction or eth_signTransaction, marshall transaction as a +// slice of transactions in a message: +// https://github.com/ethereum/go-ethereum/blob/0004c6b229b787281760b14fb9460ffd9c2496f1/rpc/client.go#L548 +func unmarshallFirst(params []byte) (*types.Transaction, error) { + var arr []apitypes.SendTxArgs + if err := json.Unmarshal(params, &arr); err != nil { + return nil, fmt.Errorf("unmarshaling first param: %w", err) + } + if len(arr) != 1 { + return nil, fmt.Errorf("argument should be a single transaction, but got: %d", len(arr)) + } + return arr[0].ToTransaction(), nil +} + +func (s *server) signTransaction(params *json.RawMessage) (string, error) { + tx, err := unmarshallFirst(*params) + if err != nil { + return "", err + } + signedTx, err := s.signerFn(s.address, tx) + if err != nil { + return "", fmt.Errorf("signing transaction: %w", err) + } + data, err := rlp.EncodeToBytes(signedTx) + if err != nil { + return "", fmt.Errorf("rlp encoding transaction: %w", err) + } + return hexutil.Encode(data), nil +} + +func (s *server) mux(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "can't read body", http.StatusBadRequest) + return + } + var req request + if err := json.Unmarshal(body, &req); err != nil { + http.Error(w, "can't unmarshal JSON request", http.StatusBadRequest) + return + } + method, ok := s.handlers[req.Method] + if !ok { + http.Error(w, "method not found", http.StatusNotFound) + return + } + result, err := method(req.Params) + if err != nil { + fmt.Printf("error calling method: %v\n", err) + http.Error(w, "error calling method", http.StatusInternalServerError) + return + } + resp := response{ID: req.ID, Result: result} + respBytes, err := json.Marshal(resp) + if err != nil { + http.Error(w, fmt.Sprintf("error encoding response: %v", err), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + if _, err := w.Write(respBytes); err != nil { + fmt.Printf("error writing response: %v\n", err) + } +} diff --git a/arbnode/dataposter/testdata/localhost.cnf b/arbnode/dataposter/testdata/localhost.cnf new file mode 100644 index 0000000000..41647cc422 --- /dev/null +++ b/arbnode/dataposter/testdata/localhost.cnf @@ -0,0 +1,52 @@ +[req] +default_bits = 2048 +default_keyfile = server-key.pem +distinguished_name = subject +req_extensions = req_ext +x509_extensions = x509_ext +string_mask = utf8only + +[subject] +countryName = CH +countryName_default = CH + +stateOrProvinceName = Zurich +stateOrProvinceName_default = ZH + +localityName = city +localityName_default = Zurich + +organizationName = Offchain Labs +organizationName_default = Offchain Labs + +commonName = offchainlabs.ch +commonName_default = localhost + +emailAddress = Email Address +emailAddress_default = bigdeal@offchainlabs.ch + +[x509_ext] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer + +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment +subjectAltName = @alternate_names +nsComment = "OpenSSL Generated Certificate" + +[req_ext] +subjectKeyIdentifier = hash + +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment +subjectAltName = @alternate_names +nsComment = "OpenSSL Generated Certificate" + +[alternate_names] +DNS.1 = localhost +DNS.2 = 127.0.0.1 + +[alternate_names] +DNS.1 = localhost +DNS.2 = 127.0.0.1 + diff --git a/arbnode/dataposter/testdata/localhost.crt b/arbnode/dataposter/testdata/localhost.crt new file mode 100644 index 
0000000000..ca33dfc8cc --- /dev/null +++ b/arbnode/dataposter/testdata/localhost.crt @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE----- +MIIEwzCCA6ugAwIBAgIUHx3SdpCP5jXZE7USUqX5uRNFKPIwDQYJKoZIhvcNAQEL +BQAwfzELMAkGA1UEBhMCQ0gxCzAJBgNVBAgMAlpIMQ8wDQYDVQQHDAZadXJpY2gx +FjAUBgNVBAoMDU9mZmNoYWluIExhYnMxEjAQBgNVBAMMCWxvY2FsaG9zdDEmMCQG +CSqGSIb3DQEJARYXYmlnZGVhbEBvZmZjaGFpbmxhYnMuY2gwHhcNMjMxMDE2MTQ0 +MDA1WhcNMjQxMDE1MTQ0MDA1WjB/MQswCQYDVQQGEwJDSDELMAkGA1UECAwCWkgx +DzANBgNVBAcMBlp1cmljaDEWMBQGA1UECgwNT2ZmY2hhaW4gTGFiczESMBAGA1UE +AwwJbG9jYWxob3N0MSYwJAYJKoZIhvcNAQkBFhdiaWdkZWFsQG9mZmNoYWlubGFi +cy5jaDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALg7XwaIh4l2Fp8a +MfNMdTQSMPMR0zpnicVTn/eiozWsqlAKaxmQM3PxJ0oVWW3iJ89p4rv5m+UjK6Dr +vsUQOzl8isgyGCTMnkLtxFlyallDNRDawRcuTPuNI9NkdJm+Zz7HooLzFeBDeS13 +iRPEXr1T/4af9MjOxqFvbw5xBY9k4tc2hPp6q00948gPWKIB9Mz4thoB2Hl2rQBY +X/WhjSnre9o9qoyBO0XAsG0mssBs1vPa9/aEp7C5cDY0HCuM1RIjhXnRpb8lC9VQ +aC+FozDffmm23EGVpLmyPs590UOtVJdTUd6Q0TAT6d7fjCRUJ12DendQf2uMFV90 +u6Yj0zUCAwEAAaOCATUwggExMB0GA1UdDgQWBBT2B3FTGFQ49JyBgDGLoZREOIGD +DTCBqAYDVR0jBIGgMIGdoYGEpIGBMH8xCzAJBgNVBAYTAkNIMQswCQYDVQQIDAJa +SDEPMA0GA1UEBwwGWnVyaWNoMRYwFAYDVQQKDA1PZmZjaGFpbiBMYWJzMRIwEAYD +VQQDDAlsb2NhbGhvc3QxJjAkBgkqhkiG9w0BCQEWF2JpZ2RlYWxAb2ZmY2hhaW5s +YWJzLmNoghQfHdJ2kI/mNdkTtRJSpfm5E0Uo8jAJBgNVHRMEAjAAMAsGA1UdDwQE +AwIFoDAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4wLjAuMTAsBglghkgBhvhC +AQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDQYJKoZIhvcNAQEL +BQADggEBAIkhBcnLeeNwUwb+sSG4Qm8JdeplHPMeViNfFIflUfIIYS00JA2q9w8W ++6Nh8s6Dn20lQETUnesYj97BdqzLjFuJYAlblhE+zP8g/3Mkpu+wZAGvQjUIRyGT +C17BEtQQgAnv5pD22jr9hpLl2KowN6Oo1gzilCA+AtMkNZFIGDOxzuIv2u8rSD89 +R/V6UEDMCgusFJnZ/GzKkUNbsrAfNUezNUal+KzMhHGHBwg4jfCNhnAAB43eRtJA +0pSRMMLcUEQnVotXDXYC3DhJmkYp1uXOH/tWs6z9xForOkWFxNMVj+zUWBi7n3Jw +N2BXlb64D96uor13U0dmvQJ72ooJc+A= +-----END CERTIFICATE----- diff --git a/arbnode/dataposter/testdata/localhost.key b/arbnode/dataposter/testdata/localhost.key new file mode 100644 index 0000000000..aad9b40b3d --- /dev/null +++ b/arbnode/dataposter/testdata/localhost.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC4O18GiIeJdhaf +GjHzTHU0EjDzEdM6Z4nFU5/3oqM1rKpQCmsZkDNz8SdKFVlt4ifPaeK7+ZvlIyug +677FEDs5fIrIMhgkzJ5C7cRZcmpZQzUQ2sEXLkz7jSPTZHSZvmc+x6KC8xXgQ3kt +d4kTxF69U/+Gn/TIzsahb28OcQWPZOLXNoT6eqtNPePID1iiAfTM+LYaAdh5dq0A +WF/1oY0p63vaPaqMgTtFwLBtJrLAbNbz2vf2hKewuXA2NBwrjNUSI4V50aW/JQvV +UGgvhaMw335pttxBlaS5sj7OfdFDrVSXU1HekNEwE+ne34wkVCddg3p3UH9rjBVf +dLumI9M1AgMBAAECggEAHuc8oyKrQ5xmooUZHGP2pAeqJNfYXAtqoYpLwtUJ9hKy +1e7NdNIKw3fP/J4UrHk7btAm65us8hSCeMGatEErAhNZT0gR4zhcksMCBPQLkVIT ++HINYjdOzAJqoEbRRUnaVT5VDQy8HmyLCtyqhoGR18XbjshNnhKLYKCJ2z0Lrvf2 +3rU7bbt7/rvLitVhxVL8SIe2jWSfIgcEmEAZMigB9WAnUyQ/tAfbPy1I764LLfzD +nLXn7E2OH7GrxkLjOsH9kfERlur7V7IhC9NE/wI0q+rnILRa7Q3+ifRu8qla3bo1 +iyHl1ZmsYJ8Jnzbu9exzZaQmk42OoFPcMFm0mRe+2QKBgQDvRv0Q5JhBuVurkU98 +lzATwEO0uYmeWDMnHzrFSWAKr/x4LNQ9ytSCfe1aLxgOkZq6dQ3TyZiCYzpmwGz9 +K7/gghxmsVDKeCqiGVZOgFAWy7AhQyF6zM60oqqwSvJHhmGTsA/B5LPUiYe9lITW +ZSLVYkOzha7Coa++U8vPzI5VaQKBgQDFG4reFT79j8RKEm9jie6PdRdYMzOSDWty +Gjj5N9Jnlp1k/6RzCxjmp7w7yIorq/7fWZsQtt0UqgayOn25+I8dZeGC0BradUSB +tZbGElxPsF8Jg00ZvvK3G5mpZYDrJCud8Q05EaUZPXv9GuZhozEsTQgylVecVzsN +wyEK8VuZ7QKBgQChx9adUGIdtgzkILiknbh08j8U94mz1SCo5/WdpLHaKAlE29KZ +AQXUQP51Rng2iX4bab9yndCPADZheON3/debHX3EdUkRzFPPC+CN7TW5Y/jvVGtT +kxyDh6Ru1A2iDJr290iAKXjpUB/GL5/tMa5upiTuQYnasOWZgyC/nCf0WQKBgEwn +pRLDMLA1IMjhsInL3BEvU1KvjahLaQ0P1p1rlO6TAcLpBrewPPG5MwACLmhLLtFK +xJ/Dl02Jl8a61KLKxzi7iVLKZuWq00ouR8/FfkcHxOBfC6X74bkff9I0NogjVHrU 
+jKBVEe3blJEpGIP20mPka1tn2g68oUNi9dxNfm/NAoGAWj/Q0pgnNq0MQ8Lj6m99 +1baaXSo8biks3E3A3cqhHQm/j3SRnkf0lueQW8+r9yR9IWdYFXz5Waq13qK+lopE +KDmww0xr8dyMUYTP1vde7np2XKa/OX3iejDzbI3RcZN/DEV+dCBY8pqHHfaAaESu +fwBWvfD8wtwCZzB3lOZEi80= +-----END PRIVATE KEY----- diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index c82e45fbee..72e4ba2887 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -15,7 +15,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" + "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" @@ -24,6 +26,11 @@ import ( "github.com/offchainlabs/nitro/util/containers" ) +var ( + inboxLatestBatchGauge = metrics.NewRegisteredGauge("arb/inbox/latest/batch", nil) + inboxLatestBatchMessageGauge = metrics.NewRegisteredGauge("arb/inbox/latest/batch/message", nil) +) + type InboxTracker struct { db ethdb.Database txStreamer *TransactionStreamer @@ -676,6 +683,8 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L "l1Block", latestL1Block, "l1Timestamp", time.Unix(int64(latestTimestamp), 0), ) + inboxLatestBatchGauge.Update(int64(pos)) + inboxLatestBatchMessageGauge.Update(int64(newMessageCount)) if t.validator != nil { t.validator.ReorgToBatchCount(startPos) diff --git a/arbnode/maintenance.go b/arbnode/maintenance.go index f5b937cd06..53d038a0f9 100644 --- a/arbnode/maintenance.go +++ b/arbnode/maintenance.go @@ -78,6 +78,7 @@ func MaintenanceConfigAddOptions(prefix string, f *flag.FlagSet) { var DefaultMaintenanceConfig = MaintenanceConfig{ TimeOfDay: "", + Lock: redislock.DefaultCfg, minutesAfterMidnight: 0, } diff --git a/arbnode/node.go b/arbnode/node.go index bf57b1c004..4fbfaf4bb2 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -370,6 +370,7 @@ var ConfigDefault = Config{ Dangerous: DefaultDangerousConfig, TransactionStreamer: DefaultTransactionStreamerConfig, ResourceMgmt: resourcemanager.DefaultConfig, + Maintenance: DefaultMaintenanceConfig, } func ConfigDefaultL1Test() *Config { @@ -841,7 +842,17 @@ func createNodeImpl( if txOptsBatchPoster == nil { return nil, errors.New("batchposter, but no TxOpts") } - batchPoster, err = NewBatchPoster(ctx, rawdb.NewTable(arbDb, storage.BatchPosterPrefix), l1Reader, inboxTracker, txStreamer, syncMonitor, func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, deployInfo, txOptsBatchPoster, daWriter) + batchPoster, err = NewBatchPoster(ctx, &BatchPosterOpts{ + DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix), + L1Reader: l1Reader, + Inbox: inboxTracker, + Streamer: txStreamer, + SyncMonitor: syncMonitor, + Config: func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, + DeployInfo: deployInfo, + TransactOpts: txOptsBatchPoster, + DAWriter: daWriter, + }) if err != nil { return nil, err } diff --git a/arbnode/redislock/redis.go b/arbnode/redislock/redis.go index c02476f04a..c8252e059f 100644 --- a/arbnode/redislock/redis.go +++ b/arbnode/redislock/redis.go @@ -42,7 +42,7 @@ func AddConfigOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".my-id", "", "this node's id prefix when acquiring the lock (optional)") f.Duration(prefix+".lockout-duration", DefaultCfg.LockoutDuration, "how long lock is held") f.Duration(prefix+".refresh-duration", DefaultCfg.RefreshDuration, "how long between consecutive 
calls to redis") - f.String(prefix+".key", prefix+".simple-lock-key", "key for lock") + f.String(prefix+".key", DefaultCfg.Key, "key for lock") f.Bool(prefix+".background-lock", DefaultCfg.BackgroundLock, "should node always try grabing lock in background") } diff --git a/arbos/addressSet/addressSet_test.go b/arbos/addressSet/addressSet_test.go index bc3b46e80f..7d06c74f0b 100644 --- a/arbos/addressSet/addressSet_test.go +++ b/arbos/addressSet/addressSet_test.go @@ -270,7 +270,7 @@ func TestRectifyMapping(t *testing.T) { // Non owner's should not be able to call RectifyMapping err := aset.RectifyMapping(testhelpers.RandomAddress()) if err == nil { - Fail(t, "RectifyMapping was succesfully called by non owner") + Fail(t, "RectifyMapping was successfully called by non owner") } // Corrupt the list and verify if RectifyMapping fixes it diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index e506f76907..142efbeafe 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -75,7 +75,7 @@ const ( InitialInertia = 10 InitialPerUnitReward = 10 InitialPerBatchGasCostV6 = 100_000 - InitialPerBatchGasCostV12 = 210_000 // overriden as part of the upgrade + InitialPerBatchGasCostV12 = 210_000 // overridden as part of the upgrade ) // one minute at 100000 bytes / sec diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index 3572042a09..4eeffc679e 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -483,7 +483,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { err = util.TransferBalance(&refundFrom, &inner.RefundTo, toRefundAddr, p.evm, scenario, "refund") if err != nil { // Normally the network fee address should be holding any collected fees. - // However, in theory, they could've been transfered out during the redeem attempt. + // However, in theory, they could've been transferred out during the redeem attempt. // If the network fee address doesn't have the necessary balance, log an error and don't give a refund. log.Error(errLog, "err", err, "feeAddress", refundFrom) } diff --git a/arbos/util/transfer.go b/arbos/util/transfer.go index da3243fd8d..3a81181200 100644 --- a/arbos/util/transfer.go +++ b/arbos/util/transfer.go @@ -15,7 +15,7 @@ import ( "github.com/offchainlabs/nitro/util/arbmath" ) -// TransferBalance represents a balance change occuring aside from a call. +// TransferBalance represents a balance change occurring aside from a call. // While most uses will be transfers, setting `from` or `to` to nil will mint or burn funds, respectively. func TransferBalance( from, to *common.Address, @@ -39,7 +39,7 @@ func TransferBalance( } if tracer := evm.Config.Tracer; tracer != nil { if evm.Depth() != 0 && scenario != TracingDuringEVM { - // A non-zero depth implies this transfer is occuring inside EVM execution + // A non-zero depth implies this transfer is occurring inside EVM execution log.Error("Tracing scenario mismatch", "scenario", scenario, "depth", evm.Depth()) return errors.New("tracing scenario mismatch") } diff --git a/arbutil/hash.go b/arbutil/hash.go new file mode 100644 index 0000000000..c6e91c8ebf --- /dev/null +++ b/arbutil/hash.go @@ -0,0 +1,26 @@ +package arbutil + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// PaddedKeccak256 pads each argument to 32 bytes, concatenates and returns +// keccak256 hash of the result. 
+func PaddedKeccak256(args ...[]byte) []byte { + var data []byte + for _, arg := range args { + data = append(data, common.BytesToHash(arg).Bytes()...) + } + return crypto.Keccak256(data) +} + +// SumBytes sums two byte slices and returns the result. +// If the sum of bytes are over 32 bytes, it return last 32. +func SumBytes(a, b []byte) []byte { + A := big.NewInt(0).SetBytes(a) + B := big.NewInt(0).SetBytes(b) + return common.BytesToHash((A.Add(A, B)).Bytes()).Bytes() +} diff --git a/arbutil/hash_test.go b/arbutil/hash_test.go new file mode 100644 index 0000000000..2b93353d08 --- /dev/null +++ b/arbutil/hash_test.go @@ -0,0 +1,83 @@ +package arbutil + +import ( + "bytes" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/go-cmp/cmp" +) + +func TestSlotAddress(t *testing.T) { + for _, tc := range []struct { + name string + args [][]byte + want []byte + }{ + { + name: "isBatchPoster[batchPosterAddr]", // Keccak256(addr, 3) + args: [][]byte{ + common.FromHex("0xC1b634853Cb333D3aD8663715b08f41A3Aec47cc"), // mainnet batch poster address + {3}, + }, + want: common.HexToHash("0xa10aa54071443520884ed767b0684edf43acec528b7da83ab38ce60126562660").Bytes(), + }, + { + name: "allowedContracts[msg.sender]", // Keccak256(msg.sender, 1) + args: [][]byte{ + common.FromHex("0x1c479675ad559DC151F6Ec7ed3FbF8ceE79582B6"), // mainnet sequencer address + {1}, + }, + want: common.HexToHash("0xe85fd79f89ff278fc57d40aecb7947873df9f0beac531c8f71a98f630e1eab62").Bytes(), + }, + { + name: "allowedRefundees[refundee]", // Keccak256(msg.sender, 2) + args: [][]byte{ + common.FromHex("0xC1b634853Cb333D3aD8663715b08f41A3Aec47cc"), // mainnet batch poster address + {2}, + }, + want: common.HexToHash("0x7686888b19bb7b75e46bb1aa328b65150743f4899443d722f0adf8e252ccda41").Bytes(), + }, + } { + t.Run(tc.name, func(t *testing.T) { + got := PaddedKeccak256(tc.args...) 
+ if !bytes.Equal(got, tc.want) { + t.Errorf("slotAddress(%x) = %x, want %x", tc.args, got, tc.want) + } + }) + } + +} + +func TestSumBytes(t *testing.T) { + for _, tc := range []struct { + desc string + a, b, want []byte + }{ + { + desc: "simple case", + a: []byte{0x0a, 0x0b}, + b: []byte{0x03, 0x04}, + want: common.HexToHash("0x0d0f").Bytes(), + }, + { + desc: "carry over last byte", + a: []byte{0x0a, 0xff}, + b: []byte{0x01}, + want: common.HexToHash("0x0b00").Bytes(), + }, + { + desc: "overflow", + a: common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes(), + b: []byte{0x01}, + want: common.HexToHash("0x00").Bytes(), + }, + } { + t.Run(tc.desc, func(t *testing.T) { + got := SumBytes(tc.a, tc.b) + if diff := cmp.Diff(got, tc.want); diff != "" { + t.Errorf("SumBytes(%x, %x) = %x want: %x", tc.a, tc.b, got, tc.want) + } + }) + } +} diff --git a/broadcastclient/broadcastclient.go b/broadcastclient/broadcastclient.go index 2649c88192..483b0b3b72 100644 --- a/broadcastclient/broadcastclient.go +++ b/broadcastclient/broadcastclient.go @@ -96,7 +96,7 @@ var DefaultConfig = Config{ RequireChainId: false, RequireFeedVersion: false, Verify: signature.DefultFeedVerifierConfig, - URL: []string{""}, + URL: []string{}, Timeout: 20 * time.Second, EnableCompression: true, } diff --git a/cmd/daserver/daserver.go b/cmd/daserver/daserver.go index 335aba6a1b..b2f8728a7d 100644 --- a/cmd/daserver/daserver.go +++ b/cmd/daserver/daserver.go @@ -44,6 +44,7 @@ type DAServerConfig struct { Conf genericconf.ConfConfig `koanf:"conf"` LogLevel int `koanf:"log-level"` + LogType string `koanf:"log-type"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` @@ -62,11 +63,12 @@ var DefaultDAServerConfig = DAServerConfig{ RESTServerTimeouts: genericconf.HTTPServerTimeoutConfigDefault, DataAvailability: das.DefaultDataAvailabilityConfig, Conf: genericconf.ConfConfigDefault, + LogLevel: int(log.LvlInfo), + LogType: "plaintext", Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, PProf: false, PprofCfg: genericconf.PProfDefault, - LogLevel: 3, } func main() { @@ -99,6 +101,8 @@ func parseDAServer(args []string) (*DAServerConfig, error) { genericconf.PProfAddOptions("pprof-cfg", f) f.Int("log-level", int(log.LvlInfo), "log level; 1: ERROR, 2: WARN, 3: INFO, 4: DEBUG, 5: TRACE") + f.String("log-type", DefaultDAServerConfig.LogType, "log type (plaintext or json)") + das.DataAvailabilityConfigAddDaserverOptions("data-availability", f) genericconf.ConfConfigAddOptions("conf", f) @@ -178,7 +182,12 @@ func startup() error { confighelpers.PrintErrorAndExit(errors.New("please specify at least one of --enable-rest or --enable-rpc"), printSampleUsage) } - glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) + logFormat, err := genericconf.ParseLogType(serverConfig.LogType) + if err != nil { + flag.Usage() + panic(fmt.Sprintf("Error parsing log type: %v", err)) + } + glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, logFormat)) glogger.Verbosity(log.Lvl(serverConfig.LogLevel)) log.Root().SetHandler(glogger) diff --git a/cmd/genericconf/config.go b/cmd/genericconf/config.go index 8e75b61772..c3282fe1af 100644 --- a/cmd/genericconf/config.go +++ b/cmd/genericconf/config.go @@ -33,7 +33,7 @@ func ConfConfigAddOptions(prefix string, f *flag.FlagSet) { var ConfConfigDefault = ConfConfig{ Dump: false, EnvPrefix: "", - File: nil, + File: []string{}, S3: DefaultS3Config, String: "", 
ReloadInterval: 0, diff --git a/cmd/genericconf/server.go b/cmd/genericconf/server.go index 53560dfdb0..3da027ab27 100644 --- a/cmd/genericconf/server.go +++ b/cmd/genericconf/server.go @@ -26,7 +26,7 @@ var HTTPConfigDefault = HTTPConfig{ Port: 8547, API: append(node.DefaultConfig.HTTPModules, "eth", "arb"), RPCPrefix: node.DefaultConfig.HTTPPathPrefix, - CORSDomain: node.DefaultConfig.HTTPCors, + CORSDomain: []string{}, VHosts: node.DefaultConfig.HTTPVirtualHosts, ServerTimeouts: HTTPServerTimeoutConfigDefault, } @@ -91,7 +91,7 @@ var WSConfigDefault = WSConfig{ Port: 8548, API: append(node.DefaultConfig.WSModules, "eth", "arb"), RPCPrefix: node.DefaultConfig.WSPathPrefix, - Origins: node.DefaultConfig.WSOrigins, + Origins: []string{}, ExposeAll: node.DefaultConfig.WSExposeAll, } @@ -137,7 +137,7 @@ type GraphQLConfig struct { var GraphQLConfigDefault = GraphQLConfig{ Enable: false, - CORSDomain: node.DefaultConfig.GraphQLCors, + CORSDomain: []string{}, VHosts: node.DefaultConfig.GraphQLVirtualHosts, } diff --git a/cmd/nitro/config_test.go b/cmd/nitro/config_test.go index 417b256116..ea04d4eb1f 100644 --- a/cmd/nitro/config_test.go +++ b/cmd/nitro/config_test.go @@ -15,10 +15,29 @@ import ( "time" "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/cmd/util/confighelpers" "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/util/testhelpers" + + "github.com/r3labs/diff/v3" + flag "github.com/spf13/pflag" ) +func TestEmptyCliConfig(t *testing.T) { + f := flag.NewFlagSet("", flag.ContinueOnError) + NodeConfigAddOptions(f) + k, err := confighelpers.BeginCommonParse(f, []string{}) + Require(t, err) + var emptyCliNodeConfig NodeConfig + err = confighelpers.EndCommonParse(k, &emptyCliNodeConfig) + Require(t, err) + if !reflect.DeepEqual(emptyCliNodeConfig, NodeConfigDefault) { + changelog, err := diff.Diff(emptyCliNodeConfig, NodeConfigDefault) + Require(t, err) + Fail(t, "empty cli config differs from expected default", changelog) + } +} + func TestSeqConfig(t *testing.T) { args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer --execution.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642", " ") _, _, _, err := ParseNode(context.Background(), args) diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index bef0f83d1f..f874b5d71e 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -10,6 +10,7 @@ import ( "fmt" "math/big" "os" + "reflect" "regexp" "runtime" "strings" @@ -296,7 +297,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node return nil, err } if initConfig.Prune == "validator" { - if l1Client == nil { + if l1Client == nil || reflect.ValueOf(l1Client).IsNil() { return nil, errors.New("an L1 connection is required for validator pruning") } callOpts := bind.CallOpts{ diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 9656f7f5ec..80b21e5ebe 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -61,7 +61,10 @@ import ( ) func printSampleUsage(name string) { - fmt.Printf("Sample usage: %s --help \n", name) + fmt.Printf("Sample usage: %s [OPTIONS] \n\n", name) + fmt.Printf("Options:\n") + fmt.Printf(" --help\n") + fmt.Printf(" --dev: Start a default L2-only dev chain\n") } func addUnlockWallet(accountManager *accounts.Manager, walletConf 
*genericconf.WalletConfig) (common.Address, error) { @@ -591,16 +594,23 @@ type NodeConfig struct { var NodeConfigDefault = NodeConfig{ Conf: genericconf.ConfConfigDefault, Node: arbnode.ConfigDefault, + Execution: gethexec.ConfigDefault, + Validation: valnode.DefaultValidationConfig, ParentChain: conf.L1ConfigDefault, Chain: conf.L2ConfigDefault, LogLevel: int(log.LvlInfo), LogType: "plaintext", + FileLogging: genericconf.DefaultFileLoggingConfig, Persistent: conf.PersistentConfigDefault, HTTP: genericconf.HTTPConfigDefault, WS: genericconf.WSConfigDefault, IPC: genericconf.IPCConfigDefault, + Auth: genericconf.AuthRPCConfigDefault, + GraphQL: genericconf.GraphQLConfigDefault, Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, + Init: InitConfigDefault, + Rpc: genericconf.DefaultRpcConfig, PProf: false, PprofCfg: genericconf.PProfDefault, } diff --git a/cmd/seq-coordinator-manager/seq-coordinator-manager.go b/cmd/seq-coordinator-manager/seq-coordinator-manager.go index a0123a9123..07bc26af2c 100644 --- a/cmd/seq-coordinator-manager/seq-coordinator-manager.go +++ b/cmd/seq-coordinator-manager/seq-coordinator-manager.go @@ -27,7 +27,7 @@ var addSeqForm = tview.NewForm() var priorityForm = tview.NewForm() var nonPriorityForm = tview.NewForm() -// Sequencer coordinator managment UI data store +// Sequencer coordinator management UI data store type manager struct { redisCoordinator *rediscoordinator.RedisCoordinator prioritiesSet map[string]bool diff --git a/cmd/util/confighelpers/configuration.go b/cmd/util/confighelpers/configuration.go index 18a2b10f2f..6116a492c9 100644 --- a/cmd/util/confighelpers/configuration.go +++ b/cmd/util/confighelpers/configuration.go @@ -138,10 +138,32 @@ func PrintErrorAndExit(err error, usage func(string)) { } } +func devFlagArgs() []string { + args := []string{ + "--init.dev-init", + "--init.dev-init-address", "0x3f1Eae7D46d88F08fc2F8ed27FCb2AB183EB2d0E", + "--node.dangerous.no-l1-listener", + "--node.parent-chain-reader.enable=false", + "--parent-chain.id=1337", + "--chain.id=412346", + "--persistent.chain", "/tmp/dev-test", + "--node.sequencer", + "--node.dangerous.no-sequencer-coordinator", + "--node.staker.enable=false", + "--init.empty=false", + "--http.port", "8547", + "--http.addr", "127.0.0.1", + } + return args +} + func BeginCommonParse(f *flag.FlagSet, args []string) (*koanf.Koanf, error) { for _, arg := range args { if arg == "--version" || arg == "-v" { return nil, ErrVersion + } else if arg == "--dev" { + args = devFlagArgs() + break } } if err := f.Parse(args); err != nil { diff --git a/das/das.go b/das/das.go index 208a12cc83..910e511083 100644 --- a/das/das.go +++ b/das/das.go @@ -69,6 +69,7 @@ var DefaultDataAvailabilityConfig = DataAvailabilityConfig{ RestAggregator: DefaultRestfulClientAggregatorConfig, ParentChainConnectionAttempts: 15, PanicOnError: false, + IpfsStorage: DefaultIpfsStorageServiceConfig, } func OptionalAddressFromString(s string) (*common.Address, error) { @@ -132,9 +133,9 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) { IpfsStorageServiceConfigAddOptions(prefix+".ipfs-storage", f) RestfulClientAggregatorConfigAddOptions(prefix+".rest-aggregator", f) - f.String(prefix+".parent-chain-node-url", DefaultDataAvailabilityConfig.ParentChainNodeURL, "URL for L1 node, only used in standalone daserver; when running as part of a node that node's L1 configuration is used") - f.Int(prefix+".parent-chain-connection-attempts", 
DefaultDataAvailabilityConfig.ParentChainConnectionAttempts, "layer 1 RPC connection attempts (spaced out at least 1 second per attempt, 0 to retry infinitely), only used in standalone daserver; when running as part of a node that node's L1 configuration is used") - f.String(prefix+".sequencer-inbox-address", DefaultDataAvailabilityConfig.SequencerInboxAddress, "L1 address of SequencerInbox contract") + f.String(prefix+".parent-chain-node-url", DefaultDataAvailabilityConfig.ParentChainNodeURL, "URL for parent chain node, only used in standalone daserver; when running as part of a node that node's L1 configuration is used") + f.Int(prefix+".parent-chain-connection-attempts", DefaultDataAvailabilityConfig.ParentChainConnectionAttempts, "parent chain RPC connection attempts (spaced out at least 1 second per attempt, 0 to retry infinitely), only used in standalone daserver; when running as part of a node that node's parent chain configuration is used") + f.String(prefix+".sequencer-inbox-address", DefaultDataAvailabilityConfig.SequencerInboxAddress, "parent chain address of SequencerInbox contract") } func Serialize(c *arbstate.DataAvailabilityCertificate) []byte { diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index b29309cdbb..5a99d59c5a 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "reflect" "sync/atomic" "testing" @@ -73,6 +74,7 @@ func (c *Config) Validate() error { func ConfigAddOptions(prefix string, f *flag.FlagSet) { arbitrum.ConfigAddOptions(prefix+".rpc", f) SequencerConfigAddOptions(prefix+".sequencer", f) + headerreader.AddOptions(prefix+".parent-chain-reader", f) arbitrum.RecordingDatabaseConfigAddOptions(prefix+".recording-database", f) f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTarget, "transaction forwarding target URL, or \"null\" to disable forwarding (iff not sequencer)") AddOptionsForNodeForwarderConfig(prefix+".forwarder", f) @@ -85,16 +87,19 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) { var ConfigDefault = Config{ RPC: arbitrum.DefaultConfig, Sequencer: DefaultSequencerConfig, + ParentChainReader: headerreader.DefaultConfig, RecordingDatabase: arbitrum.DefaultRecordingDatabaseConfig, ForwardingTarget: "", TxPreChecker: DefaultTxPreCheckerConfig, TxLookupLimit: 126_230_400, // 1 year at 4 blocks per second Caching: DefaultCachingConfig, Dangerous: DefaultDangerousConfig, + Forwarder: DefaultNodeForwarderConfig, } func ConfigDefaultNonSequencerTest() *Config { config := ConfigDefault + config.ParentChainReader = headerreader.Config{} config.Sequencer.Enable = false config.Forwarder = DefaultTestForwarderConfig config.ForwardingTarget = "null" @@ -106,8 +111,10 @@ func ConfigDefaultNonSequencerTest() *Config { func ConfigDefaultTest() *Config { config := ConfigDefault + config.ParentChainReader = headerreader.Config{} config.Sequencer = TestSequencerConfig config.ForwardingTarget = "null" + config.ParentChainReader = headerreader.TestConfig _ = config.Validate() @@ -148,7 +155,7 @@ func CreateExecutionNode( var sequencer *Sequencer var parentChainReader *headerreader.HeaderReader - if l1client != nil { + if l1client != nil && !reflect.ValueOf(l1client).IsNil() { arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) parentChainReader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher().ParentChainReader }, arbSys) if err != nil { diff --git a/execution/gethexec/sequencer.go 
b/execution/gethexec/sequencer.go index 77442f65e4..61792ed9b5 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -85,7 +85,7 @@ type SequencerConfigFetcher func() *SequencerConfig var DefaultSequencerConfig = SequencerConfig{ Enable: false, - MaxBlockSpeed: time.Millisecond * 100, + MaxBlockSpeed: time.Millisecond * 250, MaxRevertGasReject: params.TxGas + 10000, MaxAcceptableTimestampDelta: time.Hour, Forwarder: DefaultSequencerForwarderConfig, diff --git a/flake.lock b/flake.lock new file mode 100644 index 0000000000..03a83429e0 --- /dev/null +++ b/flake.lock @@ -0,0 +1,197 @@ +{ + "nodes": { + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1694529238, + "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "locked": { + "lastModified": 1644229661, + "narHash": "sha256-1YdnJAsNy69bpcjuoKdOYQX0YxZBiCYZo4Twxerqv7k=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "3cecb5b042f7f209c56ffd8371b2711a290ec797", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_3": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1681202837, + "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "cfacdce06f30d2b68473a46042957675eebb3401", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "foundry": { + "inputs": { + "flake-utils": "flake-utils_2", + "nixpkgs": "nixpkgs" + }, + "locked": { + "lastModified": 1696410815, + "narHash": "sha256-uku47D/L+VzO3sVoZbnexPQPGeQtMwMFBesyaA1vKtE=", + "owner": "shazow", + "repo": "foundry.nix", + "rev": "a56126a754d73f85d904768fed569a9e250388d9", + "type": "github" + }, + "original": { + "owner": "shazow", + "ref": "monthly", + "repo": "foundry.nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1666753130, + "narHash": "sha256-Wff1dGPFSneXJLI2c0kkdWTgxnQ416KE6X4KnFkgPYQ=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "f540aeda6f677354f1e7144ab04352f61aaa0118", + "type": "github" + }, + "original": { + "id": "nixpkgs", + "type": "indirect" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1697379843, + "narHash": "sha256-RcnGuJgC2K/UpTy+d32piEoBXq2M+nVFzM3ah/ZdJzg=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "12bdeb01ff9e2d3917e6a44037ed7df6e6c3df9d", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1681358109, + "narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + 
"repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "flake-compat": "flake-compat", + "flake-utils": "flake-utils", + "foundry": "foundry", + "nixpkgs": "nixpkgs_2", + "rust-overlay": "rust-overlay" + } + }, + "rust-overlay": { + "inputs": { + "flake-utils": "flake-utils_3", + "nixpkgs": "nixpkgs_3" + }, + "locked": { + "lastModified": 1697595136, + "narHash": "sha256-9honwiIeMbBKi7FzfEy89f1ShUiXz/gVxZSS048pKyc=", + "owner": "oxalica", + "repo": "rust-overlay", + "rev": "a2ccfb2134622b28668a274e403ba6f075ae1223", + "type": "github" + }, + "original": { + "owner": "oxalica", + "repo": "rust-overlay", + "type": "github" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000000..78c88e808e --- /dev/null +++ b/flake.nix @@ -0,0 +1,121 @@ +{ + description = "A Nix-flake-based Go 1.20 development environment"; + + inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + inputs.flake-utils.url = "github:numtide/flake-utils"; + inputs.flake-compat.url = "github:edolstra/flake-compat"; + inputs.flake-compat.flake = false; + inputs.rust-overlay.url = "github:oxalica/rust-overlay"; + inputs.foundry.url = "github:shazow/foundry.nix/monthly"; + + outputs = { flake-utils, nixpkgs, foundry, rust-overlay, ... }: + let + goVersion = 20; # Change this to update the whole stack + overlays = [ + (import rust-overlay) + (final: prev: { + go = prev."go_1_${toString goVersion}"; + # Overlaying nodejs here to ensure nodePackages use the desired + # version of nodejs. + nodejs = prev.nodejs-16_x; + pnpm = prev.nodePackages.pnpm; + yarn = prev.nodePackages.yarn; + }) + foundry.overlay + ]; + in + flake-utils.lib.eachDefaultSystem (system: + let + pkgs = import nixpkgs { + inherit overlays system; + config = { + permittedInsecurePackages = [ "nodejs-16.20.2" ]; + }; + }; + stableToolchain = pkgs.rust-bin.stable.latest.minimal.override { + extensions = [ "rustfmt" "clippy" "llvm-tools-preview" "rust-src" ]; + targets = [ "wasm32-unknown-unknown" "wasm32-wasi" ]; + }; + shellHook = '' + # Prevent cargo aliases from using programs in `~/.cargo` to avoid conflicts + # with rustup installations. + export CARGO_HOME=$HOME/.cargo-nix + '' + + pkgs.lib.optionalString pkgs.stdenv.isDarwin '' + # Fix docker-buildx command on OSX. Can we do this in a cleaner way? + mkdir -p ~/.docker/cli-plugins + # Check if the file exists, otherwise symlink + test -f $HOME/.docker/cli-plugins/docker-buildx || ln -sn $(which docker-buildx) $HOME/.docker/cli-plugins + ''; + in + { + devShells = + { + # This shell is only used for one make recipe because the other + # shell is not able to build one recipe and we haven't managed to + # come up with a dev shell that works for everything. 
+ # + # nix develop .#wasm -c make build-wasm-libs + # + # After that, the other shell can be used to run `make build`. + wasm = pkgs.mkShell { + # By default clang-unwrapped does not find its resource dir. See + # https://discourse.nixos.org/t/why-is-the-clang-resource-dir-split-in-a-separate-package/34114 + CPATH = "${pkgs.llvmPackages_16.libclang.lib}/lib/clang/16/include"; + packages = with pkgs; [ + stableToolchain + + llvmPackages_16.clang-unwrapped # provides clang without wrapper + llvmPackages_16.bintools # provides wasm-ld + + # Docker + docker-compose # provides the `docker-compose` command + docker-buildx + docker-credential-helpers # for `docker-credential-osxkeychain` command + ]; + + # Ensure the unwrapped clang is used by default. + shellHook = shellHook + '' + export PATH="${pkgs.llvmPackages_16.clang-unwrapped}/bin:$PATH" + ''; + }; + default = pkgs.mkShell { + + packages = with pkgs; [ + cmake + stableToolchain + + # llvmPackages_16.clang # provides clang without wrapper + # llvmPackages_16.bintools # provides wasm-ld + + go + # goimports, godoc, etc. + gotools + golangci-lint + gotestsum + + # Node + nodejs + yarn + + # wasm + rust-cbindgen + wabt + + # Docker + docker-compose # provides the `docker-compose` command + docker-buildx + docker-credential-helpers # for `docker-credential-osxkeychain` command + + foundry-bin + ] ++ lib.optionals stdenv.isDarwin [ + darwin.libobjc + darwin.IOKit + darwin.apple_sdk.frameworks.CoreFoundation + ]; + inherit shellHook; + RUST_SRC_PATH = "${stableToolchain}/lib/rustlib/src/rust/library"; + }; + }; + }); +} diff --git a/go.mod b/go.mod index 4bc28b950c..cdfae4df16 100644 --- a/go.mod +++ b/go.mod @@ -32,6 +32,7 @@ require ( github.com/libp2p/go-libp2p v0.27.8 github.com/multiformats/go-multiaddr v0.9.0 github.com/multiformats/go-multihash v0.2.1 + github.com/r3labs/diff/v3 v3.0.1 github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 github.com/spf13/pflag v1.0.5 github.com/wealdtech/go-merkletree v1.0.0 @@ -237,6 +238,8 @@ require ( github.com/samber/lo v1.36.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa // indirect + github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect diff --git a/go.sum b/go.sum index db3935001a..db81b3a07e 100644 --- a/go.sum +++ b/go.sum @@ -1449,6 +1449,8 @@ github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA= github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk= github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/r3labs/diff/v3 v3.0.1 h1:CBKqf3XmNRHXKmdU7mZP1w7TV0pDyVCis1AUHtA4Xtg= +github.com/r3labs/diff/v3 v3.0.1/go.mod h1:f1S9bourRbiM66NskseyUdo0fTmEE0qKrikYJX63dgo= github.com/rabbitmq/amqp091-go v1.1.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= @@ -1599,6 +1601,10 @@ github.com/valyala/fasttemplate v1.2.1/go.mod 
h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-testmark v0.9.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 77102a95b9..ded90ebdf7 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -366,7 +366,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr emitCost := gascost(args) cost := emitCost[0].Interface().(uint64) //nolint:errcheck if !emitCost[1].IsNil() { - // an error occured during gascost() + // an error occurred during gascost() return []reflect.Value{emitCost[1]} } if err := callerCtx.Burn(cost); err != nil { diff --git a/scripts/build-brotli.sh b/scripts/build-brotli.sh index 7160936baa..0e7f451cd1 100755 --- a/scripts/build-brotli.sh +++ b/scripts/build-brotli.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff --git a/shell.nix b/shell.nix new file mode 100644 index 0000000000..9eb132a589 --- /dev/null +++ b/shell.nix @@ -0,0 +1,13 @@ +(import + ( + let + lock = builtins.fromJSON (builtins.readFile ./flake.lock); + in + fetchTarball { + url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz"; + sha256 = lock.nodes.flake-compat.locked.narHash; + } + ) + { + src = ./.; + }).shellNix diff --git a/staker/block_validator.go b/staker/block_validator.go index 94bc2a0806..108d6d1d49 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -750,7 +750,7 @@ func (v *BlockValidator) iterativeValidationProgress(ctx context.Context, ignore } else if reorg != nil { err := v.Reorg(ctx, *reorg) if err != nil { - log.Error("error trying to rorg validation", "pos", *reorg-1, "err", err) + log.Error("error trying to reorg validation", "pos", *reorg-1, "err", err) v.possiblyFatal(err) } } diff --git a/staker/staker.go b/staker/staker.go index d52d1adc77..4148d0a204 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -203,7 +203,7 @@ func L1ValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".gas-refunder-address", DefaultL1ValidatorConfig.GasRefunderAddress, "The gas refunder contract address (optional)") f.String(prefix+".redis-url", DefaultL1ValidatorConfig.RedisUrl, "redis url for L1 validator") f.Uint64(prefix+".extra-gas", DefaultL1ValidatorConfig.ExtraGas, "use this much more gas than estimation says is necessary to post transactions") - dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f) + dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfigForValidator) redislock.AddConfigOptions(prefix+".redis-lock", f) 
DangerousConfigAddOptions(prefix+".dangerous", f) genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.ParentChainWallet.Pathname) diff --git a/staker/validatorwallet/contract.go b/staker/validatorwallet/contract.go index 3ade358cee..302e4fb439 100644 --- a/staker/validatorwallet/contract.go +++ b/staker/validatorwallet/contract.go @@ -177,7 +177,7 @@ func (v *Contract) executeTransaction(ctx context.Context, tx *types.Transaction if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value) + return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value, nil) } func (v *Contract) populateWallet(ctx context.Context, createIfMissing bool) error { @@ -288,7 +288,7 @@ func (v *Contract) ExecuteTransactions(ctx context.Context, builder *txbuilder.B if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - arbTx, err := v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), txData, gas, auth.Value) + arbTx, err := v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), txData, gas, auth.Value, nil) if err != nil { return nil, err } @@ -338,7 +338,7 @@ func (v *Contract) TimeoutChallenges(ctx context.Context, challenges []uint64) ( if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value) + return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value, nil) } // gasForTxData returns auth.GasLimit if it's nonzero, otherwise returns estimate. 
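The PostTransaction calls in this file (and in eoa.go below) gain a new trailing argument, passed as nil here. Together with the access-list helpers exercised in system_tests/seqinbox_test.go (arbnode.AccessList / arbnode.AccessListOpts), this trailing parameter appears to be an optional access list. A minimal, hypothetical sketch of a caller supplying one instead of nil, assuming the parameter is a go-ethereum types.AccessList; the address and slot names are illustrative and not taken from this diff:

// Hypothetical sketch only: declare the storage the transaction is expected to touch
// so the posted transaction carries an access list instead of nil.
// sequencerInboxAddr and isBatchPosterSlot are placeholder values.
accessList := types.AccessList{
	{Address: sequencerInboxAddr, StorageKeys: []common.Hash{isBatchPosterSlot}},
}
return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value, accessList)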
diff --git a/staker/validatorwallet/eoa.go b/staker/validatorwallet/eoa.go index b2c9f68b56..d86181f42f 100644 --- a/staker/validatorwallet/eoa.go +++ b/staker/validatorwallet/eoa.go @@ -95,7 +95,7 @@ func (w *EOA) postTransaction(ctx context.Context, baseTx *types.Transaction) (* return nil, err } gas := baseTx.Gas() + w.getExtraGas() - newTx, err := w.dataPoster.PostTransaction(ctx, time.Now(), nonce, nil, *baseTx.To(), baseTx.Data(), gas, baseTx.Value()) + newTx, err := w.dataPoster.PostTransaction(ctx, time.Now(), nonce, nil, *baseTx.To(), baseTx.Data(), gas, baseTx.Value(), nil) if err != nil { return nil, fmt.Errorf("post transaction: %w", err) } diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 1d705c05ac..8c0de8c6db 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -16,7 +16,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util/redisutil" ) @@ -46,44 +45,56 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { parallelBatchPosters = 4 } - conf := arbnode.ConfigDefaultL1Test() - conf.BatchPoster.Enable = false - conf.BatchPoster.RedisUrl = redisUrl - l2info, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil, nil) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.BatchPoster.Enable = false + builder.nodeConfig.BatchPoster.RedisUrl = redisUrl + cleanup := builder.Build(t) + defer cleanup() + l1A, l2A := builder.L1, builder.L2 - l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nil) - defer nodeB.StopAndWait() + l2B, cleanup2nd := builder.Build2ndNode(t, &SecondNodeParams{}) + defer cleanup2nd() - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") var txs []*types.Transaction for i := 0; i < 100; i++ { - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, common.Big1, nil) + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) txs = append(txs, tx) - err := l2clientA.SendTransaction(ctx, tx) + err := l2A.Client.SendTransaction(ctx, tx) Require(t, err) } for _, tx := range txs { - _, err := EnsureTxSucceeded(ctx, l2clientA, tx) + _, err := EnsureTxSucceeded(ctx, l2A.Client, tx) Require(t, err) } firstTxData, err := txs[0].MarshalBinary() Require(t, err) - seqTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) - conf.BatchPoster.Enable = true - conf.BatchPoster.MaxSize = len(firstTxData) * 2 - startL1Block, err := l1client.BlockNumber(ctx) + seqTxOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx) + builder.nodeConfig.BatchPoster.Enable = true + builder.nodeConfig.BatchPoster.MaxSize = len(firstTxData) * 2 + startL1Block, err := l1A.Client.BlockNumber(ctx) Require(t, err) for i := 0; i < parallelBatchPosters; i++ { // Make a copy of the batch poster config so NewBatchPoster calling Validate() on it doesn't race - batchPosterConfig := conf.BatchPoster - batchPoster, err := arbnode.NewBatchPoster(ctx, nil, nodeA.L1Reader, nodeA.InboxTracker, nodeA.TxStreamer, nodeA.SyncMonitor, func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, nodeA.DeployInfo, &seqTxOpts, nil) + batchPosterConfig := builder.nodeConfig.BatchPoster + batchPoster, err := arbnode.NewBatchPoster(ctx, + 
&arbnode.BatchPosterOpts{ + DataPosterDB: nil, + L1Reader: l2A.ConsensusNode.L1Reader, + Inbox: l2A.ConsensusNode.InboxTracker, + Streamer: l2A.ConsensusNode.TxStreamer, + SyncMonitor: l2A.ConsensusNode.SyncMonitor, + Config: func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, + DeployInfo: l2A.ConsensusNode.DeployInfo, + TransactOpts: &seqTxOpts, + DAWriter: nil, + }, + ) Require(t, err) batchPoster.Start(ctx) defer batchPoster.StopAndWait() @@ -91,11 +102,11 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { lastTxHash := txs[len(txs)-1].Hash() for i := 90; i > 0; i-- { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + SendWaitTestTransactions(t, ctx, l1A.Client, []*types.Transaction{ + builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) time.Sleep(500 * time.Millisecond) - _, err := l2clientB.TransactionReceipt(ctx, lastTxHash) + _, err := l2B.Client.TransactionReceipt(ctx, lastTxHash) if err == nil { break } @@ -104,13 +115,15 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { } } + // TODO: factor this out in separate test case and skip it or delete this + // code entirely. // I've locally confirmed that this passes when the clique period is set to 1. // However, setting the clique period to 1 slows everything else (including the L1 deployment for this test) down to a crawl. if false { // Make sure the batch poster is able to post multiple batches in one block - endL1Block, err := l1client.BlockNumber(ctx) + endL1Block, err := l1A.Client.BlockNumber(ctx) Require(t, err) - seqInbox, err := arbnode.NewSequencerInbox(l1client, nodeA.DeployInfo.SequencerInbox, 0) + seqInbox, err := arbnode.NewSequencerInbox(l1A.Client, l2A.ConsensusNode.DeployInfo.SequencerInbox, 0) Require(t, err) batches, err := seqInbox.LookupBatchesInRange(ctx, new(big.Int).SetUint64(startL1Block), new(big.Int).SetUint64(endL1Block)) Require(t, err) @@ -130,7 +143,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { } } - l2balance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + l2balance, err := l2B.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if l2balance.Sign() == 0 { @@ -143,26 +156,26 @@ func TestBatchPosterLargeTx(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := gethexec.ConfigDefaultTest() - conf.Sequencer.MaxTxDataSize = 110000 - l2info, nodeA, l2clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, conf, nil, nil) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig.Sequencer.MaxTxDataSize = 110000 + cleanup := builder.Build(t) + defer cleanup() + l2A := builder.L2 - l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nil) - defer nodeB.StopAndWait() + l2B, cleanup2nd := builder.Build2ndNode(t, &SecondNodeParams{}) + defer cleanup2nd() data := make([]byte, 100000) _, err := rand.Read(data) Require(t, err) - faucetAddr := l2info.GetAddress("Faucet") - gas := l2info.TransferGas + 20000*uint64(len(data)) - tx := l2info.PrepareTxTo("Faucet", &faucetAddr, gas, common.Big0, data) - err = l2clientA.SendTransaction(ctx, tx) + faucetAddr := builder.L2Info.GetAddress("Faucet") + gas := builder.L2Info.TransferGas + 20000*uint64(len(data)) + tx := builder.L2Info.PrepareTxTo("Faucet", &faucetAddr, gas, common.Big0, 
data) + err = l2A.Client.SendTransaction(ctx, tx) Require(t, err) - receiptA, err := EnsureTxSucceeded(ctx, l2clientA, tx) + receiptA, err := EnsureTxSucceeded(ctx, l2A.Client, tx) Require(t, err) - receiptB, err := EnsureTxSucceededWithTimeout(ctx, l2clientB, tx, time.Second*30) + receiptB, err := EnsureTxSucceededWithTimeout(ctx, l2B.Client, tx, time.Second*30) Require(t, err) if receiptA.BlockHash != receiptB.BlockHash { Fatal(t, "receipt A block hash", receiptA.BlockHash, "does not equal receipt B block hash", receiptB.BlockHash) @@ -174,26 +187,25 @@ func TestBatchPosterKeepsUp(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := arbnode.ConfigDefaultL1Test() - conf.BatchPoster.CompressionLevel = brotli.BestCompression - conf.BatchPoster.MaxDelay = time.Hour - execConf := gethexec.ConfigDefaultTest() - execConf.RPC.RPCTxFeeCap = 1000. - l2info, nodeA, l2clientA, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, execConf, nil, nil) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() - l2info.GasPrice = big.NewInt(100e9) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.BatchPoster.CompressionLevel = brotli.BestCompression + builder.nodeConfig.BatchPoster.MaxDelay = time.Hour + builder.execConfig.RPC.RPCTxFeeCap = 1000. + cleanup := builder.Build(t) + defer cleanup() + l2A := builder.L2 + builder.L2Info.GasPrice = big.NewInt(100e9) go func() { data := make([]byte, 90000) _, err := rand.Read(data) Require(t, err) for { - gas := l2info.TransferGas + 20000*uint64(len(data)) - tx := l2info.PrepareTx("Faucet", "Faucet", gas, common.Big0, data) - err = l2clientA.SendTransaction(ctx, tx) + gas := builder.L2Info.TransferGas + 20000*uint64(len(data)) + tx := builder.L2Info.PrepareTx("Faucet", "Faucet", gas, common.Big0, data) + err = l2A.Client.SendTransaction(ctx, tx) Require(t, err) - _, err := EnsureTxSucceeded(ctx, l2clientA, tx) + _, err := EnsureTxSucceeded(ctx, l2A.Client, tx) Require(t, err) } }() @@ -201,11 +213,11 @@ func TestBatchPosterKeepsUp(t *testing.T) { start := time.Now() for { time.Sleep(time.Second) - batches, err := nodeA.InboxTracker.GetBatchCount() + batches, err := l2A.ConsensusNode.InboxTracker.GetBatchCount() Require(t, err) - postedMessages, err := nodeA.InboxTracker.GetBatchMessageCount(batches - 1) + postedMessages, err := l2A.ConsensusNode.InboxTracker.GetBatchMessageCount(batches - 1) Require(t, err) - haveMessages, err := nodeA.TxStreamer.GetMessageCount() + haveMessages, err := l2A.ConsensusNode.TxStreamer.GetMessageCount() Require(t, err) duration := time.Since(start) fmt.Printf("batches posted: %v over %v (%.2f batches/second)\n", batches, duration, float64(batches)/(float64(duration)/float64(time.Second))) diff --git a/system_tests/bloom_test.go b/system_tests/bloom_test.go index 14c42f6a2f..9079fd35f1 100644 --- a/system_tests/bloom_test.go +++ b/system_tests/bloom_test.go @@ -17,7 +17,6 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/mocksgen" ) @@ -25,17 +24,19 @@ func TestBloom(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - execconfig := gethexec.ConfigDefaultTest() - execconfig.RPC.BloomBitsBlocks = 256 - execconfig.RPC.BloomConfirms = 1 - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, execconfig, false) - defer 
node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.execConfig.RPC.BloomBitsBlocks = 256 + builder.execConfig.RPC.BloomConfirms = 1 + builder.takeOwnership = false + cleanup := builder.Build(t) - l2info.GenerateAccount("User2") + defer cleanup() - ownerTxOpts := l2info.GetDefaultTransactOpts("Owner", ctx) + builder.L2Info.GenerateAccount("User2") + + ownerTxOpts := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) ownerTxOpts.Context = ctx - _, simple := deploySimple(t, ctx, ownerTxOpts, client) + _, simple := deploySimple(t, ctx, ownerTxOpts, builder.L2.Client) simpleABI, err := mocksgen.SimpleMetaData.GetAbi() Require(t, err) @@ -63,7 +64,7 @@ func TestBloom(t *testing.T) { if sendNullEvent { tx, err = simple.EmitNullEvent(&ownerTxOpts) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client, tx) + _, err = EnsureTxSucceeded(ctx, builder.L2.Client, tx) Require(t, err) } @@ -74,15 +75,14 @@ func TestBloom(t *testing.T) { tx, err = simple.Increment(&ownerTxOpts) } Require(t, err) - _, err = EnsureTxSucceeded(ctx, client, tx) + _, err = EnsureTxSucceeded(ctx, builder.L2.Client, tx) Require(t, err) if i%100 == 0 { t.Log("counts: ", i, "/", countsNum) } } - execNode := getExecNode(t, node) for { - sectionSize, sectionNum := execNode.Backend.APIBackend().BloomStatus() + sectionSize, sectionNum := builder.L2.ExecNode.Backend.APIBackend().BloomStatus() if sectionSize != 256 { Fatal(t, "unexpected section size: ", sectionSize) } @@ -92,14 +92,14 @@ func TestBloom(t *testing.T) { } <-time.After(time.Second) } - lastHeader, err := client.HeaderByNumber(ctx, nil) + lastHeader, err := builder.L2.Client.HeaderByNumber(ctx, nil) Require(t, err) nullEventQuery := ethereum.FilterQuery{ FromBlock: big.NewInt(0), ToBlock: lastHeader.Number, Topics: [][]common.Hash{{simpleABI.Events["NullEvent"].ID}}, } - logs, err := client.FilterLogs(ctx, nullEventQuery) + logs, err := builder.L2.Client.FilterLogs(ctx, nullEventQuery) Require(t, err) if len(logs) != len(nullEventCounts) { Fatal(t, "expected ", len(nullEventCounts), " logs, got ", len(logs)) @@ -107,7 +107,7 @@ func TestBloom(t *testing.T) { incrementEventQuery := ethereum.FilterQuery{ Topics: [][]common.Hash{{simpleABI.Events["CounterEvent"].ID}}, } - logs, err = client.FilterLogs(ctx, incrementEventQuery) + logs, err = builder.L2.Client.FilterLogs(ctx, incrementEventQuery) Require(t, err) if len(logs) != len(eventCounts) { Fatal(t, "expected ", len(eventCounts), " logs, got ", len(logs)) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 19357c5b79..d233631d4c 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -8,7 +8,6 @@ import ( "context" "encoding/hex" "encoding/json" - "fmt" "math/big" "net" "os" @@ -60,6 +59,167 @@ import ( type info = *BlockchainTestInfo type client = arbutil.L1Interface +type SecondNodeParams struct { + nodeConfig *arbnode.Config + execConfig *gethexec.Config + stackConfig *node.Config + dasConfig *das.DataAvailabilityConfig + initData *statetransfer.ArbosInitializationInfo +} + +type TestClient struct { + ctx context.Context + Client *ethclient.Client + L1Backend *eth.Ethereum + Stack *node.Node + ConsensusNode *arbnode.Node + ExecNode *gethexec.ExecutionNode + + // having cleanup() field makes cleanup customizable from default cleanup methods after calling build + cleanup func() +} + +func NewTestClient(ctx context.Context) *TestClient { + return &TestClient{ctx: ctx} +} + +func (tc *TestClient) SendSignedTx(t *testing.T, l2Client 
*ethclient.Client, transaction *types.Transaction, lInfo info) *types.Receipt { + return SendSignedTxViaL1(t, tc.ctx, lInfo, tc.Client, l2Client, transaction) +} + +func (tc *TestClient) SendUnsignedTx(t *testing.T, l2Client *ethclient.Client, transaction *types.Transaction, lInfo info) *types.Receipt { + return SendUnsignedTxViaL1(t, tc.ctx, lInfo, tc.Client, l2Client, transaction) +} + +func (tc *TestClient) TransferBalance(t *testing.T, from string, to string, amount *big.Int, lInfo info) (*types.Transaction, *types.Receipt) { + return TransferBalanceTo(t, from, lInfo.GetAddress(to), amount, lInfo, tc.Client, tc.ctx) +} + +func (tc *TestClient) TransferBalanceTo(t *testing.T, from string, to common.Address, amount *big.Int, lInfo info) (*types.Transaction, *types.Receipt) { + return TransferBalanceTo(t, from, to, amount, lInfo, tc.Client, tc.ctx) +} + +func (tc *TestClient) GetBalance(t *testing.T, account common.Address) *big.Int { + return GetBalance(t, tc.ctx, tc.Client, account) +} + +func (tc *TestClient) GetBaseFeeAt(t *testing.T, blockNum *big.Int) *big.Int { + return GetBaseFeeAt(t, tc.Client, tc.ctx, blockNum) +} + +func (tc *TestClient) SendWaitTestTransactions(t *testing.T, txs []*types.Transaction) { + SendWaitTestTransactions(t, tc.ctx, tc.Client, txs) +} + +func (tc *TestClient) DeploySimple(t *testing.T, auth bind.TransactOpts) (common.Address, *mocksgen.Simple) { + return deploySimple(t, tc.ctx, auth, tc.Client) +} + +type NodeBuilder struct { + // NodeBuilder configuration + ctx context.Context + chainConfig *params.ChainConfig + nodeConfig *arbnode.Config + execConfig *gethexec.Config + l1StackConfig *node.Config + l2StackConfig *node.Config + L1Info info + L2Info info + + // L1, L2 Node parameters + dataDir string + isSequencer bool + takeOwnership bool + withL1 bool + + // Created nodes + L1 *TestClient + L2 *TestClient +} + +func NewNodeBuilder(ctx context.Context) *NodeBuilder { + return &NodeBuilder{ctx: ctx} +} + +func (b *NodeBuilder) DefaultConfig(t *testing.T, withL1 bool) *NodeBuilder { + // most used values across current tests are set here as default + b.withL1 = withL1 + if withL1 { + b.isSequencer = true + b.nodeConfig = arbnode.ConfigDefaultL1Test() + } else { + b.takeOwnership = true + b.nodeConfig = arbnode.ConfigDefaultL2Test() + } + b.chainConfig = params.ArbitrumDevTestChainConfig() + b.L1Info = NewL1TestInfo(t) + b.L2Info = NewArbTestInfo(t, b.chainConfig.ChainID) + b.dataDir = t.TempDir() + b.l1StackConfig = createStackConfigForTest(b.dataDir) + b.l2StackConfig = createStackConfigForTest(b.dataDir) + b.execConfig = gethexec.ConfigDefaultTest() + return b +} + +func (b *NodeBuilder) Build(t *testing.T) func() { + if b.withL1 { + l1, l2 := NewTestClient(b.ctx), NewTestClient(b.ctx) + b.L2Info, l2.ConsensusNode, l2.Client, l2.Stack, b.L1Info, l1.L1Backend, l1.Client, l1.Stack = + createTestNodeOnL1WithConfigImpl(t, b.ctx, b.isSequencer, b.nodeConfig, b.execConfig, b.chainConfig, b.l2StackConfig, b.L2Info) + b.L1, b.L2 = l1, l2 + b.L1.cleanup = func() { requireClose(t, b.L1.Stack) } + } else { + l2 := NewTestClient(b.ctx) + b.L2Info, l2.ConsensusNode, l2.Client = + CreateTestL2WithConfig(t, b.ctx, b.L2Info, b.nodeConfig, b.execConfig, b.takeOwnership) + b.L2 = l2 + } + b.L2.ExecNode = getExecNode(t, b.L2.ConsensusNode) + b.L2.cleanup = func() { b.L2.ConsensusNode.StopAndWait() } + return func() { + b.L2.cleanup() + if b.L1 != nil && b.L1.cleanup != nil { + b.L1.cleanup() + } + } +} + +func (b *NodeBuilder) Build2ndNode(t *testing.T, params 
*SecondNodeParams) (*TestClient, func()) { + if b.L2 == nil { + t.Fatal("builder did not previously build a L2 Node") + } + if b.withL1 && b.L1 == nil { + t.Fatal("builder did not previously build a L1 Node") + } + if params.nodeConfig == nil { + params.nodeConfig = arbnode.ConfigDefaultL1NonSequencerTest() + } + if params.dasConfig != nil { + params.nodeConfig.DataAvailability = *params.dasConfig + } + if params.stackConfig == nil { + params.stackConfig = b.l2StackConfig + // should use different dataDir from the previously used ones + params.stackConfig.DataDir = t.TempDir() + } + if params.initData == nil { + params.initData = &b.L2Info.ArbInitData + } + if params.execConfig == nil { + params.execConfig = b.execConfig + } + + l2 := NewTestClient(b.ctx) + l2.Client, l2.ConsensusNode = + Create2ndNodeWithConfig(t, b.ctx, b.L2.ConsensusNode, b.L1.Stack, b.L1Info, params.initData, params.nodeConfig, params.execConfig, params.stackConfig) + l2.cleanup = func() { l2.ConsensusNode.StopAndWait() } + return l2, func() { l2.cleanup() } +} + +func (b *NodeBuilder) BridgeBalance(t *testing.T, account string, amount *big.Int) (*types.Transaction, *types.Receipt) { + return BridgeBalance(t, account, amount, b.L1Info, b.L2Info, b.L1.Client, b.L2.Client, b.ctx) +} + func SendWaitTestTransactions(t *testing.T, ctx context.Context, client client, txs []*types.Transaction) { t.Helper() for _, tx := range txs { @@ -290,33 +450,19 @@ func createTestL1BlockChain(t *testing.T, l1info info) (info, *ethclient.Client, return createTestL1BlockChainWithConfig(t, l1info, nil) } -func stackConfigForTest(t *testing.T) *node.Config { - stackConfig := node.DefaultConfig - stackConfig.HTTPPort = 0 - stackConfig.WSPort = 0 - stackConfig.UseLightweightKDF = true - stackConfig.P2P.ListenAddr = "" - stackConfig.P2P.NoDial = true - stackConfig.P2P.NoDiscovery = true - stackConfig.P2P.NAT = nil - stackConfig.DataDir = t.TempDir() - return &stackConfig -} - -func createDefaultStackForTest(dataDir string) (*node.Node, error) { +func createStackConfigForTest(dataDir string) *node.Config { stackConf := node.DefaultConfig - var err error stackConf.DataDir = dataDir + stackConf.UseLightweightKDF = true + stackConf.WSPort = 0 + stackConf.HTTPPort = 0 stackConf.HTTPHost = "" stackConf.HTTPModules = append(stackConf.HTTPModules, "eth") stackConf.P2P.NoDiscovery = true + stackConf.P2P.NoDial = true stackConf.P2P.ListenAddr = "" - - stack, err := node.New(&stackConf) - if err != nil { - return nil, fmt.Errorf("error creating protocol stack: %w", err) - } - return stack, nil + stackConf.P2P.NAT = nil + return &stackConf } func createTestValidationNode(t *testing.T, ctx context.Context, config *valnode.Config) (*valnode.ValidationNode, *node.Node) { @@ -392,7 +538,7 @@ func createTestL1BlockChainWithConfig(t *testing.T, l1info info, stackConfig *no l1info = NewL1TestInfo(t) } if stackConfig == nil { - stackConfig = stackConfigForTest(t) + stackConfig = createStackConfigForTest(t.TempDir()) } l1info.GenerateAccount("Faucet") @@ -513,12 +659,10 @@ func createL2BlockChainWithStackConfig( var stack *node.Node var err error if stackConfig == nil { - stack, err = createDefaultStackForTest(dataDir) - Require(t, err) - } else { - stack, err = node.New(stackConfig) - Require(t, err) + stackConfig = createStackConfigForTest(dataDir) } + stack, err = node.New(stackConfig) + Require(t, err) chainDb, err := stack.OpenDatabase("chaindb", 0, 0, "", false) Require(t, err) @@ -773,7 +917,7 @@ func Create2ndNodeWithConfig( l1client := 
ethclient.NewClient(l1rpcClient) if stackConfig == nil { - stackConfig = stackConfigForTest(t) + stackConfig = createStackConfigForTest(t.TempDir()) } l2stack, err := node.New(stackConfig) Require(t, err) diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 8c1588273b..c7dd177ab8 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -19,6 +19,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" @@ -171,7 +172,8 @@ func TestDASRekey(t *testing.T) { // Restart the node on the new keyset against the new DAS server running on the same disk as the first with new keys - l2stackA, err := createDefaultStackForTest(nodeDir) + stackConfig := createStackConfigForTest(nodeDir) + l2stackA, err := node.New(stackConfig) Require(t, err) l2chainDb, err := l2stackA.OpenDatabase("chaindb", 0, 0, "", false) diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go index 2e0544cc26..fc7eb4cc2d 100644 --- a/system_tests/forwarder_test.go +++ b/system_tests/forwarder_test.go @@ -35,7 +35,7 @@ func TestStaticForwarder(t *testing.T) { ipcPath := tmpPath(t, "test.ipc") ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath - stackConfig := stackConfigForTest(t) + stackConfig := createStackConfigForTest(t.TempDir()) ipcConfig.Apply(stackConfig) nodeConfigA := arbnode.ConfigDefaultL1Test() nodeConfigA.BatchPoster.Enable = false @@ -99,7 +99,7 @@ func fallbackSequencer( ctx context.Context, t *testing.T, opts *fallbackSequencerOpts, ) (l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node) { - stackConfig := stackConfigForTest(t) + stackConfig := createStackConfigForTest(t.TempDir()) ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = opts.ipcPath ipcConfig.Apply(stackConfig) @@ -120,7 +120,7 @@ func createForwardingNode( redisUrl string, fallbackPath string, ) (*ethclient.Client, *arbnode.Node) { - stackConfig := stackConfigForTest(t) + stackConfig := createStackConfigForTest(t.TempDir()) if ipcPath != "" { ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath @@ -148,7 +148,7 @@ func createSequencer( ipcPath string, redisUrl string, ) (*ethclient.Client, *arbnode.Node) { - stackConfig := stackConfigForTest(t) + stackConfig := createStackConfigForTest(t.TempDir()) ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath ipcConfig.Apply(stackConfig) diff --git a/system_tests/ipc_test.go b/system_tests/ipc_test.go index e25b4a21ea..dc73825a13 100644 --- a/system_tests/ipc_test.go +++ b/system_tests/ipc_test.go @@ -18,7 +18,7 @@ func TestIpcRpc(t *testing.T) { ipcConfig := genericconf.IPCConfigDefault ipcConfig.Path = ipcPath - stackConf := stackConfigForTest(t) + stackConf := createStackConfigForTest(t.TempDir()) ipcConfig.Apply(stackConf) ctx, cancel := context.WithCancel(context.Background()) diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index 28e72b0653..285548dcdb 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -97,9 +97,8 @@ func TestRecreateStateForRPCNoDepthLimit(t *testing.T) { nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 nodeConfig.Caching.Archive = true - // 
disable caching of states in BlockChain.stateCache + // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.TrieDirtyCache = 0 nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) @@ -121,7 +120,6 @@ func TestRecreateStateForRPCNoDepthLimit(t *testing.T) { if balance.Cmp(expectedBalance) != 0 { Fatal(t, "unexpected balance result for last block, want: ", expectedBalance, " have: ", balance) } - } func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { @@ -133,9 +131,8 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 nodeConfig.Caching.Archive = true - // disable caching of states in BlockChain.stateCache + // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.TrieDirtyCache = 0 nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) @@ -168,9 +165,8 @@ func TestRecreateStateForRPCDepthLimitExceeded(t *testing.T) { nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 nodeConfig.Caching.Archive = true - // disable caching of states in BlockChain.stateCache + // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.TrieDirtyCache = 0 nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) @@ -203,9 +199,8 @@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 nodeConfig.Caching.Archive = true - // disable caching of states in BlockChain.stateCache + // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.TrieDirtyCache = 0 nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, headerCacheLimit+5) @@ -249,9 +244,8 @@ func TestRecreateStateForRPCBeyondGenesis(t *testing.T) { nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 nodeConfig.Caching.Archive = true - // disable caching of states in BlockChain.stateCache + // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.TrieDirtyCache = 0 nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) @@ -285,9 +279,9 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 nodeConfig.Caching.Archive = true - // disable caching of 
states in BlockChain.stateCache + // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.TrieDirtyCache = 0 + nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, blockCacheLimit+4) @@ -421,6 +415,9 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig } for i := genesis + 1; i <= genesis+uint64(txCount); i += i % 10 { _, err = client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(i)) + if err != nil { + t.Log("skipBlocks:", skipBlocks, "skipGas:", skipGas) + } Require(t, err) } @@ -434,10 +431,7 @@ func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { cacheConfig := gethexec.DefaultCachingConfig cacheConfig.Archive = true - // disable caching of states in BlockChain.stateCache - cacheConfig.TrieCleanCache = 0 - cacheConfig.TrieDirtyCache = 0 - // test defaults + //// test defaults testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) cacheConfig.MaxNumberOfBlocksToSkipStateSaving = 127 diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index 881e3b2658..a213c366cf 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -197,7 +197,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { } } - // sequencing suceeds only on the leder + // sequencing succeeds only on the leader for i := arbutil.MessageIndex(0); i < messagesPerRound; i++ { if sequencer := trySequencingEverywhere(); sequencer != currentSequencer { Fatal(t, "unexpected sequencer. expected: ", currentSequencer, " got ", sequencer) } diff --git a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go index 6c01ae65a8..a456dc5fe9 100644 --- a/system_tests/seqinbox_test.go +++ b/system_tests/seqinbox_test.go @@ -6,16 +6,20 @@ package arbtest import ( "bytes" "context" + "errors" "fmt" "math/big" "math/rand" "testing" "time" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/ethclient/gethclient" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" @@ -39,6 +43,54 @@ type blockTestState struct { const seqInboxTestIters = 40 +func encodeAddBatch(seqABI *abi.ABI, seqNum *big.Int, message []byte, afterDelayedMsgRead *big.Int, gasRefunder common.Address) ([]byte, error) { + method, ok := seqABI.Methods["addSequencerL2BatchFromOrigin0"] + if !ok { + return nil, errors.New("failed to find addSequencerL2BatchFromOrigin0 method") + } + inputData, err := method.Inputs.Pack( + seqNum, + message, + afterDelayedMsgRead, + gasRefunder, + new(big.Int).SetUint64(uint64(1)), + new(big.Int).SetUint64(uint64(1)), + ) + if err != nil { + return nil, err + } + fullData := append([]byte{}, method.ID...) + fullData = append(fullData, inputData...)
+	return fullData, nil
+}
+func diffAccessList(accessed, al types.AccessList) string {
+	m := make(map[common.Address]map[common.Hash]bool)
+	for i := 0; i < len(al); i++ {
+		if _, ok := m[al[i].Address]; !ok {
+			m[al[i].Address] = make(map[common.Hash]bool)
+		}
+		for _, slot := range al[i].StorageKeys {
+			m[al[i].Address][slot] = true
+		}
+	}
+
+	diff := ""
+	for i := 0; i < len(accessed); i++ {
+		addr := accessed[i].Address
+		if _, ok := m[addr]; !ok {
+			diff += fmt.Sprintf("contract address: %q wasn't accessed\n", addr)
+			continue
+		}
+		for j := 0; j < len(accessed[i].StorageKeys); j++ {
+			slot := accessed[i].StorageKeys[j]
+			if _, ok := m[addr][slot]; !ok {
+				diff += fmt.Sprintf("storage slot: %v for contract: %v wasn't accessed\n", slot, addr)
+			}
+		}
+	}
+	return diff
+}
+
 func deployGasRefunder(ctx context.Context, t *testing.T, info *BlockchainTestInfo, client *ethclient.Client) common.Address {
 	t.Helper()
 	abi, err := bridgegen.GasRefunderMetaData.GetAbi()
@@ -99,6 +151,12 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) {
 
 	l1BlockChain := l1backend.BlockChain()
 
+	rpcC, err := l1stack.Attach()
+	if err != nil {
+		t.Fatalf("Error connecting to l1 node: %v", err)
+	}
+	gethClient := gethclient.New(rpcC)
+
 	seqInbox, err := bridgegen.NewSequencerInbox(l1Info.GetAddress("SequencerInbox"), l1Client)
 	Require(t, err)
 	seqOpts := l1Info.GetDefaultTransactOpts("Sequencer", ctx)
@@ -143,6 +201,11 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) {
 	}
 	SendWaitTestTransactions(t, ctx, l1Client, faucetTxs)
 
+	seqABI, err := bridgegen.SequencerInboxMetaData.GetAbi()
+	if err != nil {
+		t.Fatalf("Error getting sequencer inbox abi: %v", err)
+	}
+
 	for i := 1; i < seqInboxTestIters; i++ {
 		if i%10 == 0 {
 			reorgTo := rand.Int() % len(blockStates)
@@ -267,6 +330,31 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) {
 		if err != nil {
 			t.Fatalf("BalanceAt(%v) unexpected error: %v", seqOpts.From, err)
 		}
+
+		data, err := encodeAddBatch(seqABI, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr)
+		if err != nil {
+			t.Fatalf("Error encoding batch data: %v", err)
+		}
+		si := l1Info.GetAddress("SequencerInbox")
+		wantAL, _, _, err := gethClient.CreateAccessList(ctx, ethereum.CallMsg{
+			From: seqOpts.From,
+			To:   &si,
+			Data: data,
+		})
+		if err != nil {
+			t.Fatalf("Error creating access list: %v", err)
+		}
+		accessed := arbnode.AccessList(&arbnode.AccessListOpts{
+			SequencerInboxAddr:       l1Info.GetAddress("SequencerInbox"),
+			BridgeAddr:               l1Info.GetAddress("Bridge"),
+			DataPosterAddr:           seqOpts.From,
+			GasRefunderAddr:          gasRefunderAddr,
+			SequencerInboxAccs:       len(blockStates),
+			AfterDelayedMessagesRead: 1,
+		})
+		if diff := diffAccessList(accessed, *wantAL); diff != "" {
+			t.Errorf("Access list mismatch:\n%s\n", diff)
+		}
 		if i%5 == 0 {
 			tx, err = seqInbox.AddSequencerL2Batch(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr, big.NewInt(0), big.NewInt(0))
 		} else {
diff --git a/system_tests/triedb_race_test.go b/system_tests/triedb_race_test.go
new file mode 100644
index 0000000000..8174a9b6a2
--- /dev/null
+++ b/system_tests/triedb_race_test.go
@@ -0,0 +1,84 @@
+package arbtest
+
+import (
+	"context"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/arbitrum"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/rpc"
+	"github.com/offchainlabs/nitro/execution/gethexec"
+	"github.com/offchainlabs/nitro/util/testhelpers"
+)
+
+func TestTrieDBCommitRace(t *testing.T) {
+	_ = testhelpers.InitTestLog(t, log.LvlError)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	execConfig := gethexec.ConfigDefaultTest()
+	execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth
+	execConfig.Sequencer.MaxBlockSpeed = 0
+	execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110
+	execConfig.Caching.Archive = true
+	execConfig.Caching.BlockCount = 127
+	execConfig.Caching.BlockAge = 0
+	execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 127
+	execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0
+	l2info, node, l2client, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, execConfig, nil, nil)
+	cancel = func() {
+		defer requireClose(t, l1stack)
+		defer node.StopAndWait()
+	}
+	defer cancel()
+	execNode := getExecNode(t, node)
+	l2info.GenerateAccount("User2")
+	bc := execNode.Backend.ArbInterface().BlockChain()
+
+	var wg sync.WaitGroup
+	quit := make(chan struct{})
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for {
+			select {
+			default:
+				TransferBalance(t, "Faucet", "User2", common.Big1, l2info, l2client, ctx)
+			case <-quit:
+				return
+			}
+		}
+	}()
+	api := execNode.Backend.APIBackend()
+	blockNumber := 1
+	for i := 0; i < 5; i++ {
+		var roots []common.Hash
+		for len(roots) < 1024 {
+			select {
+			default:
+				block, err := api.BlockByNumber(ctx, rpc.BlockNumber(blockNumber))
+				if err == nil && block != nil {
+					root := block.Root()
+					if statedb, err := bc.StateAt(root); err == nil {
+						err := statedb.Database().TrieDB().Reference(root, common.Hash{})
+						Require(t, err)
+						roots = append(roots, root)
+					}
+					blockNumber += 1
+				}
+			case <-quit:
+				return
+			}
+		}
+		t.Log("dereferencing...")
+		for _, root := range roots {
+			err := bc.TrieDB().Dereference(root)
+			Require(t, err)
+			time.Sleep(1)
+		}
+	}
+	close(quit)
+	wg.Wait()
+}
diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go
index ab61f8a2ee..ff3b420a1c 100644
--- a/util/headerreader/header_reader.go
+++ b/util/headerreader/header_reader.go
@@ -81,6 +81,7 @@ func AddOptions(prefix string, f *flag.FlagSet) {
 	f.Bool(prefix+".poll-only", DefaultConfig.PollOnly, "do not attempt to subscribe to header events")
 	f.Bool(prefix+".use-finality-data", DefaultConfig.UseFinalityData, "use l1 data about finalized/safe blocks")
 	f.Duration(prefix+".poll-interval", DefaultConfig.PollInterval, "interval when polling endpoint")
+	f.Duration(prefix+".subscribe-err-interval", DefaultConfig.SubscribeErrInterval, "interval to wait before resubscribing after a subscription error")
 	f.Duration(prefix+".tx-timeout", DefaultConfig.TxTimeout, "timeout when waiting for a transaction")
 	f.Duration(prefix+".old-header-timeout", DefaultConfig.OldHeaderTimeout, "warns if the latest l1 block is at least this old")
 }
diff --git a/util/signature/sign_verify.go b/util/signature/sign_verify.go
index 2911912979..5ed852bfbc 100644
--- a/util/signature/sign_verify.go
+++ b/util/signature/sign_verify.go
@@ -31,6 +31,12 @@ func SignVerifyConfigAddOptions(prefix string, f *flag.FlagSet) {
 }
 
 var DefaultSignVerifyConfig = SignVerifyConfig{
+	ECDSA:             DefultFeedVerifierConfig,
+	SymmetricFallback: false,
+	SymmetricSign:     false,
+	Symmetric:         EmptySimpleHmacConfig,
+}
+var TestSignVerifyConfig = SignVerifyConfig{
 	ECDSA: VerifierConfig{
 		AcceptSequencer: true,
 	},
diff --git a/util/signature/sign_verify_test.go b/util/signature/sign_verify_test.go
index 8ecb6e5ccc..916fc03a20 100644
--- a/util/signature/sign_verify_test.go
+++ b/util/signature/sign_verify_test.go
@@ -17,7 +17,7 @@ func TestSignVerifyModes(t *testing.T) {
 	signingAddr := crypto.PubkeyToAddress(privateKey.PublicKey)
 	dataSigner := DataSignerFromPrivateKey(privateKey)
 
-	config := DefaultSignVerifyConfig
+	config := TestSignVerifyConfig
 	config.SymmetricFallback = false
 	config.SymmetricSign = false
 	config.ECDSA.AcceptSequencer = false
@@ -25,14 +25,14 @@ func TestSignVerifyModes(t *testing.T) {
 	signVerifyECDSA, err := NewSignVerify(&config, dataSigner, nil)
 	Require(t, err)
 
-	configSymmetric := DefaultSignVerifyConfig
+	configSymmetric := TestSignVerifyConfig
 	configSymmetric.SymmetricFallback = true
 	configSymmetric.SymmetricSign = true
 	configSymmetric.ECDSA.AcceptSequencer = false
 	signVerifySymmetric, err := NewSignVerify(&configSymmetric, nil, nil)
 	Require(t, err)
 
-	configFallback := DefaultSignVerifyConfig
+	configFallback := TestSignVerifyConfig
 	configFallback.SymmetricFallback = true
 	configFallback.SymmetricSign = false
 	configFallback.ECDSA.AllowedAddresses = []string{signingAddr.Hex()}
diff --git a/util/signature/verifier.go b/util/signature/verifier.go
index 2bf5b854ed..c2f6529ec6 100644
--- a/util/signature/verifier.go
+++ b/util/signature/verifier.go
@@ -37,7 +37,7 @@ var ErrMissingSignature = fmt.Errorf("%w: signature not found", ErrSignatureNotV
 var ErrSignerNotApproved = fmt.Errorf("%w: signer not approved", ErrSignatureNotVerified)
 
 func FeedVerifierConfigAddOptions(prefix string, f *flag.FlagSet) {
-	f.StringArray(prefix+".allowed-addresses", DefultFeedVerifierConfig.AllowedAddresses, "a list of allowed addresses")
+	f.StringSlice(prefix+".allowed-addresses", DefultFeedVerifierConfig.AllowedAddresses, "a list of allowed addresses")
 	f.Bool(prefix+".accept-sequencer", DefultFeedVerifierConfig.AcceptSequencer, "accept verified message from sequencer")
 	DangerousFeedVerifierConfigAddOptions(prefix+".dangerous", f)
 }