From ea5a60354a0bb19ab647d42aad5a5e3daa17428c Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 10 Dec 2024 11:24:56 +0800 Subject: [PATCH 01/17] port changes from #1013 --- cmd/geth/main.go | 5 + cmd/utils/flags.go | 49 ++++ common/backoff/exponential.go | 51 ++++ common/backoff/exponential_test.go | 39 +++ common/heap.go | 109 ++++++++ common/heap_test.go | 40 +++ common/shrinkingmap.go | 71 +++++ common/shrinkingmap_test.go | 135 ++++++++++ core/blockchain.go | 50 ++++ core/rawdb/accessors_da_syncer.go | 39 +++ core/rawdb/schema.go | 3 + eth/backend.go | 58 ++++- eth/ethconfig/config.go | 10 + go.mod | 4 +- go.sum | 3 + miner/miner.go | 10 +- miner/miner_test.go | 2 +- miner/scroll_worker.go | 8 +- miner/scroll_worker_test.go | 2 +- node/config.go | 2 + node/node.go | 11 +- rollup/da_syncer/batch_queue.go | 102 ++++++++ .../blob_client/beacon_node_client.go | 192 ++++++++++++++ rollup/da_syncer/blob_client/blob_client.go | 64 +++++ .../da_syncer/blob_client/blob_scan_client.go | 92 +++++++ .../blob_client/block_native_client.go | 85 ++++++ rollup/da_syncer/block_queue.go | 56 ++++ rollup/da_syncer/da/calldata_blob_source.go | 246 ++++++++++++++++++ rollup/da_syncer/da/commitV0.go | 172 ++++++++++++ rollup/da_syncer/da/commitV1.go | 82 ++++++ rollup/da_syncer/da/da.go | 69 +++++ rollup/da_syncer/da/finalize.go | 34 +++ rollup/da_syncer/da/revert.go | 33 +++ rollup/da_syncer/da_queue.go | 70 +++++ rollup/da_syncer/da_syncer.go | 49 ++++ rollup/da_syncer/data_source.go | 44 ++++ rollup/da_syncer/modes.go | 52 ++++ rollup/da_syncer/serrors/errors.go | 62 +++++ rollup/da_syncer/syncing_pipeline.go | 233 +++++++++++++++++ rollup/rollup_sync_service/abi.go | 4 +- rollup/rollup_sync_service/abi_test.go | 4 +- rollup/rollup_sync_service/l1client.go | 80 +++++- rollup/rollup_sync_service/l1client_test.go | 8 +- .../rollup_sync_service.go | 6 +- .../rollup_sync_service_test.go | 8 +- 45 files changed, 2502 insertions(+), 46 deletions(-) create mode 100644 common/backoff/exponential.go create mode 100644 common/backoff/exponential_test.go create mode 100644 common/heap.go create mode 100644 common/heap_test.go create mode 100644 common/shrinkingmap.go create mode 100644 common/shrinkingmap_test.go create mode 100644 core/rawdb/accessors_da_syncer.go create mode 100644 rollup/da_syncer/batch_queue.go create mode 100644 rollup/da_syncer/blob_client/beacon_node_client.go create mode 100644 rollup/da_syncer/blob_client/blob_client.go create mode 100644 rollup/da_syncer/blob_client/blob_scan_client.go create mode 100644 rollup/da_syncer/blob_client/block_native_client.go create mode 100644 rollup/da_syncer/block_queue.go create mode 100644 rollup/da_syncer/da/calldata_blob_source.go create mode 100644 rollup/da_syncer/da/commitV0.go create mode 100644 rollup/da_syncer/da/commitV1.go create mode 100644 rollup/da_syncer/da/da.go create mode 100644 rollup/da_syncer/da/finalize.go create mode 100644 rollup/da_syncer/da/revert.go create mode 100644 rollup/da_syncer/da_queue.go create mode 100644 rollup/da_syncer/da_syncer.go create mode 100644 rollup/da_syncer/data_source.go create mode 100644 rollup/da_syncer/modes.go create mode 100644 rollup/da_syncer/serrors/errors.go create mode 100644 rollup/da_syncer/syncing_pipeline.go diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 24760af5e080..f2147f35d4ad 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -171,6 +171,11 @@ var ( utils.CircuitCapacityCheckWorkersFlag, utils.RollupVerifyEnabledFlag, 
utils.ShadowforkPeersFlag, + utils.DASyncEnabledFlag, + utils.DASnapshotFileFlag, + utils.DABlockNativeAPIEndpointFlag, + utils.DABlobScanAPIEndpointFlag, + utils.DABeaconNodeAPIEndpointFlag, } rpcFlags = []cli.Flag{ diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 3e4ad289a906..445248b1ff6f 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -74,6 +74,7 @@ import ( "github.com/scroll-tech/go-ethereum/p2p/nat" "github.com/scroll-tech/go-ethereum/p2p/netutil" "github.com/scroll-tech/go-ethereum/params" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer" "github.com/scroll-tech/go-ethereum/rollup/tracing" "github.com/scroll-tech/go-ethereum/rpc" ) @@ -871,6 +872,28 @@ var ( Name: "net.shadowforkpeers", Usage: "peer ids of shadow fork peers", } + + // DA syncing settings + DASyncEnabledFlag = &cli.BoolFlag{ + Name: "da.sync", + Usage: "Enable node syncing from DA", + } + DASnapshotFileFlag = &cli.StringFlag{ + Name: "da.snapshot.file", + Usage: "Snapshot file to sync from DA", + } + DABlobScanAPIEndpointFlag = &cli.StringFlag{ + Name: "da.blob.blobscan", + Usage: "BlobScan blob API endpoint", + } + DABlockNativeAPIEndpointFlag = &cli.StringFlag{ + Name: "da.blob.blocknative", + Usage: "BlockNative blob API endpoint", + } + DABeaconNodeAPIEndpointFlag = &cli.StringFlag{ + Name: "da.blob.beaconnode", + Usage: "Beacon node API endpoint", + } ) // MakeDataDir retrieves the currently requested data directory, terminating @@ -1315,6 +1338,10 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { setSmartCard(ctx, cfg) setL1(ctx, cfg) + if ctx.IsSet(DASyncEnabledFlag.Name) { + cfg.DaSyncingEnabled = ctx.Bool(DASyncEnabledFlag.Name) + } + if ctx.GlobalIsSet(ExternalSignerFlag.Name) { cfg.ExternalSigner = ctx.GlobalString(ExternalSignerFlag.Name) } @@ -1597,6 +1624,27 @@ func setEnableRollupVerify(ctx *cli.Context, cfg *ethconfig.Config) { } } +func setDA(ctx *cli.Context, cfg *ethconfig.Config) { + if ctx.IsSet(DASyncEnabledFlag.Name) { + cfg.EnableDASyncing = ctx.Bool(DASyncEnabledFlag.Name) + if ctx.IsSet(DAModeFlag.Name) { + cfg.DA.FetcherMode = *flags.GlobalTextMarshaler(ctx, DAModeFlag.Name).(*da_syncer.FetcherMode) + } + if ctx.IsSet(DASnapshotFileFlag.Name) { + cfg.DA.SnapshotFilePath = ctx.String(DASnapshotFileFlag.Name) + } + if ctx.IsSet(DABlobScanAPIEndpointFlag.Name) { + cfg.DA.BlobScanAPIEndpoint = ctx.String(DABlobScanAPIEndpointFlag.Name) + } + if ctx.IsSet(DABlockNativeAPIEndpointFlag.Name) { + cfg.DA.BlockNativeAPIEndpoint = ctx.String(DABlockNativeAPIEndpointFlag.Name) + } + if ctx.IsSet(DABeaconNodeAPIEndpointFlag.Name) { + cfg.DA.BeaconNodeAPIEndpoint = ctx.String(DABeaconNodeAPIEndpointFlag.Name) + } + } +} + func setMaxBlockRange(ctx *cli.Context, cfg *ethconfig.Config) { if ctx.GlobalIsSet(MaxBlockRangeFlag.Name) { cfg.MaxBlockRange = ctx.GlobalInt64(MaxBlockRangeFlag.Name) @@ -1672,6 +1720,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { setLes(ctx, cfg) setCircuitCapacityCheck(ctx, cfg) setEnableRollupVerify(ctx, cfg) + setDA(ctx, cfg) setMaxBlockRange(ctx, cfg) if ctx.GlobalIsSet(ShadowforkPeersFlag.Name) { cfg.ShadowForkPeerIDs = ctx.GlobalStringSlice(ShadowforkPeersFlag.Name) diff --git a/common/backoff/exponential.go b/common/backoff/exponential.go new file mode 100644 index 000000000000..e1f9b53a350e --- /dev/null +++ b/common/backoff/exponential.go @@ -0,0 +1,51 @@ +package backoff + +import ( + "math" + "math/rand" + "time" +) + +// Exponential is a backoff strategy that increases the delay between retries 
exponentially. +type Exponential struct { + attempt int + + maxJitter time.Duration + + min time.Duration + max time.Duration +} + +func NewExponential(minimum, maximum, maxJitter time.Duration) *Exponential { + return &Exponential{ + min: minimum, + max: maximum, + maxJitter: maxJitter, + } +} + +func (e *Exponential) NextDuration() time.Duration { + var jitter time.Duration + if e.maxJitter > 0 { + jitter = time.Duration(rand.Int63n(e.maxJitter.Nanoseconds())) + } + + minFloat := float64(e.min) + duration := math.Pow(2, float64(e.attempt)) * minFloat + + // limit at configured maximum + if duration > float64(e.max) { + duration = float64(e.max) + } + + e.attempt++ + return time.Duration(duration) + jitter +} + +func (e *Exponential) Reset() { + e.attempt = 0 +} + +func (e *Exponential) Attempt() int { + return e.attempt +} diff --git a/common/backoff/exponential_test.go b/common/backoff/exponential_test.go new file mode 100644 index 000000000000..ff659337a2b0 --- /dev/null +++ b/common/backoff/exponential_test.go @@ -0,0 +1,39 @@ +package backoff + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestExponentialBackoff(t *testing.T) { + t.Run("Multiple attempts", func(t *testing.T) { + e := NewExponential(100*time.Millisecond, 10*time.Second, 0) + expectedDurations := []time.Duration{ + 100 * time.Millisecond, + 200 * time.Millisecond, + 400 * time.Millisecond, + 800 * time.Millisecond, + 1600 * time.Millisecond, + 3200 * time.Millisecond, + 6400 * time.Millisecond, + 10 * time.Second, // capped at max + } + for i, expected := range expectedDurations { + require.Equal(t, expected, e.NextDuration(), "attempt %d", i) + } + }) + + t.Run("Jitter added", func(t *testing.T) { + e := NewExponential(1*time.Second, 10*time.Second, 1*time.Second) + duration := e.NextDuration() + require.GreaterOrEqual(t, duration, 1*time.Second) + require.Less(t, duration, 2*time.Second) + }) + + t.Run("Edge case: min > max", func(t *testing.T) { + e := NewExponential(10*time.Second, 5*time.Second, 0) + require.Equal(t, 5*time.Second, e.NextDuration()) + }) +} diff --git a/common/heap.go b/common/heap.go new file mode 100644 index 000000000000..67b79a1136d1 --- /dev/null +++ b/common/heap.go @@ -0,0 +1,109 @@ +package common + +import ( + "container/heap" +) + +// Heap is a generic min-heap (or max-heap, depending on Comparable behavior) implementation. 
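+//
+// Illustrative usage (a sketch; Int is the example Comparable type defined in common/heap_test.go):
+//
+//	h := NewHeap[Int]()
+//	h.Push(Int(3))
+//	h.Push(Int(1))
+//	smallest := h.Pop().Value() // Int(1): the CompareTo below yields a min-heap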
+type Heap[T Comparable[T]] struct {
+	heap innerHeap[T]
+}
+
+func NewHeap[T Comparable[T]]() *Heap[T] {
+	return &Heap[T]{
+		heap: make(innerHeap[T], 0),
+	}
+}
+
+func (h *Heap[T]) Len() int {
+	return len(h.heap)
+}
+
+func (h *Heap[T]) Push(element T) *HeapElement[T] {
+	heapElement := NewHeapElement(element)
+	heap.Push(&h.heap, heapElement)
+
+	return heapElement
+}
+
+func (h *Heap[T]) Pop() *HeapElement[T] {
+	return heap.Pop(&h.heap).(*HeapElement[T])
+}
+
+func (h *Heap[T]) Peek() *HeapElement[T] {
+	if h.Len() == 0 {
+		return nil
+	}
+
+	return h.heap[0]
+}
+
+func (h *Heap[T]) Remove(element *HeapElement[T]) {
+	heap.Remove(&h.heap, element.index)
+}
+
+func (h *Heap[T]) Clear() {
+	h.heap = make(innerHeap[T], 0)
+}
+
+type innerHeap[T Comparable[T]] []*HeapElement[T]
+
+func (h innerHeap[T]) Len() int {
+	return len(h)
+}
+
+func (h innerHeap[T]) Less(i, j int) bool {
+	return h[i].Value().CompareTo(h[j].Value()) < 0
+}
+
+func (h innerHeap[T]) Swap(i, j int) {
+	h[i], h[j] = h[j], h[i]
+	h[i].index, h[j].index = i, j
+}
+
+func (h *innerHeap[T]) Push(x interface{}) {
+	data := x.(*HeapElement[T])
+	*h = append(*h, data)
+	data.index = len(*h) - 1
+}
+
+func (h *innerHeap[T]) Pop() interface{} {
+	n := len(*h)
+	element := (*h)[n-1]
+	(*h)[n-1] = nil // avoid memory leak
+	*h = (*h)[:n-1]
+	element.index = -1
+
+	return element
+}
+
+// Comparable is an interface for types that can be compared.
+type Comparable[T any] interface {
+	// CompareTo compares x with other.
+	// To create a min heap, return:
+	//    -1 if x <  other
+	//     0 if x == other
+	//    +1 if x >  other
+	// To create a max heap, return the opposite.
+	CompareTo(other T) int
+}
+
+// HeapElement is a wrapper around the value stored in the heap.
+type HeapElement[T Comparable[T]] struct {
+	value T
+	index int
+}
+
+func NewHeapElement[T Comparable[T]](value T) *HeapElement[T] {
+	return &HeapElement[T]{
+		value: value,
+	}
+}
+
+func (h *HeapElement[T]) Value() T {
+	return h.value
+}
+
+func (h *HeapElement[T]) Index() int {
+	return h.index
+}
diff --git a/common/heap_test.go b/common/heap_test.go
new file mode 100644
index 000000000000..ac927c375de4
--- /dev/null
+++ b/common/heap_test.go
@@ -0,0 +1,40 @@
+package common
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+type Int int
+
+func (i Int) CompareTo(other Int) int {
+	if i < other {
+		return -1
+	} else if i > other {
+		return 1
+	} else {
+		return 0
+	}
+}
+
+func TestHeap(t *testing.T) {
+	h := NewHeap[Int]()
+
+	require.Equal(t, 0, h.Len(), "Heap should be empty initially")
+
+	h.Push(Int(3))
+	h.Push(Int(1))
+	h.Push(Int(2))
+
+	require.Equal(t, 3, h.Len(), "Heap should have three elements after pushing")
+
+	require.EqualValues(t, 1, h.Pop().Value(), "Pop should return the smallest element")
+	require.Equal(t, 2, h.Len(), "Heap should have two elements after popping")
+
+	require.EqualValues(t, 2, h.Pop().Value(), "Pop should return the next smallest element")
+	require.Equal(t, 1, h.Len(), "Heap should have one element after popping")
+
+	require.EqualValues(t, 3, h.Pop().Value(), "Pop should return the last element")
+	require.Equal(t, 0, h.Len(), "Heap should be empty after popping all elements")
+}
diff --git a/common/shrinkingmap.go b/common/shrinkingmap.go
new file mode 100644
index 000000000000..4bf98f87c2da
--- /dev/null
+++ b/common/shrinkingmap.go
@@ -0,0 +1,71 @@
+package common
+
+// ShrinkingMap is a map that shrinks itself (by allocating a new map) after a certain number of deletions have been performed.
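+// For example (a sketch; the shrink threshold of 10 deletions is arbitrary):
+//
+//	m := NewShrinkingMap[int, string](10)
+//	m.Set(1, "a")
+//	deleted := m.Delete(1) // the 10th successful Delete reallocates the backing map
+//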
+// If shrinkAfterDeletionsCount is set to <=0, the map will never shrink. +// This is useful to prevent memory leaks in long-running processes that delete a lot of keys from a map. +// See here for more details: https://github.com/golang/go/issues/20135 +type ShrinkingMap[K comparable, V any] struct { + m map[K]V + deletedKeys int + + shrinkAfterDeletionsCount int +} + +func NewShrinkingMap[K comparable, V any](shrinkAfterDeletionsCount int) *ShrinkingMap[K, V] { + return &ShrinkingMap[K, V]{ + m: make(map[K]V), + shrinkAfterDeletionsCount: shrinkAfterDeletionsCount, + } +} + +func (s *ShrinkingMap[K, V]) Set(key K, value V) { + s.m[key] = value +} + +func (s *ShrinkingMap[K, V]) Get(key K) (value V, exists bool) { + value, exists = s.m[key] + return value, exists +} + +func (s *ShrinkingMap[K, V]) Has(key K) bool { + _, exists := s.m[key] + return exists +} + +func (s *ShrinkingMap[K, V]) Delete(key K) (deleted bool) { + if _, exists := s.m[key]; !exists { + return false + } + + delete(s.m, key) + s.deletedKeys++ + + if s.shouldShrink() { + s.shrink() + } + + return true +} + +func (s *ShrinkingMap[K, V]) Size() (size int) { + return len(s.m) +} + +func (s *ShrinkingMap[K, V]) Clear() { + s.m = make(map[K]V) + s.deletedKeys = 0 +} + +func (s *ShrinkingMap[K, V]) shouldShrink() bool { + return s.shrinkAfterDeletionsCount > 0 && s.deletedKeys >= s.shrinkAfterDeletionsCount +} + +func (s *ShrinkingMap[K, V]) shrink() { + newMap := make(map[K]V, len(s.m)) + for k, v := range s.m { + newMap[k] = v + } + + s.m = newMap + s.deletedKeys = 0 +} diff --git a/common/shrinkingmap_test.go b/common/shrinkingmap_test.go new file mode 100644 index 000000000000..c94a917ee140 --- /dev/null +++ b/common/shrinkingmap_test.go @@ -0,0 +1,135 @@ +package common + +import ( + "fmt" + "runtime" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestShrinkingMap_Shrink(t *testing.T) { + m := NewShrinkingMap[int, int](10) + + for i := 0; i < 100; i++ { + m.Set(i, i) + } + + for i := 0; i < 100; i++ { + val, exists := m.Get(i) + require.Equal(t, true, exists) + require.Equal(t, i, val) + + has := m.Has(i) + require.Equal(t, true, has) + } + + for i := 0; i < 9; i++ { + m.Delete(i) + } + require.Equal(t, 9, m.deletedKeys) + + // Delete the 10th key -> shrinks the map + m.Delete(9) + require.Equal(t, 0, m.deletedKeys) + + for i := 0; i < 100; i++ { + if i < 10 { + val, exists := m.Get(i) + require.Equal(t, false, exists) + require.Equal(t, 0, val) + + has := m.Has(i) + require.Equal(t, false, has) + } else { + val, exists := m.Get(i) + require.Equal(t, true, exists) + require.Equal(t, i, val) + + has := m.Has(i) + require.Equal(t, true, has) + } + } + + require.Equal(t, 90, m.Size()) +} + +func TestNewShrinkingMap_NoShrinking(t *testing.T) { + m := NewShrinkingMap[int, int](0) + for i := 0; i < 10000; i++ { + m.Set(i, i) + } + + for i := 0; i < 10000; i++ { + val, exists := m.Get(i) + require.Equal(t, true, exists) + require.Equal(t, i, val) + + m.Delete(i) + } + + require.Equal(t, 0, m.Size()) + require.Equal(t, 10000, m.deletedKeys) +} + +func TestShrinkingMap_MemoryShrinking(t *testing.T) { + t.Skip("Only for manual testing and memory profiling") + + gcAndPrintAlloc("start") + m := NewShrinkingMap[int, int](10000) + + const mapSize = 1_000_000 + + for i := 0; i < mapSize; i++ { + m.Set(i, i) + } + + gcAndPrintAlloc("after map creation") + + for i := 0; i < mapSize/2; i++ { + m.Delete(i) + } + + gcAndPrintAlloc("after removing half of the elements") + + val, exist := m.Get(mapSize - 1) + 
require.Equal(t, true, exist) + require.Equal(t, mapSize-1, val) + + gcAndPrintAlloc("end") +} + +func TestShrinkingMap_MemoryNoShrinking(t *testing.T) { + t.Skip("Only for manual testing and memory profiling") + + gcAndPrintAlloc("start") + m := NewShrinkingMap[int, int](0) + + const mapSize = 1_000_000 + + for i := 0; i < mapSize; i++ { + m.Set(i, i) + } + + gcAndPrintAlloc("after map creation") + + for i := 0; i < mapSize/2; i++ { + m.Delete(i) + } + + gcAndPrintAlloc("after removing half of the elements") + + val, exist := m.Get(mapSize - 1) + require.Equal(t, true, exist) + require.Equal(t, mapSize-1, val) + + gcAndPrintAlloc("end") +} + +func gcAndPrintAlloc(prefix string) { + runtime.GC() + + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + fmt.Printf(prefix+", Allocated memory %d KiB\n", stats.Alloc/1024) +} diff --git a/core/blockchain.go b/core/blockchain.go index 63b244cc06c7..e3294ded5bde 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1803,6 +1803,56 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er return it.index, err } +func (bc *BlockChain) BuildAndWriteBlock(parentBlock *types.Block, header *types.Header, txs types.Transactions) (WriteStatus, error) { + if !bc.chainmu.TryLock() { + return NonStatTy, errInsertionInterrupted + } + defer bc.chainmu.Unlock() + + statedb, err := state.New(parentBlock.Root(), bc.stateCache, bc.snaps) + if err != nil { + return NonStatTy, err + } + + statedb.StartPrefetcher("l1sync") + defer statedb.StopPrefetcher() + + header.ParentHash = parentBlock.Hash() + + tempBlock := types.NewBlockWithHeader(header).WithBody(txs, nil) + receipts, logs, gasUsed, err := bc.processor.Process(tempBlock, statedb, bc.vmConfig) + if err != nil { + return NonStatTy, fmt.Errorf("error processing block: %w", err) + } + + // TODO: once we have the extra and difficulty we need to verify the signature of the block with Clique + // This should be done with https://github.com/scroll-tech/go-ethereum/pull/913. + + // finalize and assemble block as fullBlock + header.GasUsed = gasUsed + header.Root = statedb.IntermediateRoot(bc.chainConfig.IsEIP158(header.Number)) + + fullBlock := types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)) + + blockHash := fullBlock.Hash() + // manually replace the block hash in the receipts + for i, receipt := range receipts { + // add block location fields + receipt.BlockHash = blockHash + receipt.BlockNumber = tempBlock.Number() + receipt.TransactionIndex = uint(i) + + for _, l := range receipt.Logs { + l.BlockHash = blockHash + } + } + for _, l := range logs { + l.BlockHash = blockHash + } + + return bc.writeBlockAndSetHead(fullBlock, receipts, logs, statedb, false) +} + // insertSideChain is called when an import batch hits upon a pruned ancestor // error, which happens when a sidechain with a sufficiently old fork-block is // found. diff --git a/core/rawdb/accessors_da_syncer.go b/core/rawdb/accessors_da_syncer.go new file mode 100644 index 000000000000..96f816685652 --- /dev/null +++ b/core/rawdb/accessors_da_syncer.go @@ -0,0 +1,39 @@ +package rawdb + +import ( + "math/big" + + "github.com/scroll-tech/go-ethereum/ethdb" + "github.com/scroll-tech/go-ethereum/log" +) + +// WriteDASyncedL1BlockNumber writes the highest synced L1 block number to the database. 
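+//
+// A round-trip sketch (db is any ethdb.Database, e.g. the node's chain database; the block number is illustrative):
+//
+//	rawdb.WriteDASyncedL1BlockNumber(db, 20_000_000)
+//	if n := rawdb.ReadDASyncedL1BlockNumber(db); n != nil {
+//		// a previous DA sync reached L1 block *n
+//	}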
+func WriteDASyncedL1BlockNumber(db ethdb.KeyValueWriter, L1BlockNumber uint64) { + value := big.NewInt(0).SetUint64(L1BlockNumber).Bytes() + + if err := db.Put(daSyncedL1BlockNumberKey, value); err != nil { + log.Crit("Failed to update DA synced L1 block number", "err", err) + } +} + +// ReadDASyncedL1BlockNumber retrieves the highest synced L1 block number. +func ReadDASyncedL1BlockNumber(db ethdb.Reader) *uint64 { + data, err := db.Get(daSyncedL1BlockNumberKey) + if err != nil && isNotFoundErr(err) { + return nil + } + if err != nil { + log.Crit("Failed to read DA synced L1 block number from database", "err", err) + } + if len(data) == 0 { + return nil + } + + number := new(big.Int).SetBytes(data) + if !number.IsUint64() { + log.Crit("Unexpected DA synced L1 block number in database", "number", number) + } + + value := number.Uint64() + return &value +} diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 2f8281c83d1d..2e4f2a18c5de 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -125,6 +125,9 @@ var ( numSkippedTransactionsKey = []byte("NumberOfSkippedTransactions") skippedTransactionPrefix = []byte("skip") // skippedTransactionPrefix + tx hash -> skipped transaction skippedTransactionHashPrefix = []byte("sh") // skippedTransactionHashPrefix + index -> tx hash + + // Scroll da syncer store + daSyncedL1BlockNumberKey = []byte("LastDASyncedL1BlockNumber") ) // Use the updated "L1" prefix on all new networks diff --git a/eth/backend.go b/eth/backend.go index 4c7b024f4969..2b6c663d2744 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -56,6 +56,7 @@ import ( "github.com/scroll-tech/go-ethereum/params" "github.com/scroll-tech/go-ethereum/rlp" "github.com/scroll-tech/go-ethereum/rollup/ccc" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer" "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" "github.com/scroll-tech/go-ethereum/rollup/sync_service" "github.com/scroll-tech/go-ethereum/rpc" @@ -70,10 +71,12 @@ type Ethereum struct { config *ethconfig.Config // Handlers - txPool *core.TxPool - syncService *sync_service.SyncService - rollupSyncService *rollup_sync_service.RollupSyncService - asyncChecker *ccc.AsyncChecker + txPool *core.TxPool + syncService *sync_service.SyncService + rollupSyncService *rollup_sync_service.RollupSyncService + asyncChecker *ccc.AsyncChecker + syncingPipeline *da_syncer.SyncingPipeline + blockchain *core.BlockChain handler *handler ethDialCandidates enode.Iterator @@ -220,6 +223,18 @@ func New(stack *node.Node, config *ethconfig.Config, l1Client sync_service.EthCl } eth.txPool = core.NewTxPool(config.TxPool, chainConfig, eth.blockchain) + // Initialize and start DA syncing pipeline before SyncService as SyncService is blocking until all L1 messages are loaded. + // We need SyncService to load the L1 messages for DA syncing, but since both sync from last known L1 state, we can + // simply let them run simultaneously. If messages are missing in DA syncing, it will be handled by the syncing pipeline + // by waiting and retrying. 
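+	// DA syncing is opted into via the CLI flags added in this change: --da.sync,
+	// with blob sources configured through --da.blob.beaconnode, --da.blob.blobscan
+	// or --da.blob.blocknative (see cmd/utils/flags.go).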
+	if config.EnableDASyncing {
+		eth.syncingPipeline, err = da_syncer.NewSyncingPipeline(context.Background(), eth.blockchain, chainConfig, eth.chainDb, l1Client, stack.Config().L1DeploymentBlock, config.DA)
+		if err != nil {
+			return nil, fmt.Errorf("cannot initialize da syncer: %w", err)
+		}
+		eth.syncingPipeline.Start()
+	}
+
 	// initialize and start L1 message sync service
 	eth.syncService, err = sync_service.NewSyncService(context.Background(), chainConfig, stack.Config(), eth.chainDb, l1Client)
 	if err != nil {
@@ -257,7 +272,7 @@ func New(stack *node.Node, config *ethconfig.Config, l1Client sync_service.EthCl
 		return nil, err
 	}
 
-	eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock)
+	eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock, config.EnableDASyncing)
 	eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData))
 
 	eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil}
@@ -330,6 +345,15 @@ func (s *Ethereum) APIs() []rpc.API {
 	// Append any APIs exposed explicitly by the consensus engine
 	apis = append(apis, s.engine.APIs(s.BlockChain())...)
 
+	if !s.config.EnableDASyncing {
+		apis = append(apis, rpc.API{
+			Namespace: "eth",
+			Version:   "1.0",
+			Service:   downloader.NewPublicDownloaderAPI(s.handler.downloader, s.eventMux),
+			Public:    true,
+		})
+	}
+
 	// Append all the local APIs and return
 	return append(apis, []rpc.API{
 		{
@@ -342,11 +366,6 @@ func (s *Ethereum) APIs() []rpc.API {
 			Version:   "1.0",
 			Service:   NewPublicMinerAPI(s),
 			Public:    true,
-		}, {
-			Namespace: "eth",
-			Version:   "1.0",
-			Service:   downloader.NewPublicDownloaderAPI(s.handler.downloader, s.eventMux),
-			Public:    true,
 		}, {
 			Namespace: "miner",
 			Version:   "1.0",
@@ -553,6 +572,11 @@ func (s *Ethereum) SyncService() *sync_service.SyncService { return s.syncServic
 // Protocols returns all the currently configured
 // network protocols to start.
 func (s *Ethereum) Protocols() []p2p.Protocol {
+	// when DA syncing is enabled, we don't create the p2p handler
+	if s.config.EnableDASyncing {
+		return nil
+	}
+
 	protos := eth.MakeProtocols((*ethHandler)(s.handler), s.networkID, s.ethDialCandidates)
 	if !s.blockchain.Config().Scroll.ZktrieEnabled() && s.config.SnapshotCache > 0 {
 		protos = append(protos, snap.MakeProtocols((*snapHandler)(s.handler), s.snapDialCandidates)...)
@@ -577,7 +601,11 @@ func (s *Ethereum) Start() error {
 	//	maxPeers -= s.config.LightPeers
 	//}
 	// Start the networking layer and the light server if requested
-	s.handler.Start(maxPeers)
+
+	// the handler is not started when DA syncing is enabled
+	if !s.config.EnableDASyncing {
+		s.handler.Start(maxPeers)
+	}
 	return nil
 }
 
@@ -587,7 +615,10 @@ func (s *Ethereum) Stop() error {
 	// Stop all the peer-related stuff first.
 	s.ethDialCandidates.Close()
 	s.snapDialCandidates.Close()
-	s.handler.Stop()
+	// the handler was not started when DA syncing is enabled
+	if !s.config.EnableDASyncing {
+		s.handler.Stop()
+	}
 	// Then stop everything else.
s.bloomIndexer.Close() @@ -597,6 +628,9 @@ func (s *Ethereum) Stop() error { if s.config.EnableRollupVerify { s.rollupSyncService.Stop() } + if s.config.EnableDASyncing { + s.syncingPipeline.Stop() + } s.miner.Close() if s.config.CheckCircuitCapacity { s.asyncChecker.Wait() diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 5a933a95e5f9..ad295d5de3be 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -37,6 +37,7 @@ import ( "github.com/scroll-tech/go-ethereum/miner" "github.com/scroll-tech/go-ethereum/node" "github.com/scroll-tech/go-ethereum/params" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer" ) // FullNodeGPO contains default gasprice oracle settings for full node. @@ -93,6 +94,9 @@ var Defaults = Config{ GPO: FullNodeGPO, RPCTxFeeCap: 1, // 1 ether MaxBlockRange: -1, // Default unconfigured value: no block range limit for backward compatibility + DA: da_syncer.Config{ + FetcherMode: da_syncer.L1RPC, + }, } func init() { @@ -218,6 +222,12 @@ type Config struct { // List of peer ids that take part in the shadow-fork ShadowForkPeerIDs []string + + // Enable syncing node from DA + EnableDASyncing bool + + // DA syncer options + DA da_syncer.Config } // CreateConsensusEngine creates a consensus engine for the given chain configuration. diff --git a/go.mod b/go.mod index 658a06109d0e..2bda32ee00d7 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,7 @@ require ( github.com/prometheus/tsdb v0.7.1 github.com/rjeczalik/notify v0.9.1 github.com/rs/cors v1.7.0 - github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac + github.com/scroll-tech/da-codec v0.1.2 github.com/scroll-tech/zktrie v0.8.4 github.com/shirou/gopsutil v3.21.11+incompatible github.com/sourcegraph/conc v0.3.0 @@ -96,7 +96,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 // indirect + github.com/supranational/blst v0.3.11 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect diff --git a/go.sum b/go.sum index 290418161087..8c96ce6e7cd5 100644 --- a/go.sum +++ b/go.sum @@ -394,6 +394,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac h1:DjLrqjoOLVFug9ZkAbJYwjtYW51YZE0Num3p4cZXaZs= github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= +github.com/scroll-tech/da-codec v0.1.2 h1:QyJ+dQ4zWVVJwuqxNt4MiKyrymVc6rHe4YPtURkjiRc= +github.com/scroll-tech/da-codec v0.1.2/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= @@ -430,6 +432,7 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZBNiZO37uWS/nDaKiIwHz4mDo4pnA+fqTO0= github.com/supranational/blst 
v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= diff --git a/miner/miner.go b/miner/miner.go index f0920ade1376..e6b1b2ae5d38 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -76,7 +76,7 @@ type Miner struct { wg sync.WaitGroup } -func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(block *types.Block) bool) *Miner { +func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(block *types.Block) bool, daSyncingEnabled bool) *Miner { miner := &Miner{ eth: eth, mux: mux, @@ -84,10 +84,12 @@ func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *even exitCh: make(chan struct{}), startCh: make(chan common.Address), stopCh: make(chan struct{}), - worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true), + worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true, daSyncingEnabled), + } + if !daSyncingEnabled { + miner.wg.Add(1) + go miner.update() } - miner.wg.Add(1) - go miner.update() return miner } diff --git a/miner/miner_test.go b/miner/miner_test.go index d84c9aea703e..f39700193430 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -276,5 +276,5 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) { // Create event Mux mux := new(event.TypeMux) // Create Miner - return New(backend, &config, chainConfig, mux, engine, nil), mux + return New(backend, &config, chainConfig, mux, engine, nil, false), mux } diff --git a/miner/scroll_worker.go b/miner/scroll_worker.go index ec6337bef406..e152878d40e6 100644 --- a/miner/scroll_worker.go +++ b/miner/scroll_worker.go @@ -177,7 +177,7 @@ type worker struct { skipTxHash common.Hash } -func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool) *worker { +func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool, daSyncingEnabled bool) *worker { worker := &worker{ config: config, chainConfig: chainConfig, @@ -192,6 +192,12 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus startCh: make(chan struct{}, 1), reorgCh: make(chan reorgTrigger, 1), } + + if daSyncingEnabled { + log.Info("Worker will not start, because DA syncing is enabled") + return worker + } + worker.asyncChecker = ccc.NewAsyncChecker(worker.chain, config.CCCMaxWorkers, false).WithOnFailingBlock(worker.onBlockFailingCCC) // Subscribe NewTxsEvent for tx pool diff --git a/miner/scroll_worker_test.go b/miner/scroll_worker_test.go index 407d508cf819..70ec4a9582d3 100644 --- a/miner/scroll_worker_test.go +++ b/miner/scroll_worker_test.go @@ -208,7 +208,7 @@ func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction { func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*worker, 
*testWorkerBackend) {
 	backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks)
 	backend.txPool.AddLocals(pendingTxs)
-	w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false)
+	w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false, false)
 	w.setEtherbase(testBankAddress)
 	return w, backend
 }
diff --git a/node/config.go b/node/config.go
index 439b11a2f170..2dc3207dfdca 100644
--- a/node/config.go
+++ b/node/config.go
@@ -197,6 +197,8 @@ type Config struct {
 	L1Confirmations rpc.BlockNumber `toml:",omitempty"`
 	// L1 bridge deployment block number
 	L1DeploymentBlock uint64 `toml:",omitempty"`
+	// Whether syncing the node from DA is enabled
+	DaSyncingEnabled bool `toml:",omitempty"`
 }
diff --git a/node/node.go b/node/node.go
index ac8c27dde51c..8c02d46f1ccc 100644
--- a/node/node.go
+++ b/node/node.go
@@ -262,10 +262,15 @@ func (n *Node) doClose(errs []error) error {
 // openEndpoints starts all network and RPC endpoints.
 func (n *Node) openEndpoints() error {
 	// start networking endpoints
-	n.log.Info("Starting peer-to-peer node", "instance", n.server.Name)
-	if err := n.server.Start(); err != nil {
-		return convertFileLockError(err)
+	if !n.config.DaSyncingEnabled {
+		n.log.Info("Starting peer-to-peer node", "instance", n.server.Name)
+		if err := n.server.Start(); err != nil {
+			return convertFileLockError(err)
+		}
+	} else {
+		n.log.Info("Peer-to-peer node will not start, because DA syncing is enabled")
 	}
+
 	// start RPC endpoints
 	err := n.startRPC()
 	if err != nil {
diff --git a/rollup/da_syncer/batch_queue.go b/rollup/da_syncer/batch_queue.go
new file mode 100644
index 000000000000..a0172a86c077
--- /dev/null
+++ b/rollup/da_syncer/batch_queue.go
@@ -0,0 +1,102 @@
+package da_syncer
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/core/rawdb"
+	"github.com/scroll-tech/go-ethereum/ethdb"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/da"
+)
+
+// BatchQueue is a pipeline stage that reads all batch events from DAQueue and provides only finalized batches to the next stage.
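+//
+// A consumption sketch (construction of the DAQueue is elided here; see da_queue.go):
+//
+//	bq := NewBatchQueue(daQueue, db)
+//	batch, err := bq.NextBatch(ctx) // blocks until the next finalized batch is available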
+type BatchQueue struct {
+	DAQueue                 *DAQueue
+	db                      ethdb.Database
+	lastFinalizedBatchIndex uint64
+	batches                 *common.Heap[da.Entry]
+	batchesMap              *common.ShrinkingMap[uint64, *common.HeapElement[da.Entry]]
+}
+
+func NewBatchQueue(DAQueue *DAQueue, db ethdb.Database) *BatchQueue {
+	return &BatchQueue{
+		DAQueue:                 DAQueue,
+		db:                      db,
+		lastFinalizedBatchIndex: 0,
+		batches:                 common.NewHeap[da.Entry](),
+		batchesMap:              common.NewShrinkingMap[uint64, *common.HeapElement[da.Entry]](1000),
+	}
+}
+
+// NextBatch finds the next finalized batch and returns the data that was committed in that batch.
+func (bq *BatchQueue) NextBatch(ctx context.Context) (da.Entry, error) {
+	if batch := bq.getFinalizedBatch(); batch != nil {
+		return batch, nil
+	}
+
+	for {
+		daEntry, err := bq.DAQueue.NextDA(ctx)
+		if err != nil {
+			return nil, err
+		}
+		switch daEntry.Type() {
+		case da.CommitBatchV0Type, da.CommitBatchWithBlobType:
+			bq.addBatch(daEntry)
+		case da.RevertBatchType:
+			bq.deleteBatch(daEntry)
+		case da.FinalizeBatchType:
+			if daEntry.BatchIndex() > bq.lastFinalizedBatchIndex {
+				bq.lastFinalizedBatchIndex = daEntry.BatchIndex()
+			}
+
+			if batch := bq.getFinalizedBatch(); batch != nil {
+				return batch, nil
+			}
+		default:
+			return nil, fmt.Errorf("unexpected type of daEntry: %T", daEntry)
+		}
+	}
+}
+
+// getFinalizedBatch returns the next finalized batch if one is available.
+func (bq *BatchQueue) getFinalizedBatch() da.Entry {
+	if bq.batches.Len() == 0 {
+		return nil
+	}
+
+	batch := bq.batches.Peek().Value()
+	if batch.BatchIndex() <= bq.lastFinalizedBatchIndex {
+		bq.deleteBatch(batch)
+		return batch
+	} else {
+		return nil
+	}
+}
+
+func (bq *BatchQueue) addBatch(batch da.Entry) {
+	heapElement := bq.batches.Push(batch)
+	bq.batchesMap.Set(batch.BatchIndex(), heapElement)
+}
+
+// deleteBatch removes the batch's committed data from the map and heap because the batch
+// was reverted or finalized, and updates DASyncedL1BlockNumber.
+func (bq *BatchQueue) deleteBatch(batch da.Entry) {
+	batchHeapElement, exists := bq.batchesMap.Get(batch.BatchIndex())
+	if !exists {
+		return
+	}
+
+	bq.batchesMap.Delete(batch.BatchIndex())
+	bq.batches.Remove(batchHeapElement)
+
+	// we store the min L1 height of the currently loaded batches so that we can resume syncing from the same place after a restart
+	// TODO: we should store this information when the batch is done being processed to avoid inconsistencies
+	rawdb.WriteDASyncedL1BlockNumber(bq.db, batch.L1BlockNumber()-1)
+}
+
+func (bq *BatchQueue) Reset(height uint64) {
+	bq.batches.Clear()
+	bq.batchesMap.Clear()
+	bq.DAQueue.Reset(height)
+}
diff --git a/rollup/da_syncer/blob_client/beacon_node_client.go b/rollup/da_syncer/blob_client/beacon_node_client.go
new file mode 100644
index 000000000000..5bfd7b9edf6c
--- /dev/null
+++ b/rollup/da_syncer/blob_client/beacon_node_client.go
@@ -0,0 +1,192 @@
+package blob_client
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
+	"github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service"
+)
+
+type BeaconNodeClient struct {
+	apiEndpoint    string
+	l1Client       *rollup_sync_service.L1Client
+	genesisTime    uint64
+	secondsPerSlot uint64
+}
+
+var (
+	beaconNodeGenesisEndpoint = "/eth/v1/beacon/genesis"
+	beaconNodeSpecEndpoint    = "/eth/v1/config/spec"
+	beaconNodeBlobEndpoint    = "/eth/v1/beacon/blob_sidecars"
+)
+
+func NewBeaconNodeClient(apiEndpoint string, l1Client
*rollup_sync_service.L1Client) (*BeaconNodeClient, error) { + // get genesis time + genesisPath, err := url.JoinPath(apiEndpoint, beaconNodeGenesisEndpoint) + if err != nil { + return nil, fmt.Errorf("failed to join path, err: %w", err) + } + resp, err := http.Get(genesisPath) + if err != nil { + return nil, fmt.Errorf("cannot do request, err: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("beacon node request failed with status: %s: could not read response body: %w", resp.Status, err) + } + bodyStr := string(body) + return nil, fmt.Errorf("beacon node request failed, status: %s, body: %s", resp.Status, bodyStr) + } + + var genesisResp GenesisResp + err = json.NewDecoder(resp.Body).Decode(&genesisResp) + if err != nil { + return nil, fmt.Errorf("failed to decode result into struct, err: %w", err) + } + genesisTime, err := strconv.ParseUint(genesisResp.Data.GenesisTime, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to decode genesis time %s, err: %w", genesisResp.Data.GenesisTime, err) + } + + // get seconds per slot from spec + specPath, err := url.JoinPath(apiEndpoint, beaconNodeSpecEndpoint) + if err != nil { + return nil, fmt.Errorf("failed to join path, err: %w", err) + } + resp, err = http.Get(specPath) + if err != nil { + return nil, fmt.Errorf("cannot do request, err: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("beacon node request failed with status: %s: could not read response body: %w", resp.Status, err) + } + bodyStr := string(body) + return nil, fmt.Errorf("beacon node request failed, status: %s, body: %s", resp.Status, bodyStr) + } + + var specResp SpecResp + err = json.NewDecoder(resp.Body).Decode(&specResp) + if err != nil { + return nil, fmt.Errorf("failed to decode result into struct, err: %w", err) + } + secondsPerSlot, err := strconv.ParseUint(specResp.Data.SecondsPerSlot, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to decode seconds per slot %s, err: %w", specResp.Data.SecondsPerSlot, err) + } + if secondsPerSlot == 0 { + return nil, fmt.Errorf("failed to make new BeaconNodeClient, secondsPerSlot is 0") + } + + return &BeaconNodeClient{ + apiEndpoint: apiEndpoint, + l1Client: l1Client, + genesisTime: genesisTime, + secondsPerSlot: secondsPerSlot, + }, nil +} + +func (c *BeaconNodeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { + // get block timestamp to calculate slot + header, err := c.l1Client.GetHeaderByNumber(blockNumber) + if err != nil { + return nil, fmt.Errorf("failed to get header by number, err: %w", err) + } + slot := (header.Time - c.genesisTime) / c.secondsPerSlot + + // get blob sidecar for slot + blobSidecarPath, err := url.JoinPath(c.apiEndpoint, beaconNodeBlobEndpoint, fmt.Sprintf("%d", slot)) + if err != nil { + return nil, fmt.Errorf("failed to join path, err: %w", err) + } + resp, err := http.Get(blobSidecarPath) + if err != nil { + return nil, fmt.Errorf("cannot do request, err: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("beacon node request failed with status: %s: could not read response body: %w", resp.Status, err) + } + bodyStr := string(body) + return nil, fmt.Errorf("beacon node 
request failed, status: %s, body: %s", resp.Status, bodyStr) + } + + var blobSidecarResp BlobSidecarResp + err = json.NewDecoder(resp.Body).Decode(&blobSidecarResp) + if err != nil { + return nil, fmt.Errorf("failed to decode result into struct, err: %w", err) + } + + // find blob with desired versionedHash + for _, blob := range blobSidecarResp.Data { + // calculate blob hash from commitment and check it with desired + commitmentBytes := common.FromHex(blob.KzgCommitment) + if len(commitmentBytes) != lenKZGCommitment { + return nil, fmt.Errorf("len of kzg commitment is not correct, expected: %d, got: %d", lenKZGCommitment, len(commitmentBytes)) + } + commitment := kzg4844.Commitment(commitmentBytes) + blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &commitment) + + if blobVersionedHash == versionedHash { + // found desired blob + blobBytes := common.FromHex(blob.Blob) + if len(blobBytes) != lenBlobBytes { + return nil, fmt.Errorf("len of blob data is not correct, expected: %d, got: %d", lenBlobBytes, len(blobBytes)) + } + + b := kzg4844.Blob(blobBytes) + return &b, nil + } + } + + return nil, fmt.Errorf("missing blob %v in slot %d, block number %d", versionedHash, slot, blockNumber) +} + +type GenesisResp struct { + Data struct { + GenesisTime string `json:"genesis_time"` + } `json:"data"` +} + +type SpecResp struct { + Data struct { + SecondsPerSlot string `json:"SECONDS_PER_SLOT"` + } `json:"data"` +} + +type BlobSidecarResp struct { + Data []struct { + Index string `json:"index"` + Blob string `json:"blob"` + KzgCommitment string `json:"kzg_commitment"` + KzgProof string `json:"kzg_proof"` + SignedBlockHeader struct { + Message struct { + Slot string `json:"slot"` + ProposerIndex string `json:"proposer_index"` + ParentRoot string `json:"parent_root"` + StateRoot string `json:"state_root"` + BodyRoot string `json:"body_root"` + } `json:"message"` + Signature string `json:"signature"` + } `json:"signed_block_header"` + KzgCommitmentInclusionProof []string `json:"kzg_commitment_inclusion_proof"` + } `json:"data"` +} diff --git a/rollup/da_syncer/blob_client/blob_client.go b/rollup/da_syncer/blob_client/blob_client.go new file mode 100644 index 000000000000..814b1d4faf2d --- /dev/null +++ b/rollup/da_syncer/blob_client/blob_client.go @@ -0,0 +1,64 @@ +package blob_client + +import ( + "context" + "errors" + "fmt" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/crypto/kzg4844" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" +) + +const ( + lenBlobBytes int = 131072 + lenKZGCommitment int = 48 +) + +type BlobClient interface { + GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) +} + +type BlobClients struct { + list []BlobClient + curPos int +} + +func NewBlobClients(blobClients ...BlobClient) *BlobClients { + return &BlobClients{ + list: blobClients, + curPos: 0, + } +} + +func (c *BlobClients) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { + if len(c.list) == 0 { + return nil, fmt.Errorf("BlobClients.GetBlobByVersionedHash: list of BlobClients is empty") + } + + for i := 0; i < len(c.list); i++ { + blob, err := c.list[c.curPos].GetBlobByVersionedHashAndBlockNumber(ctx, versionedHash, blockNumber) + if err == nil { + return blob, nil + } + c.nextPos() + // there was an error, try the next blob client in following iteration + 
log.Warn("BlobClients: failed to get blob by versioned hash from BlobClient", "err", err, "blob client pos in BlobClients", c.curPos) + } + + // if we iterated over entire list, return a temporary error that will be handled in syncing_pipeline with a backoff and retry + return nil, serrors.NewTemporaryError(errors.New("BlobClients.GetBlobByVersionedHash: failed to get blob by versioned hash from all BlobClients")) +} + +func (c *BlobClients) nextPos() { + c.curPos = (c.curPos + 1) % len(c.list) +} + +func (c *BlobClients) AddBlobClient(blobClient BlobClient) { + c.list = append(c.list, blobClient) +} + +func (c *BlobClients) Size() int { + return len(c.list) +} diff --git a/rollup/da_syncer/blob_client/blob_scan_client.go b/rollup/da_syncer/blob_client/blob_scan_client.go new file mode 100644 index 000000000000..24b03bed32b9 --- /dev/null +++ b/rollup/da_syncer/blob_client/blob_scan_client.go @@ -0,0 +1,92 @@ +package blob_client + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/common/hexutil" + "github.com/scroll-tech/go-ethereum/crypto/kzg4844" +) + +type BlobScanClient struct { + client *http.Client + apiEndpoint string +} + +func NewBlobScanClient(apiEndpoint string) *BlobScanClient { + return &BlobScanClient{ + client: http.DefaultClient, + apiEndpoint: apiEndpoint, + } +} + +func (c *BlobScanClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { + // blobscan api docs https://api.blobscan.com/#/blobs/blob-getByBlobId + path, err := url.JoinPath(c.apiEndpoint, versionedHash.String()) + if err != nil { + return nil, fmt.Errorf("failed to join path, err: %w", err) + } + req, err := http.NewRequestWithContext(ctx, "GET", path, nil) + if err != nil { + return nil, fmt.Errorf("cannot create request, err: %w", err) + } + req.Header.Set("accept", "application/json") + resp, err := c.client.Do(req) + if err != nil { + return nil, fmt.Errorf("cannot do request, err: %w", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + if resp.StatusCode == http.StatusNotFound { + return nil, fmt.Errorf("no blob with versioned hash : %s", versionedHash.String()) + } + var res ErrorRespBlobScan + err = json.NewDecoder(resp.Body).Decode(&res) + if err != nil { + return nil, fmt.Errorf("failed to decode result into struct, err: %w", err) + } + return nil, fmt.Errorf("error while fetching blob, message: %s, code: %s, versioned hash: %s", res.Message, res.Code, versionedHash.String()) + } + var result BlobRespBlobScan + + err = json.NewDecoder(resp.Body).Decode(&result) + if err != nil { + return nil, fmt.Errorf("failed to decode result into struct, err: %w", err) + } + blobBytes, err := hex.DecodeString(result.Data[2:]) + if err != nil { + return nil, fmt.Errorf("failed to decode data to bytes, err: %w", err) + } + if len(blobBytes) != lenBlobBytes { + return nil, fmt.Errorf("len of blob data is not correct, expected: %d, got: %d", lenBlobBytes, len(blobBytes)) + } + blob := kzg4844.Blob(blobBytes) + + // sanity check that retrieved blob matches versioned hash + commitment, err := kzg4844.BlobToCommitment(&blob) + if err != nil { + return nil, fmt.Errorf("failed to convert blob to commitment, err: %w", err) + } + + blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &commitment) + if blobVersionedHash != versionedHash { + return nil, fmt.Errorf("blob 
versioned hash mismatch, expected: %s, got: %s", versionedHash.String(), hexutil.Encode(blobVersionedHash[:])) + } + + return &blob, nil +} + +type BlobRespBlobScan struct { + Data string `json:"data"` +} + +type ErrorRespBlobScan struct { + Message string `json:"message"` + Code string `json:"code"` +} diff --git a/rollup/da_syncer/blob_client/block_native_client.go b/rollup/da_syncer/blob_client/block_native_client.go new file mode 100644 index 000000000000..ddd574d02d10 --- /dev/null +++ b/rollup/da_syncer/blob_client/block_native_client.go @@ -0,0 +1,85 @@ +package blob_client + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/common/hexutil" + "github.com/scroll-tech/go-ethereum/crypto/kzg4844" +) + +type BlockNativeClient struct { + apiEndpoint string +} + +func NewBlockNativeClient(apiEndpoint string) *BlockNativeClient { + return &BlockNativeClient{ + apiEndpoint: apiEndpoint, + } +} + +func (c *BlockNativeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { + // blocknative api docs https://docs.blocknative.com/blocknative-data-archive/blob-archive + path, err := url.JoinPath(c.apiEndpoint, versionedHash.String()) + if err != nil { + return nil, fmt.Errorf("failed to join path, err: %w", err) + } + resp, err := http.Get(path) + if err != nil { + return nil, fmt.Errorf("cannot do request, err: %w", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + var res ErrorRespBlockNative + err = json.NewDecoder(resp.Body).Decode(&res) + if err != nil { + return nil, fmt.Errorf("failed to decode result into struct, err: %w", err) + } + return nil, fmt.Errorf("error while fetching blob, message: %s, code: %d, versioned hash: %s", res.Error.Message, res.Error.Code, versionedHash.String()) + } + var result BlobRespBlockNative + err = json.NewDecoder(resp.Body).Decode(&result) + if err != nil { + return nil, fmt.Errorf("failed to decode result into struct, err: %w", err) + } + blobBytes, err := hex.DecodeString(result.Blob.Data[2:]) + if err != nil { + return nil, fmt.Errorf("failed to decode data to bytes, err: %w", err) + } + if len(blobBytes) != lenBlobBytes { + return nil, fmt.Errorf("len of blob data is not correct, expected: %d, got: %d", lenBlobBytes, len(blobBytes)) + } + blob := kzg4844.Blob(blobBytes) + + // sanity check that retrieved blob matches versioned hash + commitment, err := kzg4844.BlobToCommitment(&blob) + if err != nil { + return nil, fmt.Errorf("failed to convert blob to commitment, err: %w", err) + } + + blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &commitment) + if blobVersionedHash != versionedHash { + return nil, fmt.Errorf("blob versioned hash mismatch, expected: %s, got: %s", versionedHash.String(), hexutil.Encode(blobVersionedHash[:])) + } + + return &blob, nil +} + +type BlobRespBlockNative struct { + Blob struct { + Data string `json:"data"` + } `json:"blob"` +} + +type ErrorRespBlockNative struct { + Error struct { + Code int `json:"code"` + Message string `json:"message"` + } `json:"error"` +} diff --git a/rollup/da_syncer/block_queue.go b/rollup/da_syncer/block_queue.go new file mode 100644 index 000000000000..a122d41ab356 --- /dev/null +++ b/rollup/da_syncer/block_queue.go @@ -0,0 +1,56 @@ +package da_syncer + +import ( + "context" + "fmt" + + 
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" +) + +// BlockQueue is a pipeline stage that reads batches from BatchQueue, extracts all da.PartialBlock from it and +// provides them to the next stage one-by-one. +type BlockQueue struct { + batchQueue *BatchQueue + blocks []*da.PartialBlock +} + +func NewBlockQueue(batchQueue *BatchQueue) *BlockQueue { + return &BlockQueue{ + batchQueue: batchQueue, + blocks: make([]*da.PartialBlock, 0), + } +} + +func (bq *BlockQueue) NextBlock(ctx context.Context) (*da.PartialBlock, error) { + for len(bq.blocks) == 0 { + err := bq.getBlocksFromBatch(ctx) + if err != nil { + return nil, err + } + } + block := bq.blocks[0] + bq.blocks = bq.blocks[1:] + return block, nil +} + +func (bq *BlockQueue) getBlocksFromBatch(ctx context.Context) error { + daEntry, err := bq.batchQueue.NextBatch(ctx) + if err != nil { + return err + } + + entryWithBlocks, ok := daEntry.(da.EntryWithBlocks) + // this should never happen because we only receive CommitBatch entries + if !ok { + return fmt.Errorf("unexpected type of daEntry: %T", daEntry) + } + + bq.blocks = entryWithBlocks.Blocks() + + return nil +} + +func (bq *BlockQueue) Reset(height uint64) { + bq.blocks = make([]*da.PartialBlock, 0) + bq.batchQueue.Reset(height) +} diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go new file mode 100644 index 000000000000..47eabfceb65f --- /dev/null +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -0,0 +1,246 @@ +package da + +import ( + "context" + "errors" + "fmt" + + "github.com/scroll-tech/da-codec/encoding" + "github.com/scroll-tech/go-ethereum/accounts/abi" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/ethdb" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" + "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" +) + +const ( + callDataBlobSourceFetchBlockRange uint64 = 500 + commitBatchEventName = "CommitBatch" + revertBatchEventName = "RevertBatch" + finalizeBatchEventName = "FinalizeBatch" + commitBatchMethodName = "commitBatch" + commitBatchWithBlobProofMethodName = "commitBatchWithBlobProof" + + // the length of method ID at the beginning of transaction data + methodIDLength = 4 +) + +var ( + ErrSourceExhausted = errors.New("data source has been exhausted") +) + +type CalldataBlobSource struct { + ctx context.Context + l1Client *rollup_sync_service.L1Client + blobClient blob_client.BlobClient + l1height uint64 + scrollChainABI *abi.ABI + l1CommitBatchEventSignature common.Hash + l1RevertBatchEventSignature common.Hash + l1FinalizeBatchEventSignature common.Hash + db ethdb.Database + + l1Finalized uint64 +} + +func NewCalldataBlobSource(ctx context.Context, l1height uint64, l1Client *rollup_sync_service.L1Client, blobClient blob_client.BlobClient, db ethdb.Database) (*CalldataBlobSource, error) { + scrollChainABI, err := rollup_sync_service.ScrollChainMetaData.GetAbi() + if err != nil { + return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) + } + return &CalldataBlobSource{ + ctx: ctx, + l1Client: l1Client, + blobClient: blobClient, + l1height: l1height, + scrollChainABI: scrollChainABI, + l1CommitBatchEventSignature: scrollChainABI.Events[commitBatchEventName].ID, + l1RevertBatchEventSignature: scrollChainABI.Events[revertBatchEventName].ID, + l1FinalizeBatchEventSignature: 
scrollChainABI.Events[finalizeBatchEventName].ID, + db: db, + }, nil +} + +func (ds *CalldataBlobSource) NextData() (Entries, error) { + var err error + to := ds.l1height + callDataBlobSourceFetchBlockRange + + // If there's not enough finalized blocks to request up to, we need to query finalized block number. + // Otherwise, we know that there's more finalized blocks than we want to request up to + // -> no need to query finalized block number + if to > ds.l1Finalized { + ds.l1Finalized, err = ds.l1Client.GetLatestFinalizedBlockNumber() + if err != nil { + return nil, serrors.NewTemporaryError(fmt.Errorf("failed to query GetLatestFinalizedBlockNumber, error: %v", err)) + } + // make sure we don't request more than finalized blocks + to = min(to, ds.l1Finalized) + } + + if ds.l1height > to { + return nil, ErrSourceExhausted + } + + logs, err := ds.l1Client.FetchRollupEventsInRange(ds.l1height, to) + if err != nil { + return nil, serrors.NewTemporaryError(fmt.Errorf("cannot get events, l1height: %d, error: %v", ds.l1height, err)) + } + da, err := ds.processLogsToDA(logs) + if err != nil { + return nil, serrors.NewTemporaryError(fmt.Errorf("failed to process logs to DA, error: %v", err)) + } + + ds.l1height = to + 1 + return da, nil +} + +func (ds *CalldataBlobSource) L1Height() uint64 { + return ds.l1height +} + +func (ds *CalldataBlobSource) processLogsToDA(logs []types.Log) (Entries, error) { + var entries Entries + var entry Entry + var err error + + for _, vLog := range logs { + switch vLog.Topics[0] { + case ds.l1CommitBatchEventSignature: + event := &rollup_sync_service.L1CommitBatchEvent{} + if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, commitBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack commit rollup event log, err: %w", err) + } + + batchIndex := event.BatchIndex.Uint64() + log.Trace("found new CommitBatch event", "batch index", batchIndex) + + if entry, err = ds.getCommitBatchDA(batchIndex, &vLog); err != nil { + return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", batchIndex, err) + } + + case ds.l1RevertBatchEventSignature: + event := &rollup_sync_service.L1RevertBatchEvent{} + if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, revertBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack revert rollup event log, err: %w", err) + } + + batchIndex := event.BatchIndex.Uint64() + log.Trace("found new RevertBatchType event", "batch index", batchIndex) + entry = NewRevertBatch(batchIndex) + + case ds.l1FinalizeBatchEventSignature: + event := &rollup_sync_service.L1FinalizeBatchEvent{} + if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, finalizeBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack finalized rollup event log, err: %w", err) + } + + batchIndex := event.BatchIndex.Uint64() + log.Trace("found new FinalizeBatchType event", "batch index", event.BatchIndex.Uint64()) + entry = NewFinalizeBatch(batchIndex) + + default: + return nil, fmt.Errorf("unknown event, topic: %v, tx hash: %v", vLog.Topics[0].Hex(), vLog.TxHash.Hex()) + } + + entries = append(entries, entry) + } + return entries, nil +} + +type commitBatchArgs struct { + Version uint8 + ParentBatchHeader []byte + Chunks [][]byte + SkippedL1MessageBitmap []byte +} + +func newCommitBatchArgs(method *abi.Method, values []interface{}) (*commitBatchArgs, error) { + var args commitBatchArgs + err := method.Inputs.Copy(&args, values) + return &args, err +} + +func 
newCommitBatchArgsFromCommitBatchWithProof(method *abi.Method, values []interface{}) (*commitBatchArgs, error) { + var args commitBatchWithBlobProofArgs + err := method.Inputs.Copy(&args, values) + if err != nil { + return nil, err + } + return &commitBatchArgs{ + Version: args.Version, + ParentBatchHeader: args.ParentBatchHeader, + Chunks: args.Chunks, + SkippedL1MessageBitmap: args.SkippedL1MessageBitmap, + }, nil +} + +type commitBatchWithBlobProofArgs struct { + Version uint8 + ParentBatchHeader []byte + Chunks [][]byte + SkippedL1MessageBitmap []byte + BlobDataProof []byte +} + +func (ds *CalldataBlobSource) getCommitBatchDA(batchIndex uint64, vLog *types.Log) (Entry, error) { + if batchIndex == 0 { + return NewCommitBatchDAV0Empty(), nil + } + + txData, err := ds.l1Client.FetchTxData(vLog) + if err != nil { + return nil, fmt.Errorf("failed to fetch tx data, tx hash: %v, err: %w", vLog.TxHash.Hex(), err) + } + if len(txData) < methodIDLength { + return nil, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength) + } + + method, err := ds.scrollChainABI.MethodById(txData[:methodIDLength]) + if err != nil { + return nil, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err) + } + values, err := method.Inputs.Unpack(txData[methodIDLength:]) + if err != nil { + return nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err) + } + if method.Name == commitBatchMethodName { + args, err := newCommitBatchArgs(method, values) + if err != nil { + return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) + } + codecVersion := encoding.CodecVersion(args.Version) + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) + } + switch args.Version { + case 0: + return NewCommitBatchDAV0(ds.db, codec, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap, vLog.BlockNumber) + case 1, 2: + return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) + default: + return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) + } + } else if method.Name == commitBatchWithBlobProofMethodName { + args, err := newCommitBatchArgsFromCommitBatchWithProof(method, values) + if err != nil { + return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) + } + codecVersion := encoding.CodecVersion(args.Version) + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) + } + switch args.Version { + case 3, 4: + return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) + default: + return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) + } + } + + return nil, fmt.Errorf("unknown method name: %s", method.Name) +} diff --git a/rollup/da_syncer/da/commitV0.go b/rollup/da_syncer/da/commitV0.go new file mode 100644 index 000000000000..135a76d79518 --- /dev/null 
+++ b/rollup/da_syncer/da/commitV0.go @@ -0,0 +1,172 @@ +package da + +import ( + "encoding/binary" + "fmt" + + "github.com/scroll-tech/da-codec/encoding" + + "github.com/scroll-tech/go-ethereum/core/rawdb" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/ethdb" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" +) + +type CommitBatchDAV0 struct { + version uint8 + batchIndex uint64 + parentTotalL1MessagePopped uint64 + skippedL1MessageBitmap []byte + chunks []*encoding.DAChunkRawTx + l1Txs []*types.L1MessageTx + + l1BlockNumber uint64 +} + +func NewCommitBatchDAV0(db ethdb.Database, + codec encoding.Codec, + version uint8, + batchIndex uint64, + parentBatchHeader []byte, + chunks [][]byte, + skippedL1MessageBitmap []byte, + l1BlockNumber uint64, +) (*CommitBatchDAV0, error) { + decodedChunks, err := codec.DecodeDAChunksRawTx(chunks) + if err != nil { + return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", batchIndex, err) + } + + return NewCommitBatchDAV0WithChunks(db, version, batchIndex, parentBatchHeader, decodedChunks, skippedL1MessageBitmap, l1BlockNumber) +} + +func NewCommitBatchDAV0WithChunks(db ethdb.Database, + version uint8, + batchIndex uint64, + parentBatchHeader []byte, + decodedChunks []*encoding.DAChunkRawTx, + skippedL1MessageBitmap []byte, + l1BlockNumber uint64, +) (*CommitBatchDAV0, error) { + parentTotalL1MessagePopped := getBatchTotalL1MessagePopped(parentBatchHeader) + l1Txs, err := getL1Messages(db, parentTotalL1MessagePopped, skippedL1MessageBitmap, getTotalMessagesPoppedFromChunks(decodedChunks)) + if err != nil { + return nil, fmt.Errorf("failed to get L1 messages for v0 batch %d: %w", batchIndex, err) + } + + return &CommitBatchDAV0{ + version: version, + batchIndex: batchIndex, + parentTotalL1MessagePopped: parentTotalL1MessagePopped, + skippedL1MessageBitmap: skippedL1MessageBitmap, + chunks: decodedChunks, + l1Txs: l1Txs, + l1BlockNumber: l1BlockNumber, + }, nil +} + +func NewCommitBatchDAV0Empty() *CommitBatchDAV0 { + return &CommitBatchDAV0{ + batchIndex: 0, + } +} + +func (c *CommitBatchDAV0) Type() Type { + return CommitBatchV0Type +} + +func (c *CommitBatchDAV0) L1BlockNumber() uint64 { + return c.l1BlockNumber +} + +func (c *CommitBatchDAV0) BatchIndex() uint64 { + return c.batchIndex +} + +func (c *CommitBatchDAV0) CompareTo(other Entry) int { + if c.BatchIndex() < other.BatchIndex() { + return -1 + } else if c.BatchIndex() > other.BatchIndex() { + return 1 + } + return 0 +} + +func (c *CommitBatchDAV0) Blocks() []*PartialBlock { + var blocks []*PartialBlock + l1TxPointer := 0 + + curL1TxIndex := c.parentTotalL1MessagePopped + for _, chunk := range c.chunks { + for blockId, daBlock := range chunk.Blocks { + // create txs + txs := make(types.Transactions, 0, daBlock.NumTransactions()) + // insert l1 msgs + for l1TxPointer < len(c.l1Txs) && c.l1Txs[l1TxPointer].QueueIndex < curL1TxIndex+uint64(daBlock.NumL1Messages()) { + l1Tx := types.NewTx(c.l1Txs[l1TxPointer]) + txs = append(txs, l1Tx) + l1TxPointer++ + } + curL1TxIndex += uint64(daBlock.NumL1Messages()) + + // insert l2 txs + txs = append(txs, chunk.Transactions[blockId]...) 
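+			// at this point txs holds the block's L1 message transactions
+			// (ordered by queue index) followed by its L2 transactions as
+			// decoded from the chunk data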
+
+			block := NewPartialBlock(
+				&PartialHeader{
+					Number:     daBlock.Number(),
+					Time:       daBlock.Timestamp(),
+					BaseFee:    daBlock.BaseFee(),
+					GasLimit:   daBlock.GasLimit(),
+					Difficulty: 10, // TODO: replace with real difficulty
+					ExtraData:  []byte{1, 2, 3, 4, 5, 6, 7, 8}, // TODO: replace with real extra data
+				},
+				txs)
+			blocks = append(blocks, block)
+		}
+	}
+
+	return blocks
+}
+
+func getTotalMessagesPoppedFromChunks(decodedChunks []*encoding.DAChunkRawTx) int {
+	totalL1MessagePopped := 0
+	for _, chunk := range decodedChunks {
+		for _, block := range chunk.Blocks {
+			totalL1MessagePopped += int(block.NumL1Messages())
+		}
+	}
+	return totalL1MessagePopped
+}
+
+func getL1Messages(db ethdb.Database, parentTotalL1MessagePopped uint64, skippedBitmap []byte, totalL1MessagePopped int) ([]*types.L1MessageTx, error) {
+	var txs []*types.L1MessageTx
+	decodedSkippedBitmap, err := encoding.DecodeBitmap(skippedBitmap, totalL1MessagePopped)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode skipped message bitmap: err: %w", err)
+	}
+
+	// get all necessary L1 messages, leaving out the ones marked as skipped
+	currentIndex := parentTotalL1MessagePopped
+	for index := 0; index < totalL1MessagePopped; index++ {
+		if encoding.IsL1MessageSkipped(decodedSkippedBitmap, currentIndex-parentTotalL1MessagePopped) {
+			currentIndex++
+			continue
+		}
+		l1Tx := rawdb.ReadL1Message(db, currentIndex)
+		if l1Tx == nil {
+			// message not yet available
+			// we return serrors.EOFError as this will be handled in the syncing pipeline with a backoff and retry
+			return nil, serrors.EOFError
+		}
+		txs = append(txs, l1Tx)
+		currentIndex++
+	}
+
+	return txs, nil
+}
+
+func getBatchTotalL1MessagePopped(data []byte) uint64 {
+	// the total number of L1 messages popped is stored in bytes 17 to 24 (inclusive), according to the codec spec
+	return binary.BigEndian.Uint64(data[17:25])
+}
diff --git a/rollup/da_syncer/da/commitV1.go b/rollup/da_syncer/da/commitV1.go
new file mode 100644
index 000000000000..4670eec8bbcb
--- /dev/null
+++ b/rollup/da_syncer/da/commitV1.go
@@ -0,0 +1,82 @@
+package da
+
+import (
+	"context"
+	"crypto/sha256"
+	"fmt"
+
+	"github.com/scroll-tech/da-codec/encoding"
+
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
+	"github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/core/types"
+	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
+	"github.com/scroll-tech/go-ethereum/ethdb"
+)
+
+type CommitBatchDAV1 struct {
+	*CommitBatchDAV0
+}
+
+func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database,
+	codec encoding.Codec,
+	l1Client *rollup_sync_service.L1Client,
+	blobClient blob_client.BlobClient,
+	vLog *types.Log,
+	version uint8,
+	batchIndex uint64,
+	parentBatchHeader []byte,
+	chunks [][]byte,
+	skippedL1MessageBitmap []byte,
+) (*CommitBatchDAV1, error) {
+	decodedChunks, err := codec.DecodeDAChunksRawTx(chunks)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unpack chunks: %v, err: %w", batchIndex, err)
+	}
+
+	versionedHash, err := l1Client.FetchTxBlobHash(vLog)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch blob hash, err: %w", err)
+	}
+
+	blob, err := blobClient.GetBlobByVersionedHashAndBlockNumber(ctx, versionedHash, vLog.BlockNumber)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch blob from blob client, err: %w", err)
+	}
+	if blob == nil {
+		return nil, fmt.Errorf("unexpected, blob == nil and err == nil, batch index: %d, versionedHash: %s, blobClient: %T", batchIndex,
versionedHash.String(), blobClient)
+	}
+
+	// compute blob versioned hash and compare with one from tx
+	c, err := kzg4844.BlobToCommitment(blob)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create blob commitment, err: %w", err)
+	}
+	blobVersionedHash := common.Hash(kzg4844.CalcBlobHashV1(sha256.New(), &c))
+	if blobVersionedHash != versionedHash {
+		return nil, fmt.Errorf("blobVersionedHash from blob source is not equal to versionedHash from tx, correct versioned hash: %s, fetched blob hash: %s", versionedHash.String(), blobVersionedHash.String())
+	}
+
+	// decode txs from blob
+	err = codec.DecodeTxsFromBlob(blob, decodedChunks)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode txs from blob: %w", err)
+	}
+
+	if decodedChunks == nil {
+		return nil, fmt.Errorf("decodedChunks is nil after decoding")
+	}
+
+	v0, err := NewCommitBatchDAV0WithChunks(db, version, batchIndex, parentBatchHeader, decodedChunks, skippedL1MessageBitmap, vLog.BlockNumber)
+	if err != nil {
+		return nil, err
+	}
+
+	return &CommitBatchDAV1{v0}, nil
+}
+
+func (c *CommitBatchDAV1) Type() Type {
+	return CommitBatchWithBlobType
+}
diff --git a/rollup/da_syncer/da/da.go b/rollup/da_syncer/da/da.go
new file mode 100644
index 000000000000..1ad618d7ba3d
--- /dev/null
+++ b/rollup/da_syncer/da/da.go
@@ -0,0 +1,69 @@
+package da
+
+import (
+	"math/big"
+
+	"github.com/scroll-tech/go-ethereum/core/types"
+)
+
+type Type int
+
+const (
+	// CommitBatchV0Type is a CommitBatch event whose batch data lives entirely in calldata (codec v0)
+	CommitBatchV0Type Type = iota
+	// CommitBatchWithBlobType is a CommitBatch event whose transactions are carried in a blob (codec v1, v2, v3, v4)
+	CommitBatchWithBlobType
+	// RevertBatchType is a RevertBatch event
+	RevertBatchType
+	// FinalizeBatchType is a FinalizeBatch event
+	FinalizeBatchType
+)
+
+// Entry represents a single DA event (commit, revert, finalize).
+type Entry interface {
+	Type() Type
+	BatchIndex() uint64
+	L1BlockNumber() uint64
+	CompareTo(Entry) int
+}
+
+type EntryWithBlocks interface {
+	Entry
+	Blocks() []*PartialBlock
+}
+
+type Entries []Entry
+
+// PartialHeader represents a partial header (from DA) of a block.
+type PartialHeader struct {
+	Number     uint64
+	Time       uint64
+	BaseFee    *big.Int
+	GasLimit   uint64
+	Difficulty uint64
+	ExtraData  []byte
+}
+
+func (h *PartialHeader) ToHeader() *types.Header {
+	return &types.Header{
+		Number:     big.NewInt(0).SetUint64(h.Number),
+		Time:       h.Time,
+		BaseFee:    h.BaseFee,
+		GasLimit:   h.GasLimit,
+		Difficulty: new(big.Int).SetUint64(h.Difficulty),
+		Extra:      h.ExtraData,
+	}
+}
+
+// PartialBlock represents a partial block (from DA).
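+// It carries only what can be recovered from DA; the full block is
+// reconstructed later by executing Transactions on top of the parent block
+// (see DASyncer.SyncOneBlock and BlockChain.BuildAndWriteBlock).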
+type PartialBlock struct { + PartialHeader *PartialHeader + Transactions types.Transactions +} + +func NewPartialBlock(partialHeader *PartialHeader, txs types.Transactions) *PartialBlock { + return &PartialBlock{ + PartialHeader: partialHeader, + Transactions: txs, + } +} diff --git a/rollup/da_syncer/da/finalize.go b/rollup/da_syncer/da/finalize.go new file mode 100644 index 000000000000..14d6c2a644cb --- /dev/null +++ b/rollup/da_syncer/da/finalize.go @@ -0,0 +1,34 @@ +package da + +type FinalizeBatch struct { + batchIndex uint64 + + l1BlockNumber uint64 +} + +func NewFinalizeBatch(batchIndex uint64) *FinalizeBatch { + return &FinalizeBatch{ + batchIndex: batchIndex, + } +} + +func (f *FinalizeBatch) Type() Type { + return FinalizeBatchType +} + +func (f *FinalizeBatch) L1BlockNumber() uint64 { + return f.l1BlockNumber +} + +func (f *FinalizeBatch) BatchIndex() uint64 { + return f.batchIndex +} + +func (f *FinalizeBatch) CompareTo(other Entry) int { + if f.BatchIndex() < other.BatchIndex() { + return -1 + } else if f.BatchIndex() > other.BatchIndex() { + return 1 + } + return 0 +} diff --git a/rollup/da_syncer/da/revert.go b/rollup/da_syncer/da/revert.go new file mode 100644 index 000000000000..d84f22ebaa7b --- /dev/null +++ b/rollup/da_syncer/da/revert.go @@ -0,0 +1,33 @@ +package da + +type RevertBatch struct { + batchIndex uint64 + + l1BlockNumber uint64 +} + +func NewRevertBatch(batchIndex uint64) *RevertBatch { + return &RevertBatch{ + batchIndex: batchIndex, + } +} + +func (r *RevertBatch) Type() Type { + return RevertBatchType +} + +func (r *RevertBatch) L1BlockNumber() uint64 { + return r.l1BlockNumber +} +func (r *RevertBatch) BatchIndex() uint64 { + return r.batchIndex +} + +func (r *RevertBatch) CompareTo(other Entry) int { + if r.BatchIndex() < other.BatchIndex() { + return -1 + } else if r.BatchIndex() > other.BatchIndex() { + return 1 + } + return 0 +} diff --git a/rollup/da_syncer/da_queue.go b/rollup/da_syncer/da_queue.go new file mode 100644 index 000000000000..64673a4a646b --- /dev/null +++ b/rollup/da_syncer/da_queue.go @@ -0,0 +1,70 @@ +package da_syncer + +import ( + "context" + "errors" + + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" +) + +// DAQueue is a pipeline stage that reads DA entries from a DataSource and provides them to the next stage. 
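+// When the current DataSource reports da.ErrSourceExhausted, DAQueue records
+// the source's latest L1 height, drops the source and returns an EOF error,
+// letting the pipeline retry with a freshly opened source.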
+type DAQueue struct { + l1height uint64 + dataSourceFactory *DataSourceFactory + dataSource DataSource + da da.Entries +} + +func NewDAQueue(l1height uint64, dataSourceFactory *DataSourceFactory) *DAQueue { + return &DAQueue{ + l1height: l1height, + dataSourceFactory: dataSourceFactory, + dataSource: nil, + da: make(da.Entries, 0), + } +} + +func (dq *DAQueue) NextDA(ctx context.Context) (da.Entry, error) { + for len(dq.da) == 0 { + err := dq.getNextData(ctx) + if err != nil { + return nil, err + } + } + daEntry := dq.da[0] + dq.da = dq.da[1:] + return daEntry, nil +} + +func (dq *DAQueue) getNextData(ctx context.Context) error { + var err error + if dq.dataSource == nil { + dq.dataSource, err = dq.dataSourceFactory.OpenDataSource(ctx, dq.l1height) + if err != nil { + return err + } + } + + dq.da, err = dq.dataSource.NextData() + if err == nil { + return nil + } + + // previous dataSource has been exhausted, create new + if errors.Is(err, da.ErrSourceExhausted) { + dq.l1height = dq.dataSource.L1Height() + dq.dataSource = nil + + // we return EOFError to be handled in pipeline + return serrors.EOFError + } + + return err +} + +func (dq *DAQueue) Reset(height uint64) { + dq.l1height = height + dq.dataSource = nil + dq.da = make(da.Entries, 0) +} diff --git a/rollup/da_syncer/da_syncer.go b/rollup/da_syncer/da_syncer.go new file mode 100644 index 000000000000..c3c223ff22a9 --- /dev/null +++ b/rollup/da_syncer/da_syncer.go @@ -0,0 +1,49 @@ +package da_syncer + +import ( + "fmt" + + "github.com/scroll-tech/go-ethereum/core" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" +) + +var ( + ErrBlockTooLow = fmt.Errorf("block number is too low") + ErrBlockTooHigh = fmt.Errorf("block number is too high") +) + +type DASyncer struct { + blockchain *core.BlockChain +} + +func NewDASyncer(blockchain *core.BlockChain) *DASyncer { + return &DASyncer{ + blockchain: blockchain, + } +} + +// SyncOneBlock receives a PartialBlock, makes sure it's the next block in the chain, executes it and inserts it to the blockchain. +func (s *DASyncer) SyncOneBlock(block *da.PartialBlock) error { + currentBlock := s.blockchain.CurrentBlock() + + // we expect blocks to be consecutive. block.PartialHeader.Number == parentBlock.Number+1. 
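+	// A lower number means the block was already imported and can be skipped
+	// (ErrBlockTooLow); a higher number means a gap, so the pipeline has to
+	// reset and re-read from an earlier L1 block (ErrBlockTooHigh).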
+	if block.PartialHeader.Number <= currentBlock.Number.Uint64() {
+		log.Debug("block number is too low", "block number", block.PartialHeader.Number, "parent block number", currentBlock.Number.Uint64())
+		return ErrBlockTooLow
+	} else if block.PartialHeader.Number > currentBlock.Number.Uint64()+1 {
+		log.Debug("block number is too high", "block number", block.PartialHeader.Number, "parent block number", currentBlock.Number.Uint64())
+		return ErrBlockTooHigh
+	}
+
+	parentBlock := s.blockchain.GetBlockByNumber(currentBlock.Number.Uint64())
+	if _, err := s.blockchain.BuildAndWriteBlock(parentBlock, block.PartialHeader.ToHeader(), block.Transactions); err != nil {
+		return fmt.Errorf("failed building and writing block, number: %d, error: %v", block.PartialHeader.Number, err)
+	}
+
+	if s.blockchain.CurrentBlock().Number.Uint64()%1000 == 0 {
+		log.Info("L1 sync progress", "blockchain height", s.blockchain.CurrentBlock().Number.Uint64(), "block hash", s.blockchain.CurrentBlock().Hash(), "root", s.blockchain.CurrentBlock().Root)
+	}
+
+	return nil
+}
diff --git a/rollup/da_syncer/data_source.go b/rollup/da_syncer/data_source.go
new file mode 100644
index 000000000000..f417d09af00e
--- /dev/null
+++ b/rollup/da_syncer/data_source.go
@@ -0,0 +1,44 @@
+package da_syncer
+
+import (
+	"context"
+	"errors"
+
+	"github.com/scroll-tech/go-ethereum/core"
+	"github.com/scroll-tech/go-ethereum/ethdb"
+	"github.com/scroll-tech/go-ethereum/params"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/da"
+	"github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service"
+)
+
+type DataSource interface {
+	NextData() (da.Entries, error)
+	L1Height() uint64
+}
+
+type DataSourceFactory struct {
+	config        Config
+	genesisConfig *params.ChainConfig
+	l1Client      *rollup_sync_service.L1Client
+	blobClient    blob_client.BlobClient
+	db            ethdb.Database
+}
+
+func NewDataSourceFactory(blockchain *core.BlockChain, genesisConfig *params.ChainConfig, config Config, l1Client *rollup_sync_service.L1Client, blobClient blob_client.BlobClient, db ethdb.Database) *DataSourceFactory {
+	return &DataSourceFactory{
+		config:        config,
+		genesisConfig: genesisConfig,
+		l1Client:      l1Client,
+		blobClient:    blobClient,
+		db:            db,
+	}
+}
+
+func (ds *DataSourceFactory) OpenDataSource(ctx context.Context, l1height uint64) (DataSource, error) {
+	if ds.config.FetcherMode == L1RPC {
+		return da.NewCalldataBlobSource(ctx, l1height, ds.l1Client, ds.blobClient, ds.db)
+	} else {
+		return nil, errors.New("snapshot_data_source: not implemented")
+	}
+}
diff --git a/rollup/da_syncer/modes.go b/rollup/da_syncer/modes.go
new file mode 100644
index 000000000000..bfcc1d1dfba0
--- /dev/null
+++ b/rollup/da_syncer/modes.go
@@ -0,0 +1,52 @@
+package da_syncer
+
+import "fmt"
+
+// FetcherMode represents the mode of fetcher
+type FetcherMode int
+
+const (
+	// L1RPC mode fetches DA from L1RPC
+	L1RPC FetcherMode = iota
+	// Snapshot mode loads DA from snapshot file
+	Snapshot
+)
+
+func (mode FetcherMode) IsValid() bool {
+	return mode >= L1RPC && mode <= Snapshot
+}
+
+// String implements the stringer interface.
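+// The returned value matches the textual form used by MarshalText and
+// UnmarshalText below, e.g. L1RPC.String() == "l1rpc".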
+func (mode FetcherMode) String() string {
	switch mode {
+	case L1RPC:
+		return "l1rpc"
+	case Snapshot:
+		return "snapshot"
+	default:
+		return "unknown"
+	}
+}
+
+func (mode FetcherMode) MarshalText() ([]byte, error) {
+	switch mode {
+	case L1RPC:
+		return []byte("l1rpc"), nil
+	case Snapshot:
+		return []byte("snapshot"), nil
+	default:
+		return nil, fmt.Errorf("unknown sync mode %d", mode)
+	}
+}
+
+func (mode *FetcherMode) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "l1rpc":
+		*mode = L1RPC
+	case "snapshot":
+		*mode = Snapshot
+	default:
+		return fmt.Errorf(`unknown sync mode %q, want "l1rpc" or "snapshot"`, text)
+	}
+	return nil
+}
diff --git a/rollup/da_syncer/serrors/errors.go b/rollup/da_syncer/serrors/errors.go
new file mode 100644
index 000000000000..aa0426f0771d
--- /dev/null
+++ b/rollup/da_syncer/serrors/errors.go
@@ -0,0 +1,62 @@
+package serrors
+
+import (
+	"fmt"
+)
+
+const (
+	temporary Type = iota
+	eof
+)
+
+var (
+	TemporaryError = NewTemporaryError(nil)
+	EOFError       = NewEOFError(nil)
+)
+
+type Type uint8
+
+func (t Type) String() string {
+	switch t {
+	case temporary:
+		return "temporary"
+	case eof:
+		return "EOF"
+	default:
+		return "unknown"
+	}
+}
+
+type syncError struct {
+	t   Type
+	err error
+}
+
+func NewTemporaryError(err error) error {
+	return &syncError{t: temporary, err: err}
+}
+
+func NewEOFError(err error) error {
+	return &syncError{t: eof, err: err}
+}
+
+func (s *syncError) Error() string {
+	return fmt.Sprintf("%s: %v", s.t, s.err)
+}
+
+func (s *syncError) Unwrap() error {
+	return s.err
+}
+
+func (s *syncError) Is(target error) bool {
+	if target == nil {
+		return s == nil
+	}
+
+	targetSyncErr, ok := target.(*syncError)
+	if !ok {
+		return false
+	}
+
+	return s.t == targetSyncErr.t
+}
diff --git a/rollup/da_syncer/syncing_pipeline.go b/rollup/da_syncer/syncing_pipeline.go
new file mode 100644
index 000000000000..6795f2608e05
--- /dev/null
+++ b/rollup/da_syncer/syncing_pipeline.go
@@ -0,0 +1,233 @@
+package da_syncer
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/scroll-tech/go-ethereum/common/backoff"
+	"github.com/scroll-tech/go-ethereum/core"
+	"github.com/scroll-tech/go-ethereum/core/rawdb"
+	"github.com/scroll-tech/go-ethereum/ethdb"
+	"github.com/scroll-tech/go-ethereum/log"
+	"github.com/scroll-tech/go-ethereum/params"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors"
+	"github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service"
+	"github.com/scroll-tech/go-ethereum/rollup/sync_service"
+)
+
+// Config is the configuration parameters of data availability syncing.
+type Config struct {
+	FetcherMode            FetcherMode // mode of fetcher
+	SnapshotFilePath       string      // path to snapshot file
+	BlobScanAPIEndpoint    string      // BlobScan blob api endpoint
+	BlockNativeAPIEndpoint string      // BlockNative blob api endpoint
+	BeaconNodeAPIEndpoint  string      // Beacon node api endpoint
+}
+
+// SyncingPipeline is a derivation pipeline that syncs data from L1 and DA and transforms it
+// into L2 blocks and a chain.
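+//
+// Internally the stages are chained as DAQueue -> BatchQueue -> BlockQueue ->
+// DASyncer: DAQueue yields raw DA entries from the data source, BatchQueue
+// orders them into a stream of committed batches, BlockQueue unpacks each
+// batch into its PartialBlocks, and DASyncer executes and writes those blocks
+// one by one.
+//
+// A minimal usage sketch, assuming the caller has already set up the chain,
+// chain config, database and L1 eth client (the variable names below are
+// placeholders):
+//
+//	pipeline, err := NewSyncingPipeline(context.Background(), chain,
+//		genesisConfig, db, l1EthClient, l1DeploymentBlock, config)
+//	if err != nil {
+//		return err
+//	}
+//	pipeline.Start()
+//	defer pipeline.Stop()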
+type SyncingPipeline struct { + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + expBackoff *backoff.Exponential + + l1DeploymentBlock uint64 + + db ethdb.Database + blockchain *core.BlockChain + blockQueue *BlockQueue + daSyncer *DASyncer +} + +func NewSyncingPipeline(ctx context.Context, blockchain *core.BlockChain, genesisConfig *params.ChainConfig, db ethdb.Database, ethClient sync_service.EthClient, l1DeploymentBlock uint64, config Config) (*SyncingPipeline, error) { + scrollChainABI, err := rollup_sync_service.ScrollChainMetaData.GetAbi() + if err != nil { + return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) + } + + l1Client, err := rollup_sync_service.NewL1Client(ctx, ethClient, genesisConfig.Scroll.L1Config.L1ChainId, genesisConfig.Scroll.L1Config.ScrollChainAddress, scrollChainABI) + if err != nil { + return nil, err + } + + blobClientList := blob_client.NewBlobClients() + if config.BeaconNodeAPIEndpoint != "" { + beaconNodeClient, err := blob_client.NewBeaconNodeClient(config.BeaconNodeAPIEndpoint, l1Client) + if err != nil { + log.Warn("failed to create BeaconNodeClient", "err", err) + } else { + blobClientList.AddBlobClient(beaconNodeClient) + } + } + if config.BlobScanAPIEndpoint != "" { + blobClientList.AddBlobClient(blob_client.NewBlobScanClient(config.BlobScanAPIEndpoint)) + } + if config.BlockNativeAPIEndpoint != "" { + blobClientList.AddBlobClient(blob_client.NewBlockNativeClient(config.BlockNativeAPIEndpoint)) + } + if blobClientList.Size() == 0 { + return nil, errors.New("DA syncing is enabled but no blob client is configured. Please provide at least one blob client via command line flag") + } + + dataSourceFactory := NewDataSourceFactory(blockchain, genesisConfig, config, l1Client, blobClientList, db) + syncedL1Height := l1DeploymentBlock - 1 + from := rawdb.ReadDASyncedL1BlockNumber(db) + if from != nil { + syncedL1Height = *from + } + + daQueue := NewDAQueue(syncedL1Height, dataSourceFactory) + batchQueue := NewBatchQueue(daQueue, db) + blockQueue := NewBlockQueue(batchQueue) + daSyncer := NewDASyncer(blockchain) + + ctx, cancel := context.WithCancel(ctx) + return &SyncingPipeline{ + ctx: ctx, + cancel: cancel, + expBackoff: backoff.NewExponential(100*time.Millisecond, 10*time.Second, 100*time.Millisecond), + wg: sync.WaitGroup{}, + l1DeploymentBlock: l1DeploymentBlock, + db: db, + blockchain: blockchain, + blockQueue: blockQueue, + daSyncer: daSyncer, + }, nil +} + +func (s *SyncingPipeline) Step() error { + block, err := s.blockQueue.NextBlock(s.ctx) + if err != nil { + return err + } + err = s.daSyncer.SyncOneBlock(block) + return err +} + +func (s *SyncingPipeline) Start() { + log.Info("sync from DA: starting pipeline") + + s.wg.Add(1) + go func() { + s.mainLoop() + s.wg.Done() + }() +} + +func (s *SyncingPipeline) mainLoop() { + stepCh := make(chan struct{}, 1) + var delayedStepCh <-chan time.Time + var resetCounter int + var tempErrorCounter int + + // reqStep is a helper function to request a step to be executed. + // If delay is true, it will request a delayed step with exponential backoff, otherwise it will request an immediate step. 
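+	// Requests coalesce: an immediate step is dropped if one is already pending
+	// on stepCh, and a delayed step is ignored while another delay is in flight.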
+ reqStep := func(delay bool) { + if delay { + if delayedStepCh == nil { + delayDur := s.expBackoff.NextDuration() + delayedStepCh = time.After(delayDur) + log.Debug("requesting delayed step", "delay", delayDur, "attempt", s.expBackoff.Attempt()) + } else { + log.Debug("ignoring step request because of ongoing delayed step", "attempt", s.expBackoff.Attempt()) + } + } else { + select { + case stepCh <- struct{}{}: + default: + } + } + } + + // start pipeline + reqStep(false) + + for { + select { + case <-s.ctx.Done(): + return + default: + } + + select { + case <-s.ctx.Done(): + return + case <-delayedStepCh: + delayedStepCh = nil + reqStep(false) + case <-stepCh: + err := s.Step() + if err == nil { + // step succeeded, reset exponential backoff and continue + reqStep(false) + s.expBackoff.Reset() + resetCounter = 0 + tempErrorCounter = 0 + continue + } + + if errors.Is(err, serrors.EOFError) { + // pipeline is empty, request a delayed step + // TODO: eventually (with state manager) this should not trigger a delayed step because external events will trigger a new step anyway + reqStep(true) + tempErrorCounter = 0 + continue + } else if errors.Is(err, serrors.TemporaryError) { + log.Warn("syncing pipeline step failed due to temporary error, retrying", "err", err) + if tempErrorCounter > 100 { + log.Warn("syncing pipeline step failed due to 100 consecutive temporary errors, stopping pipeline worker", "last err", err) + return + } + + // temporary error, request a delayed step + reqStep(true) + tempErrorCounter++ + continue + } else if errors.Is(err, ErrBlockTooLow) { + // block number returned by the block queue is too low, + // we skip the blocks until we reach the correct block number again. + reqStep(false) + tempErrorCounter = 0 + continue + } else if errors.Is(err, ErrBlockTooHigh) { + // block number returned by the block queue is too high, + // reset the pipeline and move backwards from the last L1 block we read + s.reset(resetCounter) + resetCounter++ + reqStep(false) + tempErrorCounter = 0 + continue + } else if errors.Is(err, context.Canceled) { + log.Info("syncing pipeline stopped due to cancelled context", "err", err) + return + } + + log.Warn("syncing pipeline step failed due to unrecoverable error, stopping pipeline worker", "err", err) + return + } + } +} + +func (s *SyncingPipeline) Stop() { + log.Info("sync from DA: stopping pipeline...") + s.cancel() + s.wg.Wait() + log.Info("sync from DA: stopping pipeline... done") +} + +func (s *SyncingPipeline) reset(resetCounter int) { + amount := 100 * uint64(resetCounter) + syncedL1Height := s.l1DeploymentBlock - 1 + from := rawdb.ReadDASyncedL1BlockNumber(s.db) + if from != nil && *from+amount > syncedL1Height { + syncedL1Height = *from - amount + rawdb.WriteDASyncedL1BlockNumber(s.db, syncedL1Height) + } + log.Info("resetting syncing pipeline", "syncedL1Height", syncedL1Height) + s.blockQueue.Reset(syncedL1Height) +} diff --git a/rollup/rollup_sync_service/abi.go b/rollup/rollup_sync_service/abi.go index 6975001f1870..428413dec9c2 100644 --- a/rollup/rollup_sync_service/abi.go +++ b/rollup/rollup_sync_service/abi.go @@ -10,8 +10,8 @@ import ( "github.com/scroll-tech/go-ethereum/core/types" ) -// scrollChainMetaData contains ABI of the ScrollChain contract. -var scrollChainMetaData = &bind.MetaData{ +// ScrollChainMetaData contains ABI of the ScrollChain contract. 
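+// It is exported so that other packages, such as da_syncer, can decode rollup
+// events and commitBatch/commitBatchWithBlobProof calldata against the same ABI.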
+var ScrollChainMetaData = &bind.MetaData{ ABI: "[{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"CommitBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"stateRoot\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"FinalizeBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"RevertBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"oldMaxNumTxInChunk\",\"type\": \"uint256\"},{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"newMaxNumTxInChunk\",\"type\": \"uint256\"}],\"name\": \"UpdateMaxNumTxInChunk\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateProver\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateSequencer\",\"type\": \"event\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"}],\"name\": \"commitBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"commitBatchWithBlobProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"committedBatches\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBatch\",\"outputs\": [],\"stateMutability\": 
\"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatch4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBundle\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBundleWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"finalizedStateRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"_batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"_stateRoot\",\"type\": \"bytes32\"}],\"name\": \"importGenesisBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"isBatchFinalized\",\"outputs\": [{\"internalType\": \"bool\",\"name\": \"\",\"type\": \"bool\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [],\"name\": \"lastFinalizedBatchIndex\",\"outputs\": [{\"internalType\": \"uint256\",\"name\": \"\",\"type\": \"uint256\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": 
\"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"uint256\",\"name\": \"count\",\"type\": \"uint256\"}],\"name\": \"revertBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"withdrawRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"}]", } diff --git a/rollup/rollup_sync_service/abi_test.go b/rollup/rollup_sync_service/abi_test.go index d47a2c72e190..550c950bb337 100644 --- a/rollup/rollup_sync_service/abi_test.go +++ b/rollup/rollup_sync_service/abi_test.go @@ -13,7 +13,7 @@ import ( ) func TestEventSignatures(t *testing.T) { - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() if err != nil { t.Fatal("failed to get scroll chain abi", "err", err) } @@ -24,7 +24,7 @@ func TestEventSignatures(t *testing.T) { } func TestUnpackLog(t *testing.T) { - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() require.NoError(t, err) mockBatchIndex := big.NewInt(123) diff --git a/rollup/rollup_sync_service/l1client.go b/rollup/rollup_sync_service/l1client.go index 34ffc4db1bc2..b6be3e0bc611 100644 --- a/rollup/rollup_sync_service/l1client.go +++ b/rollup/rollup_sync_service/l1client.go @@ -27,9 +27,9 @@ type L1Client struct { l1FinalizeBatchEventSignature common.Hash } -// newL1Client initializes a new L1Client instance with the provided configuration. +// NewL1Client initializes a new L1Client instance with the provided configuration. // It checks for a valid scrollChainAddress and verifies the chain ID. -func newL1Client(ctx context.Context, l1Client sync_service.EthClient, l1ChainId uint64, scrollChainAddress common.Address, scrollChainABI *abi.ABI) (*L1Client, error) { +func NewL1Client(ctx context.Context, l1Client sync_service.EthClient, l1ChainId uint64, scrollChainAddress common.Address, scrollChainABI *abi.ABI) (*L1Client, error) { if scrollChainAddress == (common.Address{}) { return nil, errors.New("must pass non-zero scrollChainAddress to L1Client") } @@ -55,9 +55,9 @@ func newL1Client(ctx context.Context, l1Client sync_service.EthClient, l1ChainId return &client, nil } -// fetcRollupEventsInRange retrieves and parses commit/revert/finalize rollup events between block numbers: [from, to]. -func (c *L1Client) fetchRollupEventsInRange(from, to uint64) ([]types.Log, error) { - log.Trace("L1Client fetchRollupEventsInRange", "fromBlock", from, "toBlock", to) +// FetchRollupEventsInRange retrieves and parses commit/revert/finalize rollup events between block numbers: [from, to]. +func (c *L1Client) FetchRollupEventsInRange(from, to uint64) ([]types.Log, error) { + log.Trace("L1Client FetchRollupEventsInRange", "fromBlock", from, "toBlock", to) query := ethereum.FilterQuery{ FromBlock: big.NewInt(int64(from)), // inclusive @@ -79,8 +79,8 @@ func (c *L1Client) fetchRollupEventsInRange(from, to uint64) ([]types.Log, error return logs, nil } -// getLatestFinalizedBlockNumber fetches the block number of the latest finalized block from the L1 chain. -func (c *L1Client) getLatestFinalizedBlockNumber() (uint64, error) { +// GetLatestFinalizedBlockNumber fetches the block number of the latest finalized block from the L1 chain. 
+func (c *L1Client) GetLatestFinalizedBlockNumber() (uint64, error) { header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) if err != nil { return 0, err @@ -90,3 +90,69 @@ func (c *L1Client) getLatestFinalizedBlockNumber() (uint64, error) { } return header.Number.Uint64(), nil } + +// FetchTxData fetches tx data corresponding to given event log +func (c *L1Client) FetchTxData(vLog *types.Log) ([]byte, error) { + tx, _, err := c.client.TransactionByHash(c.ctx, vLog.TxHash) + if err != nil { + log.Debug("failed to get transaction by hash, probably an unindexed transaction, fetching the whole block to get the transaction", + "tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err) + block, err := c.client.BlockByHash(c.ctx, vLog.BlockHash) + if err != nil { + return nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) + } + + found := false + for _, txInBlock := range block.Transactions() { + if txInBlock.Hash() == vLog.TxHash { + tx = txInBlock + found = true + break + } + } + if !found { + return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) + } + } + + return tx.Data(), nil +} + +// FetchTxBlobHash fetches tx blob hash corresponding to given event log +func (c *L1Client) FetchTxBlobHash(vLog *types.Log) (common.Hash, error) { + tx, _, err := c.client.TransactionByHash(c.ctx, vLog.TxHash) + if err != nil { + log.Debug("failed to get transaction by hash, probably an unindexed transaction, fetching the whole block to get the transaction", + "tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err) + block, err := c.client.BlockByHash(c.ctx, vLog.BlockHash) + if err != nil { + return common.Hash{}, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) + } + + found := false + for _, txInBlock := range block.Transactions() { + if txInBlock.Hash() == vLog.TxHash { + tx = txInBlock + found = true + break + } + } + if !found { + return common.Hash{}, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) + } + } + blobHashes := tx.BlobHashes() + if len(blobHashes) == 0 { + return common.Hash{}, fmt.Errorf("transaction does not contain any blobs, tx hash: %v", vLog.TxHash.Hex()) + } + return blobHashes[0], nil +} + +// GetHeaderByNumber fetches the block header by number +func (c *L1Client) GetHeaderByNumber(blockNumber uint64) (*types.Header, error) { + header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(0).SetUint64(blockNumber)) + if err != nil { + return nil, err + } + return header, nil +} diff --git a/rollup/rollup_sync_service/l1client_test.go b/rollup/rollup_sync_service/l1client_test.go index 8c7bd92f8b11..acc3f8daad52 100644 --- a/rollup/rollup_sync_service/l1client_test.go +++ b/rollup/rollup_sync_service/l1client_test.go @@ -23,16 +23,16 @@ func TestL1Client(t *testing.T) { t.Fatal("failed to get scroll chain abi", "err", err) } scrollChainAddress := common.HexToAddress("0x0123456789abcdef") - l1Client, err := newL1Client(ctx, mockClient, 11155111, scrollChainAddress, scrollChainABI) + l1Client, err := NewL1Client(ctx, mockClient, 11155111, scrollChainAddress, 
scrollChainABI) require.NoError(t, err, "Failed to initialize L1Client") - blockNumber, err := l1Client.getLatestFinalizedBlockNumber() + blockNumber, err := l1Client.GetLatestFinalizedBlockNumber() assert.NoError(t, err, "Error getting latest confirmed block number") assert.Equal(t, uint64(36), blockNumber, "Unexpected block number") - logs, err := l1Client.fetchRollupEventsInRange(0, blockNumber) + logs, err := l1Client.FetchRollupEventsInRange(0, blockNumber) assert.NoError(t, err, "Error fetching rollup events in range") - assert.Empty(t, logs, "Expected no logs from fetchRollupEventsInRange") + assert.Empty(t, logs, "Expected no logs from FetchRollupEventsInRange") } type mockEthClient struct { diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index c03d63e05c47..4c5261511328 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -83,7 +83,7 @@ func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) } - client, err := newL1Client(ctx, l1Client, genesisConfig.Scroll.L1Config.L1ChainId, genesisConfig.Scroll.L1Config.ScrollChainAddress, scrollChainABI) + client, err := NewL1Client(ctx, l1Client, genesisConfig.Scroll.L1Config.L1ChainId, genesisConfig.Scroll.L1Config.ScrollChainAddress, scrollChainABI) if err != nil { return nil, fmt.Errorf("failed to initialize l1 client: %w", err) } @@ -176,7 +176,7 @@ func (s *RollupSyncService) fetchRollupEvents() { s.stateMu.Lock() defer s.stateMu.Unlock() - latestConfirmed, err := s.client.getLatestFinalizedBlockNumber() + latestConfirmed, err := s.client.GetLatestFinalizedBlockNumber() if err != nil { log.Warn("failed to get latest confirmed block number", "err", err) return @@ -196,7 +196,7 @@ func (s *RollupSyncService) fetchRollupEvents() { to = latestConfirmed } - logs, err := s.client.fetchRollupEventsInRange(from, to) + logs, err := s.client.FetchRollupEventsInRange(from, to) if err != nil { log.Error("failed to fetch rollup events in range", "from block", from, "to block", to, "err", err) return diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go index 61d63cdb7419..310d4be2515d 100644 --- a/rollup/rollup_sync_service/rollup_sync_service_test.go +++ b/rollup/rollup_sync_service/rollup_sync_service_test.go @@ -51,7 +51,7 @@ func TestRollupSyncServiceStartAndStop(t *testing.T) { } func TestDecodeBatchVersionAndChunkBlockRangesCodecv0(t *testing.T) { - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() require.NoError(t, err) service := &RollupSyncService{ @@ -110,7 +110,7 @@ func TestDecodeBatchVersionAndChunkBlockRangesCodecv0(t *testing.T) { } func TestDecodeBatchVersionAndChunkBlockRangesCodecv1(t *testing.T) { - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() require.NoError(t, err) service := &RollupSyncService{ @@ -163,7 +163,7 @@ func TestDecodeBatchVersionAndChunkBlockRangesCodecv1(t *testing.T) { } func TestDecodeBatchVersionAndChunkBlockRangesCodecv2(t *testing.T) { - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() require.NoError(t, err) service := &RollupSyncService{ @@ -216,7 +216,7 @@ func TestDecodeBatchVersionAndChunkBlockRangesCodecv2(t *testing.T) { } func 
TestDecodeBatchVersionAndChunkBlockRangesCodecv3(t *testing.T) { - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() require.NoError(t, err) service := &RollupSyncService{ From b30006ffb803c8eed97cfef2fa2a861be080058c Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 10 Dec 2024 12:08:38 +0800 Subject: [PATCH 02/17] port changes from #1068 --- cmd/utils/flags.go | 7 - core/blockchain.go | 2 +- core/rawdb/accessors_rollup_event.go | 41 --- core/rawdb/accessors_rollup_event_test.go | 64 ----- core/rawdb/schema.go | 6 - eth/ethconfig/config.go | 3 - go.mod | 5 +- go.sum | 20 +- rollup/da_syncer/da_syncer.go | 14 +- rollup/da_syncer/data_source.go | 7 +- rollup/da_syncer/syncing_pipeline.go | 8 +- .../rollup_sync_service.go | 248 +++--------------- .../rollup_sync_service_test.go | 225 ++++++++++------ 13 files changed, 211 insertions(+), 439 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 445248b1ff6f..cc0c8aa3a7e6 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -74,7 +74,6 @@ import ( "github.com/scroll-tech/go-ethereum/p2p/nat" "github.com/scroll-tech/go-ethereum/p2p/netutil" "github.com/scroll-tech/go-ethereum/params" - "github.com/scroll-tech/go-ethereum/rollup/da_syncer" "github.com/scroll-tech/go-ethereum/rollup/tracing" "github.com/scroll-tech/go-ethereum/rpc" ) @@ -1627,12 +1626,6 @@ func setEnableRollupVerify(ctx *cli.Context, cfg *ethconfig.Config) { func setDA(ctx *cli.Context, cfg *ethconfig.Config) { if ctx.IsSet(DASyncEnabledFlag.Name) { cfg.EnableDASyncing = ctx.Bool(DASyncEnabledFlag.Name) - if ctx.IsSet(DAModeFlag.Name) { - cfg.DA.FetcherMode = *flags.GlobalTextMarshaler(ctx, DAModeFlag.Name).(*da_syncer.FetcherMode) - } - if ctx.IsSet(DASnapshotFileFlag.Name) { - cfg.DA.SnapshotFilePath = ctx.String(DASnapshotFileFlag.Name) - } if ctx.IsSet(DABlobScanAPIEndpointFlag.Name) { cfg.DA.BlobScanAPIEndpoint = ctx.String(DABlobScanAPIEndpointFlag.Name) } diff --git a/core/blockchain.go b/core/blockchain.go index e3294ded5bde..a0bc05924531 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1850,7 +1850,7 @@ func (bc *BlockChain) BuildAndWriteBlock(parentBlock *types.Block, header *types l.BlockHash = blockHash } - return bc.writeBlockAndSetHead(fullBlock, receipts, logs, statedb, false) + return bc.writeBlockWithState(fullBlock, receipts, logs, statedb, false) } // insertSideChain is called when an import batch hits upon a pruned ancestor diff --git a/core/rawdb/accessors_rollup_event.go b/core/rawdb/accessors_rollup_event.go index 6670b4b7b85f..1b60f6e4f0d8 100644 --- a/core/rawdb/accessors_rollup_event.go +++ b/core/rawdb/accessors_rollup_event.go @@ -58,47 +58,6 @@ func ReadRollupEventSyncedL1BlockNumber(db ethdb.Reader) *uint64 { return &rollupEventSyncedL1BlockNumber } -// WriteBatchChunkRanges writes the block ranges for each chunk within a batch to the database. -// It serializes the chunk ranges using RLP and stores them under a key derived from the batch index. -// for backward compatibility, new info is also stored in CommittedBatchMeta. 
-func WriteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64, chunkBlockRanges []*ChunkBlockRange) { - value, err := rlp.EncodeToBytes(chunkBlockRanges) - if err != nil { - log.Crit("failed to RLP encode batch chunk ranges", "batch index", batchIndex, "err", err) - } - if err := db.Put(batchChunkRangesKey(batchIndex), value); err != nil { - log.Crit("failed to store batch chunk ranges", "batch index", batchIndex, "value", value, "err", err) - } -} - -// DeleteBatchChunkRanges removes the block ranges of all chunks associated with a specific batch from the database. -// Note: Only non-finalized batches can be reverted. -// for backward compatibility, new info is also stored in CommittedBatchMeta. -func DeleteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64) { - if err := db.Delete(batchChunkRangesKey(batchIndex)); err != nil { - log.Crit("failed to delete batch chunk ranges", "batch index", batchIndex, "err", err) - } -} - -// ReadBatchChunkRanges retrieves the block ranges of all chunks associated with a specific batch from the database. -// It returns a list of ChunkBlockRange pointers, or nil if no chunk ranges are found for the given batch index. -// for backward compatibility, new info is also stored in CommittedBatchMeta. -func ReadBatchChunkRanges(db ethdb.Reader, batchIndex uint64) []*ChunkBlockRange { - data, err := db.Get(batchChunkRangesKey(batchIndex)) - if err != nil && isNotFoundErr(err) { - return nil - } - if err != nil { - log.Crit("failed to read batch chunk ranges from database", "err", err) - } - - cr := new([]*ChunkBlockRange) - if err := rlp.Decode(bytes.NewReader(data), cr); err != nil { - log.Crit("Invalid ChunkBlockRange RLP", "batch index", batchIndex, "data", data, "err", err) - } - return *cr -} - // WriteFinalizedBatchMeta stores the metadata of a finalized batch in the database. 
func WriteFinalizedBatchMeta(db ethdb.KeyValueWriter, batchIndex uint64, finalizedBatchMeta *FinalizedBatchMeta) { value, err := rlp.EncodeToBytes(finalizedBatchMeta) diff --git a/core/rawdb/accessors_rollup_event_test.go b/core/rawdb/accessors_rollup_event_test.go index c74e93524376..a22880ee05a4 100644 --- a/core/rawdb/accessors_rollup_event_test.go +++ b/core/rawdb/accessors_rollup_event_test.go @@ -147,70 +147,6 @@ func TestFinalizedBatchMeta(t *testing.T) { } } -func TestBatchChunkRanges(t *testing.T) { - chunks := [][]*ChunkBlockRange{ - { - {StartBlockNumber: 1, EndBlockNumber: 100}, - {StartBlockNumber: 101, EndBlockNumber: 200}, - }, - { - {StartBlockNumber: 201, EndBlockNumber: 300}, - {StartBlockNumber: 301, EndBlockNumber: 400}, - }, - { - {StartBlockNumber: 401, EndBlockNumber: 500}, - }, - } - - db := NewMemoryDatabase() - - for i, chunkRange := range chunks { - batchIndex := uint64(i) - WriteBatchChunkRanges(db, batchIndex, chunkRange) - } - - for i, chunkRange := range chunks { - batchIndex := uint64(i) - readChunkRange := ReadBatchChunkRanges(db, batchIndex) - if len(readChunkRange) != len(chunkRange) { - t.Fatal("Mismatch in number of chunk ranges", "expected", len(chunkRange), "got", len(readChunkRange)) - } - - for j, cr := range readChunkRange { - if cr.StartBlockNumber != chunkRange[j].StartBlockNumber || cr.EndBlockNumber != chunkRange[j].EndBlockNumber { - t.Fatal("Mismatch in chunk range", "batch index", batchIndex, "expected", chunkRange[j], "got", cr) - } - } - } - - // over-write - newRange := []*ChunkBlockRange{{StartBlockNumber: 1001, EndBlockNumber: 1100}} - WriteBatchChunkRanges(db, 0, newRange) - readChunkRange := ReadBatchChunkRanges(db, 0) - if len(readChunkRange) != 1 || readChunkRange[0].StartBlockNumber != 1001 || readChunkRange[0].EndBlockNumber != 1100 { - t.Fatal("Over-write failed for chunk range", "expected", newRange, "got", readChunkRange) - } - - // read non-existing value - if readChunkRange = ReadBatchChunkRanges(db, uint64(len(chunks)+1)); readChunkRange != nil { - t.Fatal("Expected nil for non-existing value", "got", readChunkRange) - } - - // delete: revert batch - for i := range chunks { - batchIndex := uint64(i) - DeleteBatchChunkRanges(db, batchIndex) - - readChunkRange := ReadBatchChunkRanges(db, batchIndex) - if readChunkRange != nil { - t.Fatal("Chunk range was not deleted", "batch index", batchIndex) - } - } - - // delete non-existing value: ensure the delete operation handles non-existing values without errors. - DeleteBatchChunkRanges(db, uint64(len(chunks)+1)) -} - func TestWriteReadDeleteCommittedBatchMeta(t *testing.T) { db := NewMemoryDatabase() diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 2e4f2a18c5de..b4a51935b4ff 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -112,7 +112,6 @@ var ( // Scroll rollup event store rollupEventSyncedL1BlockNumberKey = []byte("R-LastRollupEventSyncedL1BlockNumber") - batchChunkRangesPrefix = []byte("R-bcr") batchMetaPrefix = []byte("R-bm") finalizedL2BlockNumberKey = []byte("R-finalized") lastFinalizedBatchIndexKey = []byte("R-finalizedBatchIndex") @@ -304,11 +303,6 @@ func SkippedTransactionHashKey(index uint64) []byte { return append(skippedTransactionHashPrefix, encodeBigEndian(index)...) } -// batchChunkRangesKey = batchChunkRangesPrefix + batch index (uint64 big endian) -func batchChunkRangesKey(batchIndex uint64) []byte { - return append(batchChunkRangesPrefix, encodeBigEndian(batchIndex)...) 
-} - // batchMetaKey = batchMetaPrefix + batch index (uint64 big endian) func batchMetaKey(batchIndex uint64) []byte { return append(batchMetaPrefix, encodeBigEndian(batchIndex)...) diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index ad295d5de3be..e8c7a5aa178c 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -94,9 +94,6 @@ var Defaults = Config{ GPO: FullNodeGPO, RPCTxFeeCap: 1, // 1 ether MaxBlockRange: -1, // Default unconfigured value: no block range limit for backward compatibility - DA: da_syncer.Config{ - FetcherMode: da_syncer.L1RPC, - }, } func init() { diff --git a/go.mod b/go.mod index 2bda32ee00d7..5cd2a1dccda3 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/Azure/azure-storage-blob-go v0.7.0 - github.com/VictoriaMetrics/fastcache v1.12.1 + github.com/VictoriaMetrics/fastcache v1.12.2 github.com/aws/aws-sdk-go-v2 v1.2.0 github.com/aws/aws-sdk-go-v2/config v1.1.1 github.com/aws/aws-sdk-go-v2/credentials v1.1.1 @@ -50,7 +50,7 @@ require ( github.com/prometheus/tsdb v0.7.1 github.com/rjeczalik/notify v0.9.1 github.com/rs/cors v1.7.0 - github.com/scroll-tech/da-codec v0.1.2 + github.com/scroll-tech/da-codec v0.1.3-0.20241210035500-70810faccc35 github.com/scroll-tech/zktrie v0.8.4 github.com/shirou/gopsutil v3.21.11+incompatible github.com/sourcegraph/conc v0.3.0 @@ -85,6 +85,7 @@ require ( github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/gotestyourself/gotestyourself v1.4.0 // indirect github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d // indirect github.com/mattn/go-runewidth v0.0.15 // indirect diff --git a/go.sum b/go.sum index 8c96ce6e7cd5..a540bb8de247 100644 --- a/go.sum +++ b/go.sum @@ -38,9 +38,11 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= -github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= +github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= +github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38= +github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -278,6 +280,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream 
v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -387,15 +391,15 @@ github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac h1:DjLrqjoOLVFug9ZkAbJYwjtYW51YZE0Num3p4cZXaZs= -github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= github.com/scroll-tech/da-codec v0.1.2 h1:QyJ+dQ4zWVVJwuqxNt4MiKyrymVc6rHe4YPtURkjiRc= github.com/scroll-tech/da-codec v0.1.2/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE= +github.com/scroll-tech/da-codec v0.1.3-0.20241210035500-70810faccc35 h1:sytWSptYjLWiVE4/GiGYUCXa9VBxfM9UpNpF5BSalI4= +github.com/scroll-tech/da-codec v0.1.3-0.20241210035500-70810faccc35/go.mod h1:vHY7S9ivJ7wlusDBrCh6Lq7k5qNFkTWP4TRDKx35yck= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= @@ -430,8 +434,7 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZBNiZO37uWS/nDaKiIwHz4mDo4pnA+fqTO0= -github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= 
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= @@ -580,6 +583,7 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= diff --git a/rollup/da_syncer/da_syncer.go b/rollup/da_syncer/da_syncer.go index c3c223ff22a9..ead133e90b87 100644 --- a/rollup/da_syncer/da_syncer.go +++ b/rollup/da_syncer/da_syncer.go @@ -28,21 +28,21 @@ func (s *DASyncer) SyncOneBlock(block *da.PartialBlock) error { currentBlock := s.blockchain.CurrentBlock() // we expect blocks to be consecutive. block.PartialHeader.Number == parentBlock.Number+1. - if block.PartialHeader.Number <= currentBlock.Number.Uint64() { - log.Debug("block number is too low", "block number", block.PartialHeader.Number, "parent block number", currentBlock.Number.Uint64()) + if block.PartialHeader.Number <= currentBlock.Number().Uint64() { + log.Debug("block number is too low", "block number", block.PartialHeader.Number, "parent block number", currentBlock.Number().Uint64()) return ErrBlockTooLow - } else if block.PartialHeader.Number > currentBlock.Number.Uint64()+1 { - log.Debug("block number is too high", "block number", block.PartialHeader.Number, "parent block number", currentBlock.Number.Uint64()) + } else if block.PartialHeader.Number > currentBlock.Number().Uint64()+1 { + log.Debug("block number is too high", "block number", block.PartialHeader.Number, "parent block number", currentBlock.Number().Uint64()) return ErrBlockTooHigh } - parentBlock := s.blockchain.GetBlockByNumber(currentBlock.Number.Uint64()) + parentBlock := s.blockchain.GetBlockByNumber(currentBlock.Number().Uint64()) if _, err := s.blockchain.BuildAndWriteBlock(parentBlock, block.PartialHeader.ToHeader(), block.Transactions); err != nil { return fmt.Errorf("failed building and writing block, number: %d, error: %v", block.PartialHeader.Number, err) } - if s.blockchain.CurrentBlock().Number.Uint64()%1000 == 0 { - log.Info("L1 sync progress", "blockhain height", s.blockchain.CurrentBlock().Number.Uint64(), "block hash", s.blockchain.CurrentBlock().Hash(), "root", s.blockchain.CurrentBlock().Root) + if s.blockchain.CurrentBlock().Number().Uint64()%1000 == 0 { + log.Info("L1 sync progress", "blockhain height", s.blockchain.CurrentBlock().Number().Uint64(), "block hash", s.blockchain.CurrentBlock().Hash(), "root", s.blockchain.CurrentBlock().Root) } return nil diff --git a/rollup/da_syncer/data_source.go b/rollup/da_syncer/data_source.go index f417d09af00e..7beab3baea32 100644 --- a/rollup/da_syncer/data_source.go +++ b/rollup/da_syncer/data_source.go @@ -2,7 +2,6 @@ package da_syncer import ( "context" - "errors" "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/ethdb" @@ -36,9 +35,5 @@ func NewDataSourceFactory(blockchain *core.BlockChain, genesisConfig *params.Cha } func (ds *DataSourceFactory) OpenDataSource(ctx context.Context, l1height uint64) (DataSource, error) { - if 
ds.config.FetcherMode == L1RPC { - return da.NewCalldataBlobSource(ctx, l1height, ds.l1Client, ds.blobClient, ds.db) - } else { - return nil, errors.New("snapshot_data_source: not implemented") - } + return da.NewCalldataBlobSource(ctx, l1height, ds.l1Client, ds.blobClient, ds.db) } diff --git a/rollup/da_syncer/syncing_pipeline.go b/rollup/da_syncer/syncing_pipeline.go index 6795f2608e05..27eaf20cb38a 100644 --- a/rollup/da_syncer/syncing_pipeline.go +++ b/rollup/da_syncer/syncing_pipeline.go @@ -21,11 +21,9 @@ import ( // Config is the configuration parameters of data availability syncing. type Config struct { - FetcherMode FetcherMode // mode of fetcher - SnapshotFilePath string // path to snapshot file - BlobScanAPIEndpoint string // BlobScan blob api endpoint - BlockNativeAPIEndpoint string // BlockNative blob api endpoint - BeaconNodeAPIEndpoint string // Beacon node api endpoint + BlobScanAPIEndpoint string // BlobScan blob api endpoint + BlockNativeAPIEndpoint string // BlockNative blob api endpoint + BeaconNodeAPIEndpoint string // Beacon node api endpoint } // SyncingPipeline is a derivation pipeline for syncing data from L1 and DA and transform it into diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index 4c5261511328..bbb2b4940393 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -4,18 +4,12 @@ import ( "context" "encoding/json" "fmt" - "math/big" "os" "reflect" "sync" "time" "github.com/scroll-tech/da-codec/encoding" - "github.com/scroll-tech/da-codec/encoding/codecv0" - "github.com/scroll-tech/da-codec/encoding/codecv1" - "github.com/scroll-tech/da-codec/encoding/codecv2" - "github.com/scroll-tech/da-codec/encoding/codecv3" - "github.com/scroll-tech/da-codec/encoding/codecv4" "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" @@ -26,7 +20,6 @@ import ( "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/node" "github.com/scroll-tech/go-ethereum/params" - "github.com/scroll-tech/go-ethereum/rollup/rcfg" "github.com/scroll-tech/go-ethereum/rollup/sync_service" "github.com/scroll-tech/go-ethereum/rollup/withdrawtrie" @@ -78,7 +71,7 @@ func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig return nil, fmt.Errorf("missing L1 config in genesis") } - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() if err != nil { return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) } @@ -222,12 +215,11 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB batchIndex := event.BatchIndex.Uint64() log.Trace("found new CommitBatch event", "batch index", batchIndex) - committedBatchMeta, chunkBlockRanges, err := s.getCommittedBatchMeta(batchIndex, &vLog) + committedBatchMeta, err := s.getCommittedBatchMeta(batchIndex, &vLog) if err != nil { return fmt.Errorf("failed to get chunk ranges, batch index: %v, err: %w", batchIndex, err) } rawdb.WriteCommittedBatchMeta(s.db, batchIndex, committedBatchMeta) - rawdb.WriteBatchChunkRanges(s.db, batchIndex, chunkBlockRanges) case s.l1RevertBatchEventSignature: event := &L1RevertBatchEvent{} @@ -238,7 +230,6 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB log.Trace("found new RevertBatch event", "batch index", batchIndex) rawdb.DeleteCommittedBatchMeta(s.db, batchIndex) - 
rawdb.DeleteBatchChunkRanges(s.db, batchIndex) case s.l1FinalizeBatchEventSignature: event := &L1FinalizeBatchEvent{} @@ -273,12 +264,12 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB for index := startBatchIndex; index <= batchIndex; index++ { committedBatchMeta := rawdb.ReadCommittedBatchMeta(s.db, index) - chunks, err := s.getLocalChunksForBatch(index) + chunks, err := s.getLocalChunksForBatch(committedBatchMeta.ChunkBlockRanges) if err != nil { return fmt.Errorf("failed to get local node info, batch index: %v, err: %w", index, err) } - endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentFinalizedBatchMeta, committedBatchMeta, chunks, s.bc.Config(), s.stack) + endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentFinalizedBatchMeta, committedBatchMeta, chunks, s.stack) if err != nil { return fmt.Errorf("fatal: validateBatch failed: finalize event: %v, err: %w", event, err) } @@ -313,12 +304,10 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB return nil } -func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encoding.Chunk, error) { - chunkBlockRanges := rawdb.ReadBatchChunkRanges(s.db, batchIndex) +func (s *RollupSyncService) getLocalChunksForBatch(chunkBlockRanges []*rawdb.ChunkBlockRange) ([]*encoding.Chunk, error) { if len(chunkBlockRanges) == 0 { - return nil, fmt.Errorf("failed to get batch chunk ranges, empty chunk block ranges") + return nil, fmt.Errorf("chunkBlockRanges is empty") } - endBlockNumber := chunkBlockRanges[len(chunkBlockRanges)-1].EndBlockNumber for i := 0; i < defaultMaxRetries; i++ { if s.ctx.Err() != nil { @@ -366,13 +355,13 @@ func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encodi return chunks, nil } -func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types.Log) (*rawdb.CommittedBatchMeta, []*rawdb.ChunkBlockRange, error) { +func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types.Log) (*rawdb.CommittedBatchMeta, error) { if batchIndex == 0 { return &rawdb.CommittedBatchMeta{ Version: 0, BlobVersionedHashes: nil, ChunkBlockRanges: []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, - }, []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, nil + }, nil } tx, _, err := s.client.client.TransactionByHash(s.ctx, vLog.TxHash) @@ -381,11 +370,11 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types "tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err) block, err := s.client.client.BlockByHash(s.ctx, vLog.BlockHash) if err != nil { - return nil, nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) + return nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) } if block == nil { - return nil, nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex()) + return nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex()) } found := false @@ -397,7 +386,7 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types } } if !found { - return nil, nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block 
number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) + return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) } } @@ -406,19 +395,19 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types if tx.Type() == types.BlobTxType { blobVersionedHashes := tx.BlobHashes() if blobVersionedHashes == nil { - return nil, nil, fmt.Errorf("invalid blob transaction, blob hashes is nil, tx hash: %v", tx.Hash().Hex()) + return nil, fmt.Errorf("invalid blob transaction, blob hashes is nil, tx hash: %v", tx.Hash().Hex()) } commitBatchMeta.BlobVersionedHashes = blobVersionedHashes } version, ranges, err := s.decodeBatchVersionAndChunkBlockRanges(tx.Data()) if err != nil { - return nil, nil, fmt.Errorf("failed to decode chunk block ranges, batch index: %v, err: %w", batchIndex, err) + return nil, fmt.Errorf("failed to decode chunk block ranges, batch index: %v, err: %w", batchIndex, err) } commitBatchMeta.Version = version commitBatchMeta.ChunkBlockRanges = ranges - return &commitBatchMeta, ranges, nil + return &commitBatchMeta, nil } // decodeBatchVersionAndChunkBlockRanges decodes version and chunks' block ranges in a batch based on the commit batch transaction's calldata. @@ -493,10 +482,8 @@ func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte) // - batchIndex: batch index of the validated batch // - event: L1 finalize batch event data // - parentFinalizedBatchMeta: metadata of the finalized parent batch -// - committedBatchMeta: committed batch metadata stored in the database. -// Can be nil for older client versions that don't store this information. +// - committedBatchMeta: committed batch metadata stored in the database // - chunks: slice of chunk data for the current batch -// - chainCfg: chain configuration to identify the codec version when committedBatchMeta is nil // - stack: node stack to terminate the node in case of inconsistency // // Returns: @@ -507,7 +494,7 @@ func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte) // Note: This function is compatible with both "finalize by batch" and "finalize by bundle" methods. // In "finalize by bundle", only the last batch of each bundle is fully verified. // This check still ensures the correctness of all batch hashes in the bundle due to the parent-child relationship between batch hashes. 
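+// Illustration (added comment, not in the original patch): if a bundle
+// finalizes batches 5..7, only batch 7 is checked against the event, but
+// batch 7's hash commits to batch 6's via the parent batch hash, and batch
+// 6's to batch 5's, so all three hashes are transitively pinned.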
-func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, chainCfg *params.ChainConfig, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { +func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { if len(chunks) == 0 { return 0, nil, fmt.Errorf("invalid argument: length of chunks is 0, batch index: %v", batchIndex) } @@ -532,71 +519,17 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz Chunks: chunks, } - var codecVersion encoding.CodecVersion - if committedBatchMeta != nil { - codecVersion = encoding.CodecVersion(committedBatchMeta.Version) - } else { - codecVersion = determineCodecVersion(startBlock.Header.Number, startBlock.Header.Time, chainCfg) + codecVersion := encoding.CodecVersion(committedBatchMeta.Version) + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return 0, nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) } - var localBatchHash common.Hash - if codecVersion == encoding.CodecV0 { - daBatch, err := codecv0.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv0 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV1 { - daBatch, err := codecv1.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv1 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV2 { - daBatch, err := codecv2.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv2 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV3 { - daBatch, err := codecv3.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv3 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV4 { - // Check if committedBatchMeta exists, for backward compatibility with older client versions - if committedBatchMeta == nil { - return 0, nil, fmt.Errorf("missing committed batch metadata for codecV4, please use the latest client version, batch index: %v", batchIndex) - } - - // Validate BlobVersionedHashes - if committedBatchMeta.BlobVersionedHashes == nil || len(committedBatchMeta.BlobVersionedHashes) != 1 { - return 0, nil, fmt.Errorf("invalid blob hashes, batch index: %v, blob hashes: %v", batchIndex, committedBatchMeta.BlobVersionedHashes) - } - - // Attempt to create DA batch with compression - daBatch, err := codecv4.NewDABatch(batch, true) - if err != nil { - // If compression fails, try without compression - log.Warn("failed to create codecv4 DA batch with compress enabling", "batch index", batchIndex, "err", err) - daBatch, err = codecv4.NewDABatch(batch, false) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err) - } - } else if daBatch.BlobVersionedHash != committedBatchMeta.BlobVersionedHashes[0] { - // Inconsistent blob versioned hash, fallback to uncompressed 
DA batch - log.Warn("impossible case: inconsistent blob versioned hash", "batch index", batchIndex, "expected", committedBatchMeta.BlobVersionedHashes[0], "actual", daBatch.BlobVersionedHash) - daBatch, err = codecv4.NewDABatch(batch, false) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err) - } - } - - localBatchHash = daBatch.Hash() - } else { - return 0, nil, fmt.Errorf("unsupported codec version: %v", codecVersion) + daBatch, err := codec.NewDABatch(batch) + if err != nil { + return 0, nil, fmt.Errorf("failed to create DA batch, batch index: %v, codec version: %v, expected blob hashes: %v, err: %w", batchIndex, codecVersion, committedBatchMeta.BlobVersionedHashes, err) } + localBatchHash := daBatch.Hash() localStateRoot := endBlock.Header.Root localWithdrawRoot := endBlock.WithdrawRoot @@ -648,126 +581,29 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz return endBlock.Header.Number.Uint64(), finalizedBatchMeta, nil } -// determineCodecVersion determines the codec version based on the block number and chain configuration. -func determineCodecVersion(startBlockNumber *big.Int, startBlockTimestamp uint64, chainCfg *params.ChainConfig) encoding.CodecVersion { - switch { - case startBlockNumber.Uint64() == 0 || !chainCfg.IsBernoulli(startBlockNumber): - return encoding.CodecV0 // codecv0: genesis batch or batches before Bernoulli - case !chainCfg.IsCurie(startBlockNumber): - return encoding.CodecV1 // codecv1: batches after Bernoulli and before Curie - case !chainCfg.IsDarwin(startBlockTimestamp): - return encoding.CodecV2 // codecv2: batches after Curie and before Darwin - case !chainCfg.IsDarwinV2(startBlockTimestamp): - return encoding.CodecV3 // codecv3: batches after Darwin - default: - return encoding.CodecV4 // codecv4: batches after DarwinV2 - } -} - // decodeBlockRangesFromEncodedChunks decodes the provided chunks into a list of block ranges. 
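+// Note (added comment): the per-version decoding below is delegated to the
+// da-codec library: the codec resolved via encoding.CodecFromVersion parses
+// the raw chunks with DecodeDAChunksRawTx, and the block ranges are read off
+// the first and last decoded blocks of each chunk.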
func decodeBlockRangesFromEncodedChunks(codecVersion encoding.CodecVersion, chunks [][]byte) ([]*rawdb.ChunkBlockRange, error) { - var chunkBlockRanges []*rawdb.ChunkBlockRange - for _, chunk := range chunks { - if len(chunk) < 1 { - return nil, fmt.Errorf("invalid chunk, length is less than 1") - } - - numBlocks := int(chunk[0]) - - switch codecVersion { - case encoding.CodecV0: - if len(chunk) < 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv0.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv0.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } - - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV1: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv1.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv1.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } - - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV2: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv2.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv2.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } - - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV3: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv3.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv3.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err) + } - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV4: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv4.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv4.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return 
nil, err - } - } + daChunksRawTx, err := codec.DecodeDAChunksRawTx(chunks) + if err != nil { + return nil, fmt.Errorf("failed to decode DA chunks, version: %v, err: %w", codecVersion, err) + } - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - default: - return nil, fmt.Errorf("unexpected batch version %v", codecVersion) + var chunkBlockRanges []*rawdb.ChunkBlockRange + for _, daChunkRawTx := range daChunksRawTx { + if len(daChunkRawTx.Blocks) == 0 { + return nil, fmt.Errorf("no blocks found in DA chunk, version: %v", codecVersion) } + + chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ + StartBlockNumber: daChunkRawTx.Blocks[0].Number(), + EndBlockNumber: daChunkRawTx.Blocks[len(daChunkRawTx.Blocks)-1].Number(), + }) } + return chunkBlockRanges, nil } diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go index 310d4be2515d..f1b09a37a1f2 100644 --- a/rollup/rollup_sync_service/rollup_sync_service_test.go +++ b/rollup/rollup_sync_service/rollup_sync_service_test.go @@ -313,7 +313,7 @@ func TestGetCommittedBatchMetaCodecv0(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x0"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(metadata.Version)) @@ -324,13 +324,13 @@ func TestGetCommittedBatchMetaCodecv0(t *testing.T) { {StartBlockNumber: 911156, EndBlockNumber: 911159}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } @@ -367,7 +367,7 @@ func TestGetCommittedBatchMetaCodecv1(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x1"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(metadata.Version)) @@ -376,13 +376,13 @@ func TestGetCommittedBatchMetaCodecv1(t *testing.T) { {StartBlockNumber: 1, EndBlockNumber: 11}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } @@ -419,7 +419,7 @@ func TestGetCommittedBatchMetaCodecv2(t *testing.T) { 
vLog := &types.Log{ TxHash: common.HexToHash("0x2"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(metadata.Version)) @@ -456,13 +456,13 @@ func TestGetCommittedBatchMetaCodecv2(t *testing.T) { {StartBlockNumber: 174, EndBlockNumber: 174}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } @@ -499,7 +499,7 @@ func TestGetCommittedBatchMetaCodecv3(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x3"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(metadata.Version)) @@ -537,20 +537,18 @@ func TestGetCommittedBatchMetaCodecv3(t *testing.T) { {StartBlockNumber: 70, EndBlockNumber: 70}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } func TestValidateBatchCodecv0(t *testing.T) { - chainConfig := ¶ms.ChainConfig{} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -560,50 +558,57 @@ func TestValidateBatchCodecv0(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0xfd3ecf106ce993adc6db68e42ce701bfe638434395abdeeb871f7bd395ae2368"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV0), + BlobVersionedHashes: nil, + } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := 
readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0xadb8e526c3fdc2045614158300789cd66e7a945efe5a484db00b5ef9a26016d7"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV0), + BlobVersionedHashes: nil, + } + + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 42, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } func TestValidateBatchCodecv1(t *testing.T) { - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0)} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -613,50 +618,56 @@ func TestValidateBatchCodecv1(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0x73cb3310646716cb782702a0ec4ad33cf55633c85daf96b641953c5defe58031"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV1), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x0129554070e4323800ca0e5ddd17bc447854601b306a70870002a058741214b3")}, + } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, 
finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0x7f230ce84b4bf86f8ee22ffb5c145e3ef3ddf2a76da4936a33f33cebdb63a48a"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV1), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a327088bb2b13151449d8313c281d0006d12e8453e863637b746898b6ad5a6")}, + } + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 42, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } func TestValidateBatchCodecv2(t *testing.T) { - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -666,50 +677,56 @@ func TestValidateBatchCodecv2(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0xaccf37a0b974f2058692d366b2ea85502c99db4a0bcb9b77903b49bf866a463b"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV2), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")}, + } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0x62ec61e1fdb334868ffd471df601f6858e692af01d42b5077c805a9fd4558c91"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, 
finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV2), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + } + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 42, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } func TestValidateBatchCodecv3(t *testing.T) { - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -719,7 +736,7 @@ func TestValidateBatchCodecv3(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0x015eb56fb95bf9a06157cfb8389ba7c2b6b08373e22581ac2ba387003708265d"), @@ -727,46 +744,53 @@ func TestValidateBatchCodecv3(t *testing.T) { WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")}, + } + + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0x382cb0d507e3d7507f556c52e05f76b05e364ad26205e7f62c95967a19c2f35d"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: 
[]common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + } + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 42, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } func TestValidateBatchUpgrades(t *testing.T) { - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(3), CurieBlock: big.NewInt(14), DarwinTime: func() *uint64 { t := uint64(1684762320); return &t }()} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0x4605465b7470c8565b123330d7186805caf9a7f2656d8e9e744b62e14ca22c3d"), @@ -774,82 +798,97 @@ func TestValidateBatchUpgrades(t *testing.T) { WithdrawRoot: chunk1.Blocks[len(chunk1.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1}, chainConfig, nil) + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV0), + BlobVersionedHashes: nil, + } + + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil) assert.NoError(t, err) assert.Equal(t, uint64(2), endBlock1) block2 := readBlockFromJSON(t, "./testdata/blockTrace_03.json") chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 0, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0xc4af33bce87aa702edc3ad4b7d34730d25719427704e250787f99e0f55049252"), StateRoot: chunk2.Blocks[len(chunk2.Blocks)-1].Header.Root, WithdrawRoot: chunk2.Blocks[len(chunk2.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk2}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV1), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a688c6e137310df38a62f5ad1e5119b8cb0455c386a9a4079b14fe92a239aa")}, + } + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil) assert.NoError(t, err) assert.Equal(t, uint64(3), endBlock2) block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ 
BatchHash: event2.BatchHash, TotalL1MessagePopped: 0, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) event3 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(2), BatchHash: common.HexToHash("0x9f87f2de2019ed635f867b1e61be6a607c3174ced096f370fd18556c38833c62"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } - endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex.Uint64(), event3, parentBatchMeta3, nil, []*encoding.Chunk{chunk3}, chainConfig, nil) + committedBatchMeta3 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV1), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01ea66c4de196d36e2c3a5d7c0045100b9e46ef65be8f7a921ef20e6f2e99ebd")}, + } + endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex.Uint64(), event3, parentFinalizedBatchMeta3, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock3) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta4 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta4 := &rawdb.FinalizedBatchMeta{ BatchHash: event3.BatchHash, TotalL1MessagePopped: 11, StateRoot: event3.StateRoot, WithdrawRoot: event3.WithdrawRoot, } - assert.Equal(t, parentBatchMeta4, finalizedBatchMeta3) + assert.Equal(t, parentFinalizedBatchMeta4, finalizedBatchMeta3) event4 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(3), BatchHash: common.HexToHash("0xd33332aef8efbc9a0be4c4694088ac0dd052d2d3ad3ffda5e4c2010825e476bc"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex.Uint64(), event4, parentBatchMeta4, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta4 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + } + endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex.Uint64(), event4, parentFinalizedBatchMeta4, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock4) - parentBatchMeta5 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta5 := &rawdb.FinalizedBatchMeta{ BatchHash: event4.BatchHash, TotalL1MessagePopped: 42, StateRoot: event4.StateRoot, WithdrawRoot: event4.WithdrawRoot, } - assert.Equal(t, parentBatchMeta5, finalizedBatchMeta4) + assert.Equal(t, parentFinalizedBatchMeta5, finalizedBatchMeta4) } func TestValidateBatchInFinalizeByBundle(t *testing.T) { - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: func() *uint64 { t := uint64(0); return &t }()} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") block2 := readBlockFromJSON(t, "./testdata/blockTrace_03.json") block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") @@ -867,29 +906,49 @@ func TestValidateBatchInFinalizeByBundle(t *testing.T) { WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(0, event, &rawdb.FinalizedBatchMeta{}, nil, []*encoding.Chunk{chunk1}, chainConfig, nil) + 
committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01bbc6b98d7d3783730b6208afac839ad37dcf211b9d9e7c83a5f9d02125ddd7")}, + } + + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01c81e5696e00f1e6e7d76c197f74ed51650147c49c4e6e5b0b702cdcc54352a")}, + } + + committedBatchMeta3 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x012e15203534ae3f4cbe1b0f58fe6db6e5c29432115a8ece6ef5550bf2ffce4c")}, + } + + committedBatchMeta4 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + } + + endBlock1, finalizedBatchMeta1, err := validateBatch(0, event, &rawdb.FinalizedBatchMeta{}, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil) assert.NoError(t, err) assert.Equal(t, uint64(2), endBlock1) - endBlock2, finalizedBatchMeta2, err := validateBatch(1, event, finalizedBatchMeta1, nil, []*encoding.Chunk{chunk2}, chainConfig, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(1, event, finalizedBatchMeta1, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil) assert.NoError(t, err) assert.Equal(t, uint64(3), endBlock2) - endBlock3, finalizedBatchMeta3, err := validateBatch(2, event, finalizedBatchMeta2, nil, []*encoding.Chunk{chunk3}, chainConfig, nil) + endBlock3, finalizedBatchMeta3, err := validateBatch(2, event, finalizedBatchMeta2, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock3) - endBlock4, finalizedBatchMeta4, err := validateBatch(3, event, finalizedBatchMeta3, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + endBlock4, finalizedBatchMeta4, err := validateBatch(3, event, finalizedBatchMeta3, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock4) - parentBatchMeta5 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta5 := &rawdb.FinalizedBatchMeta{ BatchHash: event.BatchHash, TotalL1MessagePopped: 42, StateRoot: event.StateRoot, WithdrawRoot: event.WithdrawRoot, } - assert.Equal(t, parentBatchMeta5, finalizedBatchMeta4) + assert.Equal(t, parentFinalizedBatchMeta5, finalizedBatchMeta4) } func readBlockFromJSON(t *testing.T, filename string) *encoding.Block { From de37d472eab1b45807b49121fbd53c820834021b Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 10 Dec 2024 17:26:22 +0800 Subject: [PATCH 03/17] go.mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index a540bb8de247..95119a13f63d 100644 --- a/go.sum +++ b/go.sum @@ -396,8 +396,6 @@ github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/f github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/da-codec v0.1.2 h1:QyJ+dQ4zWVVJwuqxNt4MiKyrymVc6rHe4YPtURkjiRc= -github.com/scroll-tech/da-codec v0.1.2/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE= github.com/scroll-tech/da-codec v0.1.3-0.20241210035500-70810faccc35 h1:sytWSptYjLWiVE4/GiGYUCXa9VBxfM9UpNpF5BSalI4= github.com/scroll-tech/da-codec 
v0.1.3-0.20241210035500-70810faccc35/go.mod h1:vHY7S9ivJ7wlusDBrCh6Lq7k5qNFkTWP4TRDKx35yck=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=

From e34fecf3a8e6f5e79f99b34daccab89f2cef76a6 Mon Sep 17 00:00:00 2001
From: jonastheis <4181434+jonastheis@users.noreply.github.com>
Date: Tue, 10 Dec 2024 17:28:13 +0800
Subject: [PATCH 04/17] fix compile error

---
 rollup/rollup_sync_service/l1client_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rollup/rollup_sync_service/l1client_test.go b/rollup/rollup_sync_service/l1client_test.go
index acc3f8daad52..394f455b80c5 100644
--- a/rollup/rollup_sync_service/l1client_test.go
+++ b/rollup/rollup_sync_service/l1client_test.go
@@ -18,7 +18,7 @@ func TestL1Client(t *testing.T) {
 	ctx := context.Background()
 	mockClient := &mockEthClient{}

-	scrollChainABI, err := scrollChainMetaData.GetAbi()
+	scrollChainABI, err := ScrollChainMetaData.GetAbi()
 	if err != nil {
 		t.Fatal("failed to get scroll chain abi", "err", err)
 	}

From 1327771c7089b51dccf7c62c54fded78e9e134d3 Mon Sep 17 00:00:00 2001
From: jonastheis <4181434+jonastheis@users.noreply.github.com>
Date: Tue, 10 Dec 2024 18:09:50 +0800
Subject: [PATCH 05/17] fix goimports

---
 rollup/da_syncer/da/calldata_blob_source.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go
index 47eabfceb65f..db0f5f01c107 100644
--- a/rollup/da_syncer/da/calldata_blob_source.go
+++ b/rollup/da_syncer/da/calldata_blob_source.go
@@ -6,6 +6,7 @@ import (
 	"fmt"

 	"github.com/scroll-tech/da-codec/encoding"
+
 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/core/types"

From b05954d5b03502cc67e1c45a3b414353ff6488d1 Mon Sep 17 00:00:00 2001
From: jonastheis <4181434+jonastheis@users.noreply.github.com>
Date: Tue, 10 Dec 2024 19:42:11 +0800
Subject: [PATCH 06/17] fix log

---
 rollup/da_syncer/da_syncer.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rollup/da_syncer/da_syncer.go b/rollup/da_syncer/da_syncer.go
index ead133e90b87..b787abff3d8a 100644
--- a/rollup/da_syncer/da_syncer.go
+++ b/rollup/da_syncer/da_syncer.go
@@ -42,7 +42,7 @@ func (s *DASyncer) SyncOneBlock(block *da.PartialBlock) error {
 	}

 	if s.blockchain.CurrentBlock().Number().Uint64()%1000 == 0 {
-		log.Info("L1 sync progress", "blockhain height", s.blockchain.CurrentBlock().Number().Uint64(), "block hash", s.blockchain.CurrentBlock().Hash(), "root", s.blockchain.CurrentBlock().Root)
+		log.Info("L1 sync progress", "blockchain height", s.blockchain.CurrentBlock().Number().Uint64(), "block hash", s.blockchain.CurrentBlock().Hash(), "root", s.blockchain.CurrentBlock().Root())
 	}

 	return nil

From ce8f7856ea6f9ea5c720aa56a3fcd9992f868855 Mon Sep 17 00:00:00 2001
From: jonastheis <4181434+jonastheis@users.noreply.github.com>
Date: Thu, 12 Dec 2024 11:20:34 +0700
Subject: [PATCH 07/17] address review comments

---
 rollup/da_syncer/batch_queue.go               |  1 +
 .../blob_client/block_native_client.go        |  6 ++-
 rollup/da_syncer/da/commitV1.go               |  2 +-
 rollup/da_syncer/da_queue.go                  |  6 +++
 rollup/da_syncer/da_syncer.go                 |  4 ++
 rollup/da_syncer/modes.go                     | 52 -------------------
 6 files changed, 17 insertions(+), 54 deletions(-)
 delete mode 100644 rollup/da_syncer/modes.go

diff --git a/rollup/da_syncer/batch_queue.go b/rollup/da_syncer/batch_queue.go
index a0172a86c077..093ce12d830e 100644
--- a/rollup/da_syncer/batch_queue.go
+++
b/rollup/da_syncer/batch_queue.go @@ -98,5 +98,6 @@ func (bq *BatchQueue) deleteBatch(batch da.Entry) { func (bq *BatchQueue) Reset(height uint64) { bq.batches.Clear() bq.batchesMap.Clear() + bq.lastFinalizedBatchIndex = 0 bq.DAQueue.Reset(height) } diff --git a/rollup/da_syncer/blob_client/block_native_client.go b/rollup/da_syncer/blob_client/block_native_client.go index ddd574d02d10..7b1cce86f083 100644 --- a/rollup/da_syncer/blob_client/block_native_client.go +++ b/rollup/da_syncer/blob_client/block_native_client.go @@ -30,7 +30,11 @@ func (c *BlockNativeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Con if err != nil { return nil, fmt.Errorf("failed to join path, err: %w", err) } - resp, err := http.Get(path) + req, err := http.NewRequestWithContext(ctx, "GET", path, nil) + if err != nil { + return nil, fmt.Errorf("cannot create request, err: %w", err) + } + resp, err := http.DefaultClient.Do(req) if err != nil { return nil, fmt.Errorf("cannot do request, err: %w", err) } diff --git a/rollup/da_syncer/da/commitV1.go b/rollup/da_syncer/da/commitV1.go index 4670eec8bbcb..532b0f81abd6 100644 --- a/rollup/da_syncer/da/commitV1.go +++ b/rollup/da_syncer/da/commitV1.go @@ -52,7 +52,7 @@ func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, // compute blob versioned hash and compare with one from tx c, err := kzg4844.BlobToCommitment(blob) if err != nil { - return nil, fmt.Errorf("failed to create blob commitment") + return nil, fmt.Errorf("failed to create blob commitment: %w", err) } blobVersionedHash := common.Hash(kzg4844.CalcBlobHashV1(sha256.New(), &c)) if blobVersionedHash != versionedHash { diff --git a/rollup/da_syncer/da_queue.go b/rollup/da_syncer/da_queue.go index 64673a4a646b..3602947f51e2 100644 --- a/rollup/da_syncer/da_queue.go +++ b/rollup/da_syncer/da_queue.go @@ -27,6 +27,12 @@ func NewDAQueue(l1height uint64, dataSourceFactory *DataSourceFactory) *DAQueue func (dq *DAQueue) NextDA(ctx context.Context) (da.Entry, error) { for len(dq.da) == 0 { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + err := dq.getNextData(ctx) if err != nil { return nil, err diff --git a/rollup/da_syncer/da_syncer.go b/rollup/da_syncer/da_syncer.go index b787abff3d8a..e0970d37bc9a 100644 --- a/rollup/da_syncer/da_syncer.go +++ b/rollup/da_syncer/da_syncer.go @@ -37,6 +37,10 @@ func (s *DASyncer) SyncOneBlock(block *da.PartialBlock) error { } parentBlock := s.blockchain.GetBlockByNumber(currentBlock.Number().Uint64()) + if parentBlock == nil { + return fmt.Errorf("parent block not found at height %d", currentBlock.Number().Uint64()) + } + if _, err := s.blockchain.BuildAndWriteBlock(parentBlock, block.PartialHeader.ToHeader(), block.Transactions); err != nil { return fmt.Errorf("failed building and writing block, number: %d, error: %v", block.PartialHeader.Number, err) } diff --git a/rollup/da_syncer/modes.go b/rollup/da_syncer/modes.go deleted file mode 100644 index bfcc1d1dfba0..000000000000 --- a/rollup/da_syncer/modes.go +++ /dev/null @@ -1,52 +0,0 @@ -package da_syncer - -import "fmt" - -// FetcherMode represents the mode of fetcher -type FetcherMode int - -const ( - // L1RPC mode fetches DA from L1RPC - L1RPC FetcherMode = iota - // Snapshot mode loads DA from snapshot file - Snapshot -) - -func (mode FetcherMode) IsValid() bool { - return mode >= L1RPC && mode <= Snapshot -} - -// String implements the stringer interface. 
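The cancellation guard added to DAQueue.NextDA above is Go's standard non-blocking select probe; a minimal, self-contained sketch with illustrative names (none of these identifiers come from the patch):

package main

import (
	"context"
	"fmt"
)

// nextItem mirrors the shape of DAQueue.NextDA: before each attempt to
// fetch more data, a select with a default case checks ctx without
// blocking, so a cancelled pipeline stops at the next loop iteration
// instead of spinning until the fetch fails on its own.
func nextItem(ctx context.Context, queue []int) (int, []int, error) {
	for len(queue) == 0 {
		select {
		case <-ctx.Done():
			return 0, nil, ctx.Err()
		default:
		}
		queue = append(queue, 42) // stand-in for fetching the next chunk of data
	}
	return queue[0], queue[1:], nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate shutdown before the next fetch
	_, _, err := nextItem(ctx, nil)
	fmt.Println(err) // context canceled
}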
-func (mode FetcherMode) String() string { - switch mode { - case L1RPC: - return "l1rpc" - case Snapshot: - return "snapshot" - default: - return "unknown" - } -} - -func (mode FetcherMode) MarshalText() ([]byte, error) { - switch mode { - case L1RPC: - return []byte("l1rpc"), nil - case Snapshot: - return []byte("snapshot"), nil - default: - return nil, fmt.Errorf("unknown sync mode %d", mode) - } -} - -func (mode *FetcherMode) UnmarshalText(text []byte) error { - switch string(text) { - case "l1rpc": - *mode = L1RPC - case "snapshot": - *mode = Snapshot - default: - return fmt.Errorf(`unknown sync mode %q, want "l1rpc" or "snapshot"`, text) - } - return nil -} From f10c383837cab17b0a27f0962041e15ca2a9c94e Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Thu, 12 Dec 2024 11:27:07 +0700 Subject: [PATCH 08/17] upgrade golang.org/x/net to 0.23.0 --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 5cd2a1dccda3..7a1a9f7d2880 100644 --- a/go.mod +++ b/go.mod @@ -58,9 +58,9 @@ require ( github.com/stretchr/testify v1.9.0 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef - golang.org/x/crypto v0.17.0 + golang.org/x/crypto v0.21.0 golang.org/x/sync v0.6.0 - golang.org/x/sys v0.17.0 + golang.org/x/sys v0.18.0 golang.org/x/text v0.14.0 golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce @@ -103,8 +103,8 @@ require ( github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.9.0 // indirect - golang.org/x/net v0.16.0 // indirect - golang.org/x/term v0.15.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/term v0.18.0 // indirect google.golang.org/protobuf v1.23.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 95119a13f63d..fbb9af9bc659 100644 --- a/go.sum +++ b/go.sum @@ -474,8 +474,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -526,8 +526,8 @@ golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 
-golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= -golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -582,14 +582,14 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= From b918a2bede3d165a38b0c3dd105c93951c2c5115 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 10 Dec 2024 17:13:31 +0800 Subject: [PATCH 09/17] port changes from #1018 --- common/heapmap.go | 90 +++++ common/shrinkingmap.go | 16 + eth/backend.go | 3 +- .../blob_client/beacon_node_client.go | 16 +- rollup/da_syncer/blob_client/blob_client.go | 6 +- .../da_syncer/blob_client/blob_scan_client.go | 2 +- .../blob_client/block_native_client.go | 2 +- rollup/da_syncer/da/calldata_blob_source.go | 168 ++------ rollup/da_syncer/da/commitV0.go | 10 +- rollup/da_syncer/da/commitV1.go | 25 +- rollup/da_syncer/data_source.go | 10 +- rollup/da_syncer/syncing_pipeline.go | 21 +- rollup/l1/abi.go | 245 +++++++++++ rollup/l1/abi_test.go | 82 ++++ rollup/l1/l1msg_bindings.go | 150 +++++++ rollup/l1/reader.go | 381 ++++++++++++++++++ rollup/l1/reader_test.go | 125 ++++++ rollup/l1/types.go | 22 + 18 files changed, 1193 insertions(+), 181 deletions(-) create mode 100644 common/heapmap.go create mode 100644 rollup/l1/abi.go create mode 100644 rollup/l1/abi_test.go create mode 100644 rollup/l1/l1msg_bindings.go create mode 100644 rollup/l1/reader.go create mode 100644 rollup/l1/reader_test.go create mode 100644 
rollup/l1/types.go diff --git a/common/heapmap.go b/common/heapmap.go new file mode 100644 index 000000000000..90f51e2db25d --- /dev/null +++ b/common/heapmap.go @@ -0,0 +1,90 @@ +package common + +type HeapMap[K comparable, T Comparable[T]] struct { + h *Heap[T] + m *ShrinkingMap[K, *HeapElement[T]] + keyFromElement func(T) K +} + +func NewHeapMap[K comparable, T Comparable[T]](keyFromElement func(T) K) *HeapMap[K, T] { + return &HeapMap[K, T]{ + h: NewHeap[T](), + m: NewShrinkingMap[K, *HeapElement[T]](1000), + keyFromElement: keyFromElement, + } +} + +func (hm *HeapMap[K, T]) Len() int { + return hm.h.Len() +} + +func (hm *HeapMap[K, T]) Push(element T) bool { + k := hm.keyFromElement(element) + + if hm.m.Has(k) { + return false + } + + heapElement := hm.h.Push(element) + hm.m.Set(k, heapElement) + + return true +} + +func (hm *HeapMap[K, T]) Pop() T { + element := hm.h.Pop() + k := hm.keyFromElement(element.Value()) + hm.m.Delete(k) + + return element.Value() +} + +func (hm *HeapMap[K, T]) Peek() T { + return hm.h.Peek().Value() +} + +func (hm *HeapMap[K, T]) RemoveByElement(element T) bool { + key := hm.keyFromElement(element) + heapElement, exists := hm.m.Get(key) + if !exists { + return false + } + + hm.h.Remove(heapElement) + hm.m.Delete(key) + + return true +} + +func (hm *HeapMap[K, T]) RemoveByKey(key K) bool { + heapElement, exists := hm.m.Get(key) + if !exists { + return false + } + + hm.h.Remove(heapElement) + hm.m.Delete(key) + + return true +} + +func (hm *HeapMap[K, T]) Clear() { + hm.h.Clear() + hm.m = NewShrinkingMap[K, *HeapElement[T]](1000) +} + +func (hm *HeapMap[K, T]) Keys() []K { + return hm.m.Keys() +} + +func (hm *HeapMap[K, T]) Elements() []T { + var elements []T + for _, element := range hm.m.Values() { + elements = append(elements, element.Value()) + } + return elements +} + +func (hm *HeapMap[K, T]) Has(element T) bool { + return hm.m.Has(hm.keyFromElement(element)) +} diff --git a/common/shrinkingmap.go b/common/shrinkingmap.go index 4bf98f87c2da..a62c23a7b6c8 100644 --- a/common/shrinkingmap.go +++ b/common/shrinkingmap.go @@ -47,6 +47,22 @@ func (s *ShrinkingMap[K, V]) Delete(key K) (deleted bool) { return true } +func (s *ShrinkingMap[K, V]) Keys() []K { + var keys []K + for k := range s.m { + keys = append(keys, k) + } + return keys +} + +func (s *ShrinkingMap[K, V]) Values() []V { + var values []V + for _, v := range s.m { + values = append(values, v) + } + return values +} + func (s *ShrinkingMap[K, V]) Size() (size int) { return len(s.m) } diff --git a/eth/backend.go b/eth/backend.go index 2b6c663d2744..a119708e52be 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -57,6 +57,7 @@ import ( "github.com/scroll-tech/go-ethereum/rlp" "github.com/scroll-tech/go-ethereum/rollup/ccc" "github.com/scroll-tech/go-ethereum/rollup/da_syncer" + "github.com/scroll-tech/go-ethereum/rollup/l1" "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" "github.com/scroll-tech/go-ethereum/rollup/sync_service" "github.com/scroll-tech/go-ethereum/rpc" @@ -109,7 +110,7 @@ type Ethereum struct { // New creates a new Ethereum object (including the // initialisation of the common Ethereum object) -func New(stack *node.Node, config *ethconfig.Config, l1Client sync_service.EthClient) (*Ethereum, error) { +func New(stack *node.Node, config *ethconfig.Config, l1Client l1.Client) (*Ethereum, error) { // Ensure configuration values are compatible and sane if config.SyncMode == downloader.LightSync { return nil, errors.New("can't run eth.Ethereum in light sync mode, use 
les.LightEthereum") diff --git a/rollup/da_syncer/blob_client/beacon_node_client.go b/rollup/da_syncer/blob_client/beacon_node_client.go index 5bfd7b9edf6c..adb61a4199ff 100644 --- a/rollup/da_syncer/blob_client/beacon_node_client.go +++ b/rollup/da_syncer/blob_client/beacon_node_client.go @@ -12,12 +12,10 @@ import ( "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/crypto/kzg4844" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" ) type BeaconNodeClient struct { apiEndpoint string - l1Client *rollup_sync_service.L1Client genesisTime uint64 secondsPerSlot uint64 } @@ -28,7 +26,7 @@ var ( beaconNodeBlobEndpoint = "/eth/v1/beacon/blob_sidecars" ) -func NewBeaconNodeClient(apiEndpoint string, l1Client *rollup_sync_service.L1Client) (*BeaconNodeClient, error) { +func NewBeaconNodeClient(apiEndpoint string) (*BeaconNodeClient, error) { // get genesis time genesisPath, err := url.JoinPath(apiEndpoint, beaconNodeGenesisEndpoint) if err != nil { @@ -94,19 +92,13 @@ func NewBeaconNodeClient(apiEndpoint string, l1Client *rollup_sync_service.L1Cli return &BeaconNodeClient{ apiEndpoint: apiEndpoint, - l1Client: l1Client, genesisTime: genesisTime, secondsPerSlot: secondsPerSlot, }, nil } -func (c *BeaconNodeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { - // get block timestamp to calculate slot - header, err := c.l1Client.GetHeaderByNumber(blockNumber) - if err != nil { - return nil, fmt.Errorf("failed to get header by number, err: %w", err) - } - slot := (header.Time - c.genesisTime) / c.secondsPerSlot +func (c *BeaconNodeClient) GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) { + slot := (blockTime - c.genesisTime) / c.secondsPerSlot // get blob sidecar for slot blobSidecarPath, err := url.JoinPath(c.apiEndpoint, beaconNodeBlobEndpoint, fmt.Sprintf("%d", slot)) @@ -156,7 +148,7 @@ func (c *BeaconNodeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Cont } } - return nil, fmt.Errorf("missing blob %v in slot %d, block number %d", versionedHash, slot, blockNumber) + return nil, fmt.Errorf("missing blob %v in slot %d", versionedHash, slot) } type GenesisResp struct { diff --git a/rollup/da_syncer/blob_client/blob_client.go b/rollup/da_syncer/blob_client/blob_client.go index 814b1d4faf2d..70635311559f 100644 --- a/rollup/da_syncer/blob_client/blob_client.go +++ b/rollup/da_syncer/blob_client/blob_client.go @@ -17,7 +17,7 @@ const ( ) type BlobClient interface { - GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) + GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) } type BlobClients struct { @@ -32,13 +32,13 @@ func NewBlobClients(blobClients ...BlobClient) *BlobClients { } } -func (c *BlobClients) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { +func (c *BlobClients) GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) { if len(c.list) == 0 { return nil, fmt.Errorf("BlobClients.GetBlobByVersionedHash: list of BlobClients is empty") } for i := 0; i < len(c.list); i++ { - blob, err := c.list[c.curPos].GetBlobByVersionedHashAndBlockNumber(ctx, versionedHash, blockNumber) + blob, err := 
c.list[c.curPos].GetBlobByVersionedHashAndBlockTime(ctx, versionedHash, blockTime) if err == nil { return blob, nil } diff --git a/rollup/da_syncer/blob_client/blob_scan_client.go b/rollup/da_syncer/blob_client/blob_scan_client.go index 24b03bed32b9..0185cc9dc96d 100644 --- a/rollup/da_syncer/blob_client/blob_scan_client.go +++ b/rollup/da_syncer/blob_client/blob_scan_client.go @@ -26,7 +26,7 @@ func NewBlobScanClient(apiEndpoint string) *BlobScanClient { } } -func (c *BlobScanClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { +func (c *BlobScanClient) GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) { // blobscan api docs https://api.blobscan.com/#/blobs/blob-getByBlobId path, err := url.JoinPath(c.apiEndpoint, versionedHash.String()) if err != nil { diff --git a/rollup/da_syncer/blob_client/block_native_client.go b/rollup/da_syncer/blob_client/block_native_client.go index 7b1cce86f083..1fe6efbbab27 100644 --- a/rollup/da_syncer/blob_client/block_native_client.go +++ b/rollup/da_syncer/blob_client/block_native_client.go @@ -24,7 +24,7 @@ func NewBlockNativeClient(apiEndpoint string) *BlockNativeClient { } } -func (c *BlockNativeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { +func (c *BlockNativeClient) GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) { // blocknative api docs https://docs.blocknative.com/blocknative-data-archive/blob-archive path, err := url.JoinPath(c.apiEndpoint, versionedHash.String()) if err != nil { diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index db0f5f01c107..a7489c72c838 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -9,12 +9,10 @@ import ( "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/ethdb" - "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) const ( @@ -35,7 +33,7 @@ var ( type CalldataBlobSource struct { ctx context.Context - l1Client *rollup_sync_service.L1Client + l1Reader *l1.Reader blobClient blob_client.BlobClient l1height uint64 scrollChainABI *abi.ABI @@ -47,14 +45,14 @@ type CalldataBlobSource struct { l1Finalized uint64 } -func NewCalldataBlobSource(ctx context.Context, l1height uint64, l1Client *rollup_sync_service.L1Client, blobClient blob_client.BlobClient, db ethdb.Database) (*CalldataBlobSource, error) { - scrollChainABI, err := rollup_sync_service.ScrollChainMetaData.GetAbi() +func NewCalldataBlobSource(ctx context.Context, l1height uint64, l1Reader *l1.Reader, blobClient blob_client.BlobClient, db ethdb.Database) (*CalldataBlobSource, error) { + scrollChainABI, err := l1.ScrollChainMetaData.GetAbi() if err != nil { return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) } return &CalldataBlobSource{ ctx: ctx, - l1Client: l1Client, + l1Reader: l1Reader, blobClient: blobClient, l1height: l1height, scrollChainABI: scrollChainABI, @@ 
-73,7 +71,7 @@ func (ds *CalldataBlobSource) NextData() (Entries, error) { // Otherwise, we know that there's more finalized blocks than we want to request up to // -> no need to query finalized block number if to > ds.l1Finalized { - ds.l1Finalized, err = ds.l1Client.GetLatestFinalizedBlockNumber() + ds.l1Finalized, err = ds.l1Reader.GetLatestFinalizedBlockNumber() if err != nil { return nil, serrors.NewTemporaryError(fmt.Errorf("failed to query GetLatestFinalizedBlockNumber, error: %v", err)) } @@ -85,13 +83,13 @@ func (ds *CalldataBlobSource) NextData() (Entries, error) { return nil, ErrSourceExhausted } - logs, err := ds.l1Client.FetchRollupEventsInRange(ds.l1height, to) + rollupEvents, err := ds.l1Reader.FetchRollupEventsInRange(ds.l1height, to) if err != nil { - return nil, serrors.NewTemporaryError(fmt.Errorf("cannot get events, l1height: %d, error: %v", ds.l1height, err)) + return nil, serrors.NewTemporaryError(fmt.Errorf("cannot get rollup events, l1height: %d, error: %v", ds.l1height, err)) } - da, err := ds.processLogsToDA(logs) + da, err := ds.processRollupEventsToDA(rollupEvents) if err != nil { - return nil, serrors.NewTemporaryError(fmt.Errorf("failed to process logs to DA, error: %v", err)) + return nil, serrors.NewTemporaryError(fmt.Errorf("failed to process rollup events to DA, error: %v", err)) } ds.l1height = to + 1 @@ -102,48 +100,30 @@ func (ds *CalldataBlobSource) L1Height() uint64 { return ds.l1height } -func (ds *CalldataBlobSource) processLogsToDA(logs []types.Log) (Entries, error) { +func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEvents) (Entries, error) { var entries Entries var entry Entry var err error - - for _, vLog := range logs { - switch vLog.Topics[0] { - case ds.l1CommitBatchEventSignature: - event := &rollup_sync_service.L1CommitBatchEvent{} - if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, commitBatchEventName, vLog); err != nil { - return nil, fmt.Errorf("failed to unpack commit rollup event log, err: %w", err) + for _, rollupEvent := range rollupEvents { + switch rollupEvent.Type() { + case l1.CommitEventType: + commitEvent, ok := rollupEvent.(*l1.CommitBatchEvent) + // this should never happen because we just check event type + if !ok { + return nil, fmt.Errorf("unexpected type of rollup event: %T", rollupEvent) } - - batchIndex := event.BatchIndex.Uint64() - log.Trace("found new CommitBatch event", "batch index", batchIndex) - - if entry, err = ds.getCommitBatchDA(batchIndex, &vLog); err != nil { - return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", batchIndex, err) + if entry, err = ds.getCommitBatchDA(commitEvent); err != nil { + return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", rollupEvent.BatchIndex().Uint64(), err) } - case ds.l1RevertBatchEventSignature: - event := &rollup_sync_service.L1RevertBatchEvent{} - if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, revertBatchEventName, vLog); err != nil { - return nil, fmt.Errorf("failed to unpack revert rollup event log, err: %w", err) - } + case l1.RevertEventType: + entry = NewRevertBatch(rollupEvent.BatchIndex().Uint64()) - batchIndex := event.BatchIndex.Uint64() - log.Trace("found new RevertBatchType event", "batch index", batchIndex) - entry = NewRevertBatch(batchIndex) - - case ds.l1FinalizeBatchEventSignature: - event := &rollup_sync_service.L1FinalizeBatchEvent{} - if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, finalizeBatchEventName, vLog); err != nil { - return nil, 
fmt.Errorf("failed to unpack finalized rollup event log, err: %w", err) - } - - batchIndex := event.BatchIndex.Uint64() - log.Trace("found new FinalizeBatchType event", "batch index", event.BatchIndex.Uint64()) - entry = NewFinalizeBatch(batchIndex) + case l1.FinalizeEventType: + entry = NewFinalizeBatch(rollupEvent.BatchIndex().Uint64()) default: - return nil, fmt.Errorf("unknown event, topic: %v, tx hash: %v", vLog.Topics[0].Hex(), vLog.TxHash.Hex()) + return nil, fmt.Errorf("unknown rollup event, type: %v", rollupEvent.Type()) } entries = append(entries, entry) @@ -151,97 +131,27 @@ func (ds *CalldataBlobSource) processLogsToDA(logs []types.Log) (Entries, error) return entries, nil } -type commitBatchArgs struct { - Version uint8 - ParentBatchHeader []byte - Chunks [][]byte - SkippedL1MessageBitmap []byte -} - -func newCommitBatchArgs(method *abi.Method, values []interface{}) (*commitBatchArgs, error) { - var args commitBatchArgs - err := method.Inputs.Copy(&args, values) - return &args, err -} - -func newCommitBatchArgsFromCommitBatchWithProof(method *abi.Method, values []interface{}) (*commitBatchArgs, error) { - var args commitBatchWithBlobProofArgs - err := method.Inputs.Copy(&args, values) - if err != nil { - return nil, err - } - return &commitBatchArgs{ - Version: args.Version, - ParentBatchHeader: args.ParentBatchHeader, - Chunks: args.Chunks, - SkippedL1MessageBitmap: args.SkippedL1MessageBitmap, - }, nil -} - -type commitBatchWithBlobProofArgs struct { - Version uint8 - ParentBatchHeader []byte - Chunks [][]byte - SkippedL1MessageBitmap []byte - BlobDataProof []byte -} - -func (ds *CalldataBlobSource) getCommitBatchDA(batchIndex uint64, vLog *types.Log) (Entry, error) { - if batchIndex == 0 { +func (ds *CalldataBlobSource) getCommitBatchDA(commitEvent *l1.CommitBatchEvent) (Entry, error) { + if commitEvent.BatchIndex().Uint64() == 0 { return NewCommitBatchDAV0Empty(), nil } - txData, err := ds.l1Client.FetchTxData(vLog) + args, err := ds.l1Reader.FetchCommitTxData(commitEvent) if err != nil { - return nil, fmt.Errorf("failed to fetch tx data, tx hash: %v, err: %w", vLog.TxHash.Hex(), err) - } - if len(txData) < methodIDLength { - return nil, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength) + return nil, fmt.Errorf("failed to fetch commit tx data of batch %d, tx hash: %v, err: %w", commitEvent.BatchIndex().Uint64(), commitEvent.TxHash().Hex(), err) } - method, err := ds.scrollChainABI.MethodById(txData[:methodIDLength]) - if err != nil { - return nil, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err) - } - values, err := method.Inputs.Unpack(txData[methodIDLength:]) + codec, err := encoding.CodecFromVersion(encoding.CodecVersion(args.Version)) if err != nil { - return nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err) - } - if method.Name == commitBatchMethodName { - args, err := newCommitBatchArgs(method, values) - if err != nil { - return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) - } - codecVersion := encoding.CodecVersion(args.Version) - codec, err := encoding.CodecFromVersion(codecVersion) - if err != nil { - return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) - } - switch args.Version { - case 0: - return NewCommitBatchDAV0(ds.db, codec, args.Version, batchIndex, args.ParentBatchHeader, 
args.Chunks, args.SkippedL1MessageBitmap, vLog.BlockNumber) - case 1, 2: - return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) - default: - return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) - } - } else if method.Name == commitBatchWithBlobProofMethodName { - args, err := newCommitBatchArgsFromCommitBatchWithProof(method, values) - if err != nil { - return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) - } - codecVersion := encoding.CodecVersion(args.Version) - codec, err := encoding.CodecFromVersion(codecVersion) - if err != nil { - return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) - } - switch args.Version { - case 3, 4: - return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) - default: - return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) - } + return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", args.Version, commitEvent.BatchIndex().Uint64(), err) } - return nil, fmt.Errorf("unknown method name: %s", method.Name) + switch codec.Version() { + case 0: + return NewCommitBatchDAV0(ds.db, codec, commitEvent, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) + case 1, 2, 3, 4: + return NewCommitBatchDAWithBlob(ds.ctx, ds.db, ds.l1Reader, ds.blobClient, codec, commitEvent, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) + default: + return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) + } } diff --git a/rollup/da_syncer/da/commitV0.go b/rollup/da_syncer/da/commitV0.go index 135a76d79518..2c4f07869da1 100644 --- a/rollup/da_syncer/da/commitV0.go +++ b/rollup/da_syncer/da/commitV0.go @@ -10,6 +10,7 @@ import ( "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) type CommitBatchDAV0 struct { @@ -25,19 +26,17 @@ type CommitBatchDAV0 struct { func NewCommitBatchDAV0(db ethdb.Database, codec encoding.Codec, - version uint8, - batchIndex uint64, + commitEvent *l1.CommitBatchEvent, parentBatchHeader []byte, chunks [][]byte, skippedL1MessageBitmap []byte, - l1BlockNumber uint64, ) (*CommitBatchDAV0, error) { decodedChunks, err := codec.DecodeDAChunksRawTx(chunks) if err != nil { - return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", batchIndex, err) + return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", commitEvent.BatchIndex().Uint64(), err) } - return NewCommitBatchDAV0WithChunks(db, version, batchIndex, parentBatchHeader, decodedChunks, skippedL1MessageBitmap, l1BlockNumber) + return NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent.BlockNumber()) } func NewCommitBatchDAV0WithChunks(db ethdb.Database, @@ -141,6 +140,7 @@ func getTotalMessagesPoppedFromChunks(decodedChunks []*encoding.DAChunkRawTx) in func getL1Messages(db ethdb.Database, parentTotalL1MessagePopped uint64, skippedBitmap []byte, totalL1MessagePopped int) 
([]*types.L1MessageTx, error) { var txs []*types.L1MessageTx + decodedSkippedBitmap, err := encoding.DecodeBitmap(skippedBitmap, totalL1MessagePopped) if err != nil { return nil, fmt.Errorf("failed to decode skipped message bitmap: err: %w", err) diff --git a/rollup/da_syncer/da/commitV1.go b/rollup/da_syncer/da/commitV1.go index 532b0f81abd6..0433479c950b 100644 --- a/rollup/da_syncer/da/commitV1.go +++ b/rollup/da_syncer/da/commitV1.go @@ -8,10 +8,9 @@ import ( "github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" + "github.com/scroll-tech/go-ethereum/rollup/l1" "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/crypto/kzg4844" "github.com/scroll-tech/go-ethereum/ethdb" ) @@ -21,32 +20,34 @@ type CommitBatchDAV1 struct { } func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, - codec encoding.Codec, - l1Client *rollup_sync_service.L1Client, + l1Reader *l1.Reader, blobClient blob_client.BlobClient, - vLog *types.Log, - version uint8, - batchIndex uint64, + codec encoding.Codec, + commitEvent *l1.CommitBatchEvent, parentBatchHeader []byte, chunks [][]byte, skippedL1MessageBitmap []byte, ) (*CommitBatchDAV1, error) { decodedChunks, err := codec.DecodeDAChunksRawTx(chunks) if err != nil { - return nil, fmt.Errorf("failed to unpack chunks: %v, err: %w", batchIndex, err) + return nil, fmt.Errorf("failed to unpack chunks: %v, err: %w", commitEvent.BatchIndex().Uint64(), err) } - versionedHash, err := l1Client.FetchTxBlobHash(vLog) + versionedHash, err := l1Reader.FetchTxBlobHash(commitEvent.TxHash(), commitEvent.BlockHash()) if err != nil { return nil, fmt.Errorf("failed to fetch blob hash, err: %w", err) } - blob, err := blobClient.GetBlobByVersionedHashAndBlockNumber(ctx, versionedHash, vLog.BlockNumber) + header, err := l1Reader.FetchBlockHeaderByNumber(commitEvent.BlockNumber()) + if err != nil { + return nil, fmt.Errorf("failed to get header by number, err: %w", err) + } + blob, err := blobClient.GetBlobByVersionedHashAndBlockTime(ctx, versionedHash, header.Time) if err != nil { return nil, fmt.Errorf("failed to fetch blob from blob client, err: %w", err) } if blob == nil { - return nil, fmt.Errorf("unexpected, blob == nil and err != nil, batch index: %d, versionedHash: %s, blobClient: %T", batchIndex, versionedHash.String(), blobClient) + return nil, fmt.Errorf("unexpected, blob == nil and err != nil, batch index: %d, versionedHash: %s, blobClient: %T", commitEvent.BatchIndex().Uint64(), versionedHash.String(), blobClient) } // compute blob versioned hash and compare with one from tx @@ -69,7 +70,7 @@ func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, return nil, fmt.Errorf("decodedChunks is nil after decoding") } - v0, err := NewCommitBatchDAV0WithChunks(db, version, batchIndex, parentBatchHeader, decodedChunks, skippedL1MessageBitmap, vLog.BlockNumber) + v0, err := NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent.BlockNumber()) if err != nil { return nil, err } diff --git a/rollup/da_syncer/data_source.go b/rollup/da_syncer/data_source.go index 7beab3baea32..048fec6bb3e2 100644 --- a/rollup/da_syncer/data_source.go +++ b/rollup/da_syncer/data_source.go @@ -8,7 +8,7 @@ import ( "github.com/scroll-tech/go-ethereum/params" 
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) type DataSource interface { @@ -19,21 +19,21 @@ type DataSource interface { type DataSourceFactory struct { config Config genesisConfig *params.ChainConfig - l1Client *rollup_sync_service.L1Client + l1Reader *l1.Reader blobClient blob_client.BlobClient db ethdb.Database } -func NewDataSourceFactory(blockchain *core.BlockChain, genesisConfig *params.ChainConfig, config Config, l1Client *rollup_sync_service.L1Client, blobClient blob_client.BlobClient, db ethdb.Database) *DataSourceFactory { +func NewDataSourceFactory(blockchain *core.BlockChain, genesisConfig *params.ChainConfig, config Config, l1Reader *l1.Reader, blobClient blob_client.BlobClient, db ethdb.Database) *DataSourceFactory { return &DataSourceFactory{ config: config, genesisConfig: genesisConfig, - l1Client: l1Client, + l1Reader: l1Reader, blobClient: blobClient, db: db, } } func (ds *DataSourceFactory) OpenDataSource(ctx context.Context, l1height uint64) (DataSource, error) { - return da.NewCalldataBlobSource(ctx, l1height, ds.l1Client, ds.blobClient, ds.db) + return da.NewCalldataBlobSource(ctx, l1height, ds.l1Reader, ds.blobClient, ds.db) } diff --git a/rollup/da_syncer/syncing_pipeline.go b/rollup/da_syncer/syncing_pipeline.go index 27eaf20cb38a..6ed84fe85186 100644 --- a/rollup/da_syncer/syncing_pipeline.go +++ b/rollup/da_syncer/syncing_pipeline.go @@ -15,8 +15,7 @@ import ( "github.com/scroll-tech/go-ethereum/params" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" - "github.com/scroll-tech/go-ethereum/rollup/sync_service" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) // Config is the configuration parameters of data availability syncing. 
@@ -42,20 +41,18 @@ type SyncingPipeline struct { daSyncer *DASyncer } -func NewSyncingPipeline(ctx context.Context, blockchain *core.BlockChain, genesisConfig *params.ChainConfig, db ethdb.Database, ethClient sync_service.EthClient, l1DeploymentBlock uint64, config Config) (*SyncingPipeline, error) { - scrollChainABI, err := rollup_sync_service.ScrollChainMetaData.GetAbi() +func NewSyncingPipeline(ctx context.Context, blockchain *core.BlockChain, genesisConfig *params.ChainConfig, db ethdb.Database, ethClient l1.Client, l1DeploymentBlock uint64, config Config) (*SyncingPipeline, error) { + l1Reader, err := l1.NewReader(ctx, l1.Config{ + ScrollChainAddress: genesisConfig.Scroll.L1Config.ScrollChainAddress, + L1MessageQueueAddress: genesisConfig.Scroll.L1Config.L1MessageQueueAddress, + }, ethClient) if err != nil { - return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) - } - - l1Client, err := rollup_sync_service.NewL1Client(ctx, ethClient, genesisConfig.Scroll.L1Config.L1ChainId, genesisConfig.Scroll.L1Config.ScrollChainAddress, scrollChainABI) - if err != nil { - return nil, err + return nil, fmt.Errorf("failed to initialize l1.Reader, err = %w", err) } blobClientList := blob_client.NewBlobClients() if config.BeaconNodeAPIEndpoint != "" { - beaconNodeClient, err := blob_client.NewBeaconNodeClient(config.BeaconNodeAPIEndpoint, l1Client) + beaconNodeClient, err := blob_client.NewBeaconNodeClient(config.BeaconNodeAPIEndpoint) if err != nil { log.Warn("failed to create BeaconNodeClient", "err", err) } else { @@ -72,7 +69,7 @@ func NewSyncingPipeline(ctx context.Context, blockchain *core.BlockChain, genesi return nil, errors.New("DA syncing is enabled but no blob client is configured. Please provide at least one blob client via command line flag") } - dataSourceFactory := NewDataSourceFactory(blockchain, genesisConfig, config, l1Client, blobClientList, db) + dataSourceFactory := NewDataSourceFactory(blockchain, genesisConfig, config, l1Reader, blobClientList, db) syncedL1Height := l1DeploymentBlock - 1 from := rawdb.ReadDASyncedL1BlockNumber(db) if from != nil { diff --git a/rollup/l1/abi.go b/rollup/l1/abi.go new file mode 100644 index 000000000000..c16123aa5e8b --- /dev/null +++ b/rollup/l1/abi.go @@ -0,0 +1,245 @@ +package l1 + +import ( + "fmt" + "math/big" + + "github.com/scroll-tech/go-ethereum/accounts/abi" + "github.com/scroll-tech/go-ethereum/accounts/abi/bind" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" +) + +var ( + // ScrollChainABI holds information about ScrollChain's context and available invokable methods. + ScrollChainABI *abi.ABI + // L1MessageQueueABIManual holds information about L1MessageQueue's context and available invokable methods. + L1MessageQueueABIManual *abi.ABI +) + +func init() { + ScrollChainABI, _ = ScrollChainMetaData.GetAbi() + L1MessageQueueABIManual, _ = L1MessageQueueMetaDataManual.GetAbi() +} + +// ScrollChainMetaData contains ABI of the ScrollChain contract. 
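Parsing this metadata yields the event IDs that identify rollup logs, mirroring what TestEventSignatures verifies further down; a small sketch using only the exported symbols from this file:

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/rollup/l1"
)

func main() {
	scrollChainABI, err := l1.ScrollChainMetaData.GetAbi()
	if err != nil {
		panic(err) // the embedded ABI JSON is static, so this should never fire
	}
	// topic[0] of each rollup log equals the corresponding event ID
	fmt.Println("CommitBatch:", scrollChainABI.Events["CommitBatch"].ID.Hex())
	fmt.Println("RevertBatch:", scrollChainABI.Events["RevertBatch"].ID.Hex())
	fmt.Println("FinalizeBatch:", scrollChainABI.Events["FinalizeBatch"].ID.Hex())
}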
+var ScrollChainMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"CommitBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"stateRoot\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"FinalizeBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"RevertBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"oldMaxNumTxInChunk\",\"type\": \"uint256\"},{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"newMaxNumTxInChunk\",\"type\": \"uint256\"}],\"name\": \"UpdateMaxNumTxInChunk\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateProver\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateSequencer\",\"type\": \"event\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"}],\"name\": \"commitBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"commitBatchWithBlobProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"committedBatches\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBatch\",\"outputs\": [],\"stateMutability\": 
\"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatch4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBundle\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBundleWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"finalizedStateRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"_batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"_stateRoot\",\"type\": \"bytes32\"}],\"name\": \"importGenesisBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"isBatchFinalized\",\"outputs\": [{\"internalType\": \"bool\",\"name\": \"\",\"type\": \"bool\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [],\"name\": \"lastFinalizedBatchIndex\",\"outputs\": [{\"internalType\": \"uint256\",\"name\": \"\",\"type\": \"uint256\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": 
\"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"uint256\",\"name\": \"count\",\"type\": \"uint256\"}],\"name\": \"revertBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"withdrawRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"}]", +} + +// L1MessageQueueMetaDataManual contains all meta data concerning the L1MessageQueue contract. +var L1MessageQueueMetaDataManual = &bind.MetaData{ + ABI: "[{\"type\":\"constructor\",\"inputs\":[{\"name\":\"_messenger\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_scrollChain\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_enforcedTxGateway\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"appendCrossDomainMessage\",\"inputs\":[{\"name\":\"_target\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_gasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"appendEnforcedTransaction\",\"inputs\":[{\"name\":\"_sender\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_target\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_value\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_gasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"calculateIntrinsicGasFee\",\"inputs\":[{\"name\":\"_calldata\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"computeTransactionHash\",\"inputs\":[{\"name\":\"_sender\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_queueIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_value\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_target\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_gasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"pure\"},{\"type\":\"function\",\"name\":\"dropCrossDomainMessage\",\"inputs\":[{\"name\":\"_index\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"enforcedTxGateway\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"estimateCrossDomainMessageFee\",\"inputs\":[{\"name\":\"_gasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"finalizePoppedCrossDomainMessage\",\"inputs\":[{\"name\":\"_newFinalizedQueueIndexPlusOne\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"gas
Oracle\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getCrossDomainMessage\",\"inputs\":[{\"name\":\"_queueIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"initialize\",\"inputs\":[{\"name\":\"_messenger\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_scrollChain\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_enforcedTxGateway\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_gasOracle\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_maxGasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"isMessageDropped\",\"inputs\":[{\"name\":\"_queueIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"isMessageSkipped\",\"inputs\":[{\"name\":\"_queueIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"maxGasLimit\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"messageQueue\",\"inputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"messenger\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"nextCrossDomainMessageIndex\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"nextUnfinalizedQueueIndex\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"owner\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"pendingQueueIndex\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"popCrossDomainMessage\",\"inputs\":[{\"name\":\"_startIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_count\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_skippedBitmap\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"renounceOwnership\",\"inputs\":[],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"resetPoppedCrossDomainMessage\",\"inputs\":[{\"name\":\"_startIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"scrollChain\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function
\",\"name\":\"transferOwnership\",\"inputs\":[{\"name\":\"newOwner\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"updateGasOracle\",\"inputs\":[{\"name\":\"_newGasOracle\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"updateMaxGasLimit\",\"inputs\":[{\"name\":\"_newMaxGasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"event\",\"name\":\"DequeueTransaction\",\"inputs\":[{\"name\":\"startIndex\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"count\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"skippedBitmap\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"DropTransaction\",\"inputs\":[{\"name\":\"index\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"FinalizedDequeuedTransaction\",\"inputs\":[{\"name\":\"finalizedIndex\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Initialized\",\"inputs\":[{\"name\":\"version\",\"type\":\"uint8\",\"indexed\":false,\"internalType\":\"uint8\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"OwnershipTransferred\",\"inputs\":[{\"name\":\"previousOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"newOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"QueueTransaction\",\"inputs\":[{\"name\":\"sender\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"target\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"value\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"queueIndex\",\"type\":\"uint64\",\"indexed\":false,\"internalType\":\"uint64\"},{\"name\":\"gasLimit\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"data\",\"type\":\"bytes\",\"indexed\":false,\"internalType\":\"bytes\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"ResetDequeuedTransaction\",\"inputs\":[{\"name\":\"startIndex\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"UpdateGasOracle\",\"inputs\":[{\"name\":\"_oldGasOracle\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"_newGasOracle\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"UpdateMaxGasLimit\",\"inputs\":[{\"name\":\"_oldMaxGasLimit\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"_newMaxGasLimit\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"error\",\"name\":\"ErrorZeroAddress\",\"inputs\":[]}]", +} + +const ( + // CommitEventType contains data of event of commit batch + CommitEventType int = iota + // RevertEventType contains data of event of revert batch + RevertEventType + // FinalizeEventType contains data of event of finalize batch + FinalizeEventType + + commitBatchMethodName = "commitBatch" + commitBatchWithBlobProofMethodName = "commitBatchWithBlobProof" + + // the 
length of method ID at the beginning of transaction data + methodIDLength = 4 +) + +// RollupEvent represents a single rollup event (commit, revert, finalize) +type RollupEvent interface { + Type() int + BatchIndex() *big.Int + BatchHash() common.Hash + TxHash() common.Hash + BlockHash() common.Hash + BlockNumber() uint64 +} + +type RollupEvents []RollupEvent + +// CommitBatchEventUnpacked represents a CommitBatch event raised by the ScrollChain contract. +type CommitBatchEventUnpacked struct { + BatchIndex *big.Int + BatchHash common.Hash +} + +// CommitBatchEvent represents a CommitBatch event raised by the ScrollChain contract with additional fields. +type CommitBatchEvent struct { + batchIndex *big.Int + batchHash common.Hash + txHash common.Hash + blockHash common.Hash + blockNumber uint64 +} + +func (c *CommitBatchEvent) Type() int { + return CommitEventType +} + +func (c *CommitBatchEvent) BatchIndex() *big.Int { + return c.batchIndex +} + +func (c *CommitBatchEvent) BatchHash() common.Hash { + return c.batchHash +} + +func (c *CommitBatchEvent) TxHash() common.Hash { + return c.txHash +} + +func (c *CommitBatchEvent) BlockHash() common.Hash { + return c.blockHash +} + +func (c *CommitBatchEvent) BlockNumber() uint64 { + return c.blockNumber +} + +func (c *CommitBatchEvent) CompareTo(other *CommitBatchEvent) int { + return c.batchIndex.Cmp(other.batchIndex) +} + +type RevertBatchEventUnpacked struct { + BatchIndex *big.Int + BatchHash common.Hash +} + +// RevertBatchEvent represents a RevertBatch event raised by the ScrollChain contract. +type RevertBatchEvent struct { + batchIndex *big.Int + batchHash common.Hash + txHash common.Hash + blockHash common.Hash + blockNumber uint64 +} + +func (r *RevertBatchEvent) BlockNumber() uint64 { + return r.blockNumber +} + +func (r *RevertBatchEvent) BlockHash() common.Hash { + return r.blockHash +} + +func (r *RevertBatchEvent) TxHash() common.Hash { + return r.txHash +} + +func (r *RevertBatchEvent) Type() int { + return RevertEventType +} + +func (r *RevertBatchEvent) BatchIndex() *big.Int { + return r.batchIndex +} + +func (r *RevertBatchEvent) BatchHash() common.Hash { + return r.batchHash +} + +type FinalizeBatchEventUnpacked struct { + BatchIndex *big.Int + BatchHash common.Hash + StateRoot common.Hash + WithdrawRoot common.Hash +} + +// FinalizeBatchEvent represents a FinalizeBatch event raised by the ScrollChain contract. +type FinalizeBatchEvent struct { + batchIndex *big.Int + batchHash common.Hash + stateRoot common.Hash + withdrawRoot common.Hash + txHash common.Hash + blockHash common.Hash + blockNumber uint64 +} + +func (f *FinalizeBatchEvent) TxHash() common.Hash { + return f.txHash +} + +func (f *FinalizeBatchEvent) BlockHash() common.Hash { + return f.blockHash +} + +func (f *FinalizeBatchEvent) BlockNumber() uint64 { + return f.blockNumber +} + +func (f *FinalizeBatchEvent) Type() int { + return FinalizeEventType +} + +func (f *FinalizeBatchEvent) BatchIndex() *big.Int { + return f.batchIndex +} + +func (f *FinalizeBatchEvent) BatchHash() common.Hash { + return f.batchHash +} + +func (f *FinalizeBatchEvent) StateRoot() common.Hash { + return f.stateRoot +} + +func (f *FinalizeBatchEvent) WithdrawRoot() common.Hash { + return f.withdrawRoot +} + +// UnpackLog unpacks a retrieved log into the provided output structure. 
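+// It checks that Topics[0] matches the event's signature hash, decodes the
+// non-indexed fields from log.Data, and parses the indexed fields from the
+// remaining topics. A minimal usage sketch (vLog is a hypothetical types.Log):
+//
+//	var ev CommitBatchEventUnpacked
+//	if err := UnpackLog(ScrollChainABI, &ev, "CommitBatch", vLog); err != nil {
+//		// signature mismatch or decode failure
+//	}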
+func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error { + if log.Topics[0] != c.Events[event].ID { + return fmt.Errorf("event signature mismatch") + } + if len(log.Data) > 0 { + if err := c.UnpackIntoInterface(out, event, log.Data); err != nil { + return err + } + } + var indexed abi.Arguments + for _, arg := range c.Events[event].Inputs { + if arg.Indexed { + indexed = append(indexed, arg) + } + } + return abi.ParseTopics(out, indexed, log.Topics[1:]) +} + +type CommitBatchArgs struct { + Version uint8 + ParentBatchHeader []byte + Chunks [][]byte + SkippedL1MessageBitmap []byte +} + +func newCommitBatchArgs(method *abi.Method, values []interface{}) (*CommitBatchArgs, error) { + var args CommitBatchArgs + err := method.Inputs.Copy(&args, values) + return &args, err +} + +func newCommitBatchArgsFromCommitBatchWithProof(method *abi.Method, values []interface{}) (*CommitBatchArgs, error) { + var args commitBatchWithBlobProofArgs + err := method.Inputs.Copy(&args, values) + if err != nil { + return nil, err + } + return &CommitBatchArgs{ + Version: args.Version, + ParentBatchHeader: args.ParentBatchHeader, + Chunks: args.Chunks, + SkippedL1MessageBitmap: args.SkippedL1MessageBitmap, + }, nil +} + +type commitBatchWithBlobProofArgs struct { + Version uint8 + ParentBatchHeader []byte + Chunks [][]byte + SkippedL1MessageBitmap []byte + BlobDataProof []byte +} diff --git a/rollup/l1/abi_test.go b/rollup/l1/abi_test.go new file mode 100644 index 000000000000..ab4c9d473a16 --- /dev/null +++ b/rollup/l1/abi_test.go @@ -0,0 +1,82 @@ +package l1 + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/crypto" +) + +func TestEventSignatures(t *testing.T) { + scrollChainABI, err := ScrollChainMetaData.GetAbi() + if err != nil { + t.Fatal("failed to get scroll chain abi", "err", err) + } + + assert.Equal(t, crypto.Keccak256Hash([]byte("CommitBatch(uint256,bytes32)")), scrollChainABI.Events["CommitBatch"].ID) + assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), scrollChainABI.Events["RevertBatch"].ID) + assert.Equal(t, crypto.Keccak256Hash([]byte("FinalizeBatch(uint256,bytes32,bytes32,bytes32)")), scrollChainABI.Events["FinalizeBatch"].ID) +} + +func TestUnpackLog(t *testing.T) { + scrollChainABI, err := ScrollChainMetaData.GetAbi() + require.NoError(t, err) + + mockBatchIndex := big.NewInt(123) + mockBatchHash := crypto.Keccak256Hash([]byte("mockBatch")) + mockStateRoot := crypto.Keccak256Hash([]byte("mockStateRoot")) + mockWithdrawRoot := crypto.Keccak256Hash([]byte("mockWithdrawRoot")) + + tests := []struct { + eventName string + mockLog types.Log + expected interface{} + out interface{} + }{ + { + "CommitBatch", + types.Log{ + Data: []byte{}, + Topics: []common.Hash{scrollChainABI.Events["CommitBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + }, + &CommitBatchEvent{batchIndex: mockBatchIndex, batchHash: mockBatchHash}, + &CommitBatchEvent{}, + }, + { + "RevertBatch", + types.Log{ + Data: []byte{}, + Topics: []common.Hash{scrollChainABI.Events["RevertBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + }, + &RevertBatchEvent{batchIndex: mockBatchIndex, batchHash: mockBatchHash}, + &RevertBatchEvent{}, + }, + { + "FinalizeBatch", + types.Log{ + Data: append(mockStateRoot.Bytes(), mockWithdrawRoot.Bytes()...), + Topics: 
[]common.Hash{scrollChainABI.Events["FinalizeBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + }, + &FinalizeBatchEvent{ + batchIndex: mockBatchIndex, + batchHash: mockBatchHash, + stateRoot: mockStateRoot, + withdrawRoot: mockWithdrawRoot, + }, + &FinalizeBatchEvent{}, + }, + } + + for _, tt := range tests { + t.Run(tt.eventName, func(t *testing.T) { + err := UnpackLog(scrollChainABI, tt.out, tt.eventName, tt.mockLog) + assert.NoError(t, err) + assert.Equal(t, tt.expected, tt.out) + }) + } +} diff --git a/rollup/l1/l1msg_bindings.go b/rollup/l1/l1msg_bindings.go new file mode 100644 index 000000000000..679623818423 --- /dev/null +++ b/rollup/l1/l1msg_bindings.go @@ -0,0 +1,150 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +// generated using: +// forge flatten src/L1/rollup/L1MessageQueue.sol > flatten.sol +// go run github.com/scroll-tech/go-ethereum/cmd/abigen@develop --sol flatten.sol --pkg rollup --out ./L1MessageQueue.go --contract L1MessageQueue + +package l1 + +import ( + "math/big" + "strings" + + ethereum "github.com/scroll-tech/go-ethereum" + "github.com/scroll-tech/go-ethereum/accounts/abi" + "github.com/scroll-tech/go-ethereum/accounts/abi/bind" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" +) + +// L1MessageQueueMetaData contains all meta data concerning the L1MessageQueue contract. +var L1MessageQueueMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"count\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"skippedBitmap\",\"type\":\"uint256\"}],\"name\":\"DequeueTransaction\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"queueIndex\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"QueueTransaction\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldGateway\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newGateway\",\"type\":\"address\"}],\"name\":\"UpdateEnforcedTxGateway\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldGasOracle\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newGasOracle\",\"type\":\"address\"}],\"name\":\"UpdateGasOracle\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_oldMaxGasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_newMaxGasLimit\",\"type\":\"uint256\"}],\
"name\":\"UpdateMaxGasLimit\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"appendCrossDomainMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_sender\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"appendEnforcedTransaction\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_calldata\",\"type\":\"bytes\"}],\"name\":\"calculateIntrinsicGasFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_queueIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"computeTransactionHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"enforcedTxGateway\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"gasOracle\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_queueIndex\",\"type\":\"uint256\"}],\"name\":\"getCrossDomainMessage\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_messenger\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_scrollChain\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_enforcedTxGateway\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_gasOracle\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_maxGasLimit\",\"type\":\"uint256\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"maxGasLimit\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"messageQueue\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\
":\"messenger\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"nextCrossDomainMessageIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pendingQueueIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_count\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_skippedBitmap\",\"type\":\"uint256\"}],\"name\":\"popCrossDomainMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scrollChain\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newGateway\",\"type\":\"address\"}],\"name\":\"updateEnforcedTxGateway\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newGasOracle\",\"type\":\"address\"}],\"name\":\"updateGasOracle\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_newMaxGasLimit\",\"type\":\"uint256\"}],\"name\":\"updateMaxGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +// L1MessageQueueABI is the input ABI used to generate the binding from. +// Deprecated: Use L1MessageQueueMetaData.ABI instead. +var L1MessageQueueABI = L1MessageQueueMetaData.ABI + +// L1MessageQueueFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type L1MessageQueueFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// NewL1MessageQueueFilterer creates a new log filterer instance of L1MessageQueue, bound to a specific deployed contract. +func NewL1MessageQueueFilterer(address common.Address, filterer bind.ContractFilterer) (*L1MessageQueueFilterer, error) { + contract, err := bindL1MessageQueue(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &L1MessageQueueFilterer{contract: contract}, nil +} + +// bindL1MessageQueue binds a generic wrapper to an already deployed contract. 
+func bindL1MessageQueue(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(L1MessageQueueABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// L1MessageQueueQueueTransactionIterator is returned from FilterQueueTransaction and is used to iterate over the raw logs and unpacked data for QueueTransaction events raised by the L1MessageQueue contract. +type L1MessageQueueQueueTransactionIterator struct { + Event *L1MessageQueueQueueTransaction // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *L1MessageQueueQueueTransactionIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(L1MessageQueueQueueTransaction) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(L1MessageQueueQueueTransaction) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *L1MessageQueueQueueTransactionIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *L1MessageQueueQueueTransactionIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// L1MessageQueueQueueTransaction represents a QueueTransaction event raised by the L1MessageQueue contract. +type L1MessageQueueQueueTransaction struct { + Sender common.Address + Target common.Address + Value *big.Int + QueueIndex uint64 + GasLimit *big.Int + Data []byte + Raw types.Log // Blockchain specific contextual infos +} + +// FilterQueueTransaction is a free log retrieval operation binding the contract event 0x69cfcb8e6d4192b8aba9902243912587f37e550d75c1fa801491fce26717f37e. 
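+// The sender and target parameters filter on the event's indexed topics; a nil or empty slice matches any value.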
+// +// Solidity: event QueueTransaction(address indexed sender, address indexed target, uint256 value, uint64 queueIndex, uint256 gasLimit, bytes data) +func (_L1MessageQueue *L1MessageQueueFilterer) FilterQueueTransaction(opts *bind.FilterOpts, sender []common.Address, target []common.Address) (*L1MessageQueueQueueTransactionIterator, error) { + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + var targetRule []interface{} + for _, targetItem := range target { + targetRule = append(targetRule, targetItem) + } + + logs, sub, err := _L1MessageQueue.contract.FilterLogs(opts, "QueueTransaction", senderRule, targetRule) + if err != nil { + return nil, err + } + return &L1MessageQueueQueueTransactionIterator{contract: _L1MessageQueue.contract, event: "QueueTransaction", logs: logs, sub: sub}, nil +} diff --git a/rollup/l1/reader.go b/rollup/l1/reader.go new file mode 100644 index 000000000000..cc06296b657e --- /dev/null +++ b/rollup/l1/reader.go @@ -0,0 +1,381 @@ +package l1 + +import ( + "context" + "errors" + "fmt" + "math/big" + + "github.com/scroll-tech/go-ethereum" + "github.com/scroll-tech/go-ethereum/accounts/abi" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/rpc" +) + +const ( + commitBatchEventName = "CommitBatch" + revertBatchEventName = "RevertBatch" + finalizeBatchEventName = "FinalizeBatch" + nextUnfinalizedQueueIndex = "nextUnfinalizedQueueIndex" + lastFinalizedBatchIndex = "lastFinalizedBatchIndex" + + defaultL1MsgFetchBlockRange = 500 + defaultRollupEventsFetchBlockRange = 100 +) + +type Reader struct { + ctx context.Context + config Config + client Client + + scrollChainABI *abi.ABI + l1MessageQueueABI *abi.ABI + l1CommitBatchEventSignature common.Hash + l1RevertBatchEventSignature common.Hash + l1FinalizeBatchEventSignature common.Hash +} + +// Config is the configuration parameters of data availability syncing. 
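+// Both addresses are required: NewReader returns an error if either ScrollChainAddress or L1MessageQueueAddress is the zero address.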
+type Config struct { + ScrollChainAddress common.Address // address of ScrollChain contract + L1MessageQueueAddress common.Address // address of L1MessageQueue contract +} + +// NewReader initializes a new Reader instance +func NewReader(ctx context.Context, config Config, l1Client Client) (*Reader, error) { + if config.ScrollChainAddress == (common.Address{}) { + return nil, errors.New("must pass non-zero scrollChainAddress to L1Client") + } + + if config.L1MessageQueueAddress == (common.Address{}) { + return nil, errors.New("must pass non-zero l1MessageQueueAddress to L1Client") + } + + reader := Reader{ + ctx: ctx, + config: config, + client: l1Client, + + scrollChainABI: ScrollChainABI, + l1MessageQueueABI: L1MessageQueueABIManual, + l1CommitBatchEventSignature: ScrollChainABI.Events[commitBatchEventName].ID, + l1RevertBatchEventSignature: ScrollChainABI.Events[revertBatchEventName].ID, + l1FinalizeBatchEventSignature: ScrollChainABI.Events[finalizeBatchEventName].ID, + } + + return &reader, nil +} + +func (r *Reader) FinalizedL1MessageQueueIndex(blockNumber uint64) (uint64, error) { + data, err := r.l1MessageQueueABI.Pack(nextUnfinalizedQueueIndex) + if err != nil { + return 0, fmt.Errorf("failed to pack %s: %w", nextUnfinalizedQueueIndex, err) + } + + result, err := r.client.CallContract(r.ctx, ethereum.CallMsg{ + To: &r.config.L1MessageQueueAddress, + Data: data, + }, new(big.Int).SetUint64(blockNumber)) + if err != nil { + return 0, fmt.Errorf("failed to call %s: %w", nextUnfinalizedQueueIndex, err) + } + + var parsedResult *big.Int + if err = r.l1MessageQueueABI.UnpackIntoInterface(&parsedResult, nextUnfinalizedQueueIndex, result); err != nil { + return 0, fmt.Errorf("failed to unpack result: %w", err) + } + + next := parsedResult.Uint64() + if next == 0 { + return 0, nil + } + + return next - 1, nil +} + +func (r *Reader) LatestFinalizedBatch(blockNumber uint64) (uint64, error) { + data, err := r.scrollChainABI.Pack(lastFinalizedBatchIndex) + if err != nil { + return 0, fmt.Errorf("failed to pack %s: %w", lastFinalizedBatchIndex, err) + } + + result, err := r.client.CallContract(r.ctx, ethereum.CallMsg{ + To: &r.config.ScrollChainAddress, + Data: data, + }, new(big.Int).SetUint64(blockNumber)) + if err != nil { + return 0, fmt.Errorf("failed to call %s: %w", lastFinalizedBatchIndex, err) + } + + var parsedResult *big.Int + if err = r.scrollChainABI.UnpackIntoInterface(&parsedResult, lastFinalizedBatchIndex, result); err != nil { + return 0, fmt.Errorf("failed to unpack result: %w", err) + } + + return parsedResult.Uint64(), nil +} + +// GetLatestFinalizedBlockNumber fetches the block number of the latest finalized block from the L1 chain. 
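+// It resolves the rpc.FinalizedBlockNumber tag via HeaderByNumber and errors if the returned number does not fit into an int64.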
+func (r *Reader) GetLatestFinalizedBlockNumber() (uint64, error) { + header, err := r.client.HeaderByNumber(r.ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) + if err != nil { + return 0, err + } + if !header.Number.IsInt64() { + return 0, fmt.Errorf("received unexpected block number in L1Client: %v", header.Number) + } + return header.Number.Uint64(), nil +} + +// FetchBlockHeaderByNumber fetches the block header by number +func (r *Reader) FetchBlockHeaderByNumber(blockNumber uint64) (*types.Header, error) { + return r.client.HeaderByNumber(r.ctx, big.NewInt(int64(blockNumber))) +} + +// FetchTxData fetches tx data corresponding to given event log +func (r *Reader) FetchTxData(txHash, blockHash common.Hash) ([]byte, error) { + tx, err := r.fetchTx(txHash, blockHash) + if err != nil { + return nil, err + } + return tx.Data(), nil +} + +// FetchTxBlobHash fetches tx blob hash corresponding to given event log +func (r *Reader) FetchTxBlobHash(txHash, blockHash common.Hash) (common.Hash, error) { + tx, err := r.fetchTx(txHash, blockHash) + if err != nil { + return common.Hash{}, err + } + blobHashes := tx.BlobHashes() + if len(blobHashes) == 0 { + return common.Hash{}, fmt.Errorf("transaction does not contain any blobs, tx hash: %v", txHash.Hex()) + } + return blobHashes[0], nil +} + +// FetchRollupEventsInRange retrieves and parses commit/revert/finalize rollup events between block numbers: [from, to]. +func (r *Reader) FetchRollupEventsInRange(from, to uint64) (RollupEvents, error) { + log.Trace("L1Client fetchRollupEventsInRange", "fromBlock", from, "toBlock", to) + var logs []types.Log + + err := queryInBatches(r.ctx, from, to, defaultRollupEventsFetchBlockRange, func(from, to uint64) (bool, error) { + query := ethereum.FilterQuery{ + FromBlock: big.NewInt(int64(from)), // inclusive + ToBlock: big.NewInt(int64(to)), // inclusive + Addresses: []common.Address{ + r.config.ScrollChainAddress, + }, + Topics: make([][]common.Hash, 1), + } + query.Topics[0] = make([]common.Hash, 3) + query.Topics[0][0] = r.l1CommitBatchEventSignature + query.Topics[0][1] = r.l1RevertBatchEventSignature + query.Topics[0][2] = r.l1FinalizeBatchEventSignature + + logsBatch, err := r.client.FilterLogs(r.ctx, query) + if err != nil { + return false, fmt.Errorf("failed to filter logs, err: %w", err) + } + logs = append(logs, logsBatch...) + return true, nil + }) + if err != nil { + return nil, err + } + return r.processLogsToRollupEvents(logs) +} + +// FetchRollupEventsInRangeWithCallback retrieves and parses commit/revert/finalize rollup events between block numbers: [from, to]. 
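+// Events are decoded and handed to the callback in log order; returning false from
+// the callback stops the scan early without an error. A minimal usage sketch
+// (r and the block range are hypothetical):
+//
+//	err := r.FetchRollupEventsInRangeWithCallback(from, to, func(event RollupEvent) bool {
+//		log.Info("rollup event", "type", event.Type(), "batchIndex", event.BatchIndex())
+//		return true // continue scanning
+//	})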
+func (r *Reader) FetchRollupEventsInRangeWithCallback(from, to uint64, callback func(event RollupEvent) bool) error { + log.Trace("L1Client fetchRollupEventsInRange", "fromBlock", from, "toBlock", to) + + err := queryInBatches(r.ctx, from, to, defaultRollupEventsFetchBlockRange, func(from, to uint64) (bool, error) { + query := ethereum.FilterQuery{ + FromBlock: big.NewInt(int64(from)), // inclusive + ToBlock: big.NewInt(int64(to)), // inclusive + Addresses: []common.Address{ + r.config.ScrollChainAddress, + }, + Topics: make([][]common.Hash, 1), + } + query.Topics[0] = make([]common.Hash, 3) + query.Topics[0][0] = r.l1CommitBatchEventSignature + query.Topics[0][1] = r.l1RevertBatchEventSignature + query.Topics[0][2] = r.l1FinalizeBatchEventSignature + + logsBatch, err := r.client.FilterLogs(r.ctx, query) + if err != nil { + return false, fmt.Errorf("failed to filter logs, err: %w", err) + } + + rollupEvents, err := r.processLogsToRollupEvents(logsBatch) + if err != nil { + return false, fmt.Errorf("failed to process logs to rollup events, err: %w", err) + } + + for _, event := range rollupEvents { + if !callback(event) { + return false, nil + } + } + + return true, nil + }) + if err != nil { + return err + } + + return nil +} + +func (r *Reader) processLogsToRollupEvents(logs []types.Log) (RollupEvents, error) { + var rollupEvents RollupEvents + var rollupEvent RollupEvent + var err error + + for _, vLog := range logs { + switch vLog.Topics[0] { + case r.l1CommitBatchEventSignature: + event := &CommitBatchEventUnpacked{} + if err = UnpackLog(r.scrollChainABI, event, commitBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack commit rollup event log, err: %w", err) + } + log.Trace("found new CommitBatch event", "batch index", event.BatchIndex.Uint64()) + rollupEvent = &CommitBatchEvent{ + batchIndex: event.BatchIndex, + batchHash: event.BatchHash, + txHash: vLog.TxHash, + blockHash: vLog.BlockHash, + blockNumber: vLog.BlockNumber, + } + + case r.l1RevertBatchEventSignature: + event := &RevertBatchEventUnpacked{} + if err = UnpackLog(r.scrollChainABI, event, revertBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack revert rollup event log, err: %w", err) + } + log.Trace("found new RevertBatchType event", "batch index", event.BatchIndex.Uint64()) + rollupEvent = &RevertBatchEvent{ + batchIndex: event.BatchIndex, + batchHash: event.BatchHash, + txHash: vLog.TxHash, + blockHash: vLog.BlockHash, + blockNumber: vLog.BlockNumber, + } + + case r.l1FinalizeBatchEventSignature: + event := &FinalizeBatchEventUnpacked{} + if err = UnpackLog(r.scrollChainABI, event, finalizeBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack finalized rollup event log, err: %w", err) + } + log.Trace("found new FinalizeBatchType event", "batch index", event.BatchIndex.Uint64()) + rollupEvent = &FinalizeBatchEvent{ + batchIndex: event.BatchIndex, + batchHash: event.BatchHash, + stateRoot: event.StateRoot, + withdrawRoot: event.WithdrawRoot, + txHash: vLog.TxHash, + blockHash: vLog.BlockHash, + blockNumber: vLog.BlockNumber, + } + + default: + return nil, fmt.Errorf("unknown event, topic: %v, tx hash: %v", vLog.Topics[0].Hex(), vLog.TxHash.Hex()) + } + + rollupEvents = append(rollupEvents, rollupEvent) + } + return rollupEvents, nil +} + +func queryInBatches(ctx context.Context, fromBlock, toBlock uint64, batchSize uint64, queryFunc func(from, to uint64) (bool, error)) error { + for from := fromBlock; from <= toBlock; from += batchSize { + // 
check if context is done and return if it is + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + to := from + batchSize - 1 + if to > toBlock { + to = toBlock + } + cont, err := queryFunc(from, to) + if err != nil { + return fmt.Errorf("error querying blocks %d to %d: %w", from, to, err) + } + if !cont { + break + } + } + return nil +} + +// fetchTx fetches tx corresponding to given event log +func (r *Reader) fetchTx(txHash, blockHash common.Hash) (*types.Transaction, error) { + tx, _, err := r.client.TransactionByHash(r.ctx, txHash) + if err != nil { + log.Debug("failed to get transaction by hash, probably an unindexed transaction, fetching the whole block to get the transaction", + "tx hash", txHash.Hex(), "block hash", blockHash.Hex(), "err", err) + block, err := r.client.BlockByHash(r.ctx, blockHash) + if err != nil { + return nil, fmt.Errorf("failed to get block by hash, block hash: %v, err: %w", blockHash.Hex(), err) + } + + found := false + for _, txInBlock := range block.Transactions() { + if txInBlock.Hash() == txHash { + tx = txInBlock + found = true + break + } + } + if !found { + return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block hash: %v", txHash.Hex(), blockHash.Hex()) + } + } + + return tx, nil +} + +func (r *Reader) FetchCommitTxData(commitEvent *CommitBatchEvent) (*CommitBatchArgs, error) { + tx, err := r.fetchTx(commitEvent.TxHash(), commitEvent.BlockHash()) + if err != nil { + return nil, err + } + txData := tx.Data() + + if len(txData) < methodIDLength { + return nil, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength) + } + + method, err := r.scrollChainABI.MethodById(txData[:methodIDLength]) + if err != nil { + return nil, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err) + } + values, err := method.Inputs.Unpack(txData[methodIDLength:]) + if err != nil { + return nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err) + } + + var args *CommitBatchArgs + if method.Name == commitBatchMethodName { + args, err = newCommitBatchArgs(method, values) + if err != nil { + return nil, fmt.Errorf("failed to decode calldata into commitBatch args %s, values: %+v, err: %w", commitBatchMethodName, values, err) + } + } else if method.Name == commitBatchWithBlobProofMethodName { + args, err = newCommitBatchArgsFromCommitBatchWithProof(method, values) + if err != nil { + return nil, fmt.Errorf("failed to decode calldata into commitBatch args %s, values: %+v, err: %w", commitBatchWithBlobProofMethodName, values, err) + } + } else { + return nil, fmt.Errorf("unknown method name for commit transaction: %s", method.Name) + } + + return args, nil +} diff --git a/rollup/l1/reader_test.go b/rollup/l1/reader_test.go new file mode 100644 index 000000000000..5f4a2c95817a --- /dev/null +++ b/rollup/l1/reader_test.go @@ -0,0 +1,125 @@ +package l1 + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestQueryInBatches(t *testing.T) { + tests := []struct { + name string + fromBlock uint64 + toBlock uint64 + batchSize uint64 + queryFunc func(from, to uint64) (bool, error) + expectErr bool + expectedErr string + expectedCalls []struct { + from uint64 + to uint64 + } + }{ + { + name: "Successful query in single batch", + fromBlock: 1, + toBlock: 10, + batchSize: 10, + queryFunc: func(from, to uint64) (bool, error) { + return true, nil + }, + expectErr: false, 
+ expectedCalls: []struct { + from uint64 + to uint64 + }{ + {from: 1, to: 10}, + }, + }, + { + name: "Successful query in multiple batches", + fromBlock: 1, + toBlock: 80, + batchSize: 10, + queryFunc: func(from, to uint64) (bool, error) { + return true, nil + }, + expectErr: false, + expectedCalls: []struct { + from uint64 + to uint64 + }{ + {from: 1, to: 10}, + {from: 11, to: 20}, + {from: 21, to: 30}, + {from: 31, to: 40}, + {from: 41, to: 50}, + {from: 51, to: 60}, + {from: 61, to: 70}, + {from: 71, to: 80}, + }, + }, + { + name: "Query function returns error", + fromBlock: 1, + toBlock: 10, + batchSize: 10, + queryFunc: func(from, to uint64) (bool, error) { + return false, errors.New("query error") + }, + expectErr: true, + expectedErr: "error querying blocks 1 to 10: query error", + expectedCalls: []struct { + from uint64 + to uint64 + }{ + {from: 1, to: 10}, + }, + }, + { + name: "Query function returns false to stop", + fromBlock: 1, + toBlock: 20, + batchSize: 10, + queryFunc: func(from, to uint64) (bool, error) { + if from == 1 { + return false, nil + } + return true, nil + }, + expectErr: false, + expectedCalls: []struct { + from uint64 + to uint64 + }{ + {from: 1, to: 10}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var calls []struct { + from uint64 + to uint64 + } + queryFunc := func(from, to uint64) (bool, error) { + calls = append(calls, struct { + from uint64 + to uint64 + }{from, to}) + return tt.queryFunc(from, to) + } + err := queryInBatches(context.Background(), tt.fromBlock, tt.toBlock, tt.batchSize, queryFunc) + if tt.expectErr { + require.Error(t, err) + require.EqualError(t, err, tt.expectedErr) + } else { + require.NoError(t, err) + } + require.Equal(t, tt.expectedCalls, calls) + }) + } +} diff --git a/rollup/l1/types.go b/rollup/l1/types.go new file mode 100644 index 000000000000..8c030815ec28 --- /dev/null +++ b/rollup/l1/types.go @@ -0,0 +1,22 @@ +package l1 + +import ( + "context" + "math/big" + + "github.com/scroll-tech/go-ethereum" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" +) + +type Client interface { + BlockNumber(ctx context.Context) (uint64, error) + ChainID(ctx context.Context) (*big.Int, error) + FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) + HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) + HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) + SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) + TransactionByHash(ctx context.Context, txHash common.Hash) (tx *types.Transaction, isPending bool, err error) + BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) + CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) +} From e51182d752621e7cdb40d08d4e49471aac519afd Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Wed, 11 Dec 2024 10:51:08 +0800 Subject: [PATCH 10/17] fix tests and linter errors --- rollup/da_syncer/da/calldata_blob_source.go | 41 +++++--------- rollup/l1/abi_test.go | 59 ++++++++++----------- rollup/l1/reader.go | 1 - 3 files changed, 41 insertions(+), 60 deletions(-) diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index a7489c72c838..30ac5ca7f145 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ 
b/rollup/da_syncer/da/calldata_blob_source.go @@ -8,7 +8,6 @@ import ( "github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/go-ethereum/accounts/abi" - "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" @@ -16,15 +15,7 @@ import ( ) const ( - callDataBlobSourceFetchBlockRange uint64 = 500 - commitBatchEventName = "CommitBatch" - revertBatchEventName = "RevertBatch" - finalizeBatchEventName = "FinalizeBatch" - commitBatchMethodName = "commitBatch" - commitBatchWithBlobProofMethodName = "commitBatchWithBlobProof" - - // the length of method ID at the beginning of transaction data - methodIDLength = 4 + callDataBlobSourceFetchBlockRange uint64 = 500 ) var ( @@ -32,15 +23,12 @@ var ( ) type CalldataBlobSource struct { - ctx context.Context - l1Reader *l1.Reader - blobClient blob_client.BlobClient - l1height uint64 - scrollChainABI *abi.ABI - l1CommitBatchEventSignature common.Hash - l1RevertBatchEventSignature common.Hash - l1FinalizeBatchEventSignature common.Hash - db ethdb.Database + ctx context.Context + l1Reader *l1.Reader + blobClient blob_client.BlobClient + l1height uint64 + scrollChainABI *abi.ABI + db ethdb.Database l1Finalized uint64 } @@ -51,15 +39,12 @@ func NewCalldataBlobSource(ctx context.Context, l1height uint64, l1Reader *l1.Re return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) } return &CalldataBlobSource{ - ctx: ctx, - l1Reader: l1Reader, - blobClient: blobClient, - l1height: l1height, - scrollChainABI: scrollChainABI, - l1CommitBatchEventSignature: scrollChainABI.Events[commitBatchEventName].ID, - l1RevertBatchEventSignature: scrollChainABI.Events[revertBatchEventName].ID, - l1FinalizeBatchEventSignature: scrollChainABI.Events[finalizeBatchEventName].ID, - db: db, + ctx: ctx, + l1Reader: l1Reader, + blobClient: blobClient, + l1height: l1height, + scrollChainABI: scrollChainABI, + db: db, }, nil } diff --git a/rollup/l1/abi_test.go b/rollup/l1/abi_test.go index ab4c9d473a16..e50e8ccaa269 100644 --- a/rollup/l1/abi_test.go +++ b/rollup/l1/abi_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/types" @@ -13,20 +12,12 @@ import ( ) func TestEventSignatures(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - if err != nil { - t.Fatal("failed to get scroll chain abi", "err", err) - } - - assert.Equal(t, crypto.Keccak256Hash([]byte("CommitBatch(uint256,bytes32)")), scrollChainABI.Events["CommitBatch"].ID) - assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), scrollChainABI.Events["RevertBatch"].ID) - assert.Equal(t, crypto.Keccak256Hash([]byte("FinalizeBatch(uint256,bytes32,bytes32,bytes32)")), scrollChainABI.Events["FinalizeBatch"].ID) + assert.Equal(t, crypto.Keccak256Hash([]byte("CommitBatch(uint256,bytes32)")), ScrollChainABI.Events["CommitBatch"].ID) + assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), ScrollChainABI.Events["RevertBatch"].ID) + assert.Equal(t, crypto.Keccak256Hash([]byte("FinalizeBatch(uint256,bytes32,bytes32,bytes32)")), ScrollChainABI.Events["FinalizeBatch"].ID) } func TestUnpackLog(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - require.NoError(t, err) - mockBatchIndex := big.NewInt(123) mockBatchHash := 
crypto.Keccak256Hash([]byte("mockBatch")) mockStateRoot := crypto.Keccak256Hash([]byte("mockStateRoot")) @@ -39,42 +30,48 @@ func TestUnpackLog(t *testing.T) { out interface{} }{ { - "CommitBatch", + commitBatchEventName, types.Log{ - Data: []byte{}, - Topics: []common.Hash{scrollChainABI.Events["CommitBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + Data: nil, + Topics: []common.Hash{ScrollChainABI.Events[commitBatchEventName].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, }, - &CommitBatchEvent{batchIndex: mockBatchIndex, batchHash: mockBatchHash}, - &CommitBatchEvent{}, + &CommitBatchEventUnpacked{ + BatchIndex: mockBatchIndex, + BatchHash: mockBatchHash, + }, + &CommitBatchEventUnpacked{}, }, { - "RevertBatch", + revertBatchEventName, types.Log{ - Data: []byte{}, - Topics: []common.Hash{scrollChainABI.Events["RevertBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + Data: nil, + Topics: []common.Hash{ScrollChainABI.Events[revertBatchEventName].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + }, + &RevertBatchEventUnpacked{ + BatchIndex: mockBatchIndex, + BatchHash: mockBatchHash, }, - &RevertBatchEvent{batchIndex: mockBatchIndex, batchHash: mockBatchHash}, - &RevertBatchEvent{}, + &RevertBatchEventUnpacked{}, }, { - "FinalizeBatch", + finalizeBatchEventName, types.Log{ Data: append(mockStateRoot.Bytes(), mockWithdrawRoot.Bytes()...), - Topics: []common.Hash{scrollChainABI.Events["FinalizeBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + Topics: []common.Hash{ScrollChainABI.Events[finalizeBatchEventName].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, }, - &FinalizeBatchEvent{ - batchIndex: mockBatchIndex, - batchHash: mockBatchHash, - stateRoot: mockStateRoot, - withdrawRoot: mockWithdrawRoot, + &FinalizeBatchEventUnpacked{ + BatchIndex: mockBatchIndex, + BatchHash: mockBatchHash, + StateRoot: mockStateRoot, + WithdrawRoot: mockWithdrawRoot, }, - &FinalizeBatchEvent{}, + &FinalizeBatchEventUnpacked{}, }, } for _, tt := range tests { t.Run(tt.eventName, func(t *testing.T) { - err := UnpackLog(scrollChainABI, tt.out, tt.eventName, tt.mockLog) + err := UnpackLog(ScrollChainABI, tt.out, tt.eventName, tt.mockLog) assert.NoError(t, err) assert.Equal(t, tt.expected, tt.out) }) diff --git a/rollup/l1/reader.go b/rollup/l1/reader.go index cc06296b657e..eddc77d71350 100644 --- a/rollup/l1/reader.go +++ b/rollup/l1/reader.go @@ -21,7 +21,6 @@ const ( nextUnfinalizedQueueIndex = "nextUnfinalizedQueueIndex" lastFinalizedBatchIndex = "lastFinalizedBatchIndex" - defaultL1MsgFetchBlockRange = 500 defaultRollupEventsFetchBlockRange = 100 ) From 4e6f759c79eb5aaf3277fba23d684c86380c41c0 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Thu, 12 Dec 2024 11:23:32 +0700 Subject: [PATCH 11/17] address review comments --- rollup/da_syncer/da/calldata_blob_source.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index 30ac5ca7f145..5b665aa0160f 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -26,7 +26,7 @@ type CalldataBlobSource struct { ctx context.Context l1Reader *l1.Reader blobClient blob_client.BlobClient - l1height uint64 + l1Height uint64 scrollChainABI *abi.ABI db ethdb.Database @@ -42,7 +42,7 @@ func NewCalldataBlobSource(ctx context.Context, l1height uint64, l1Reader *l1.Re ctx: ctx, l1Reader: l1Reader, 
blobClient: blobClient, - l1height: l1height, + l1Height: l1height, scrollChainABI: scrollChainABI, db: db, }, nil @@ -50,7 +50,7 @@ func NewCalldataBlobSource(ctx context.Context, l1height uint64, l1Reader *l1.Re func (ds *CalldataBlobSource) NextData() (Entries, error) { var err error - to := ds.l1height + callDataBlobSourceFetchBlockRange + to := ds.l1Height + callDataBlobSourceFetchBlockRange // If there's not enough finalized blocks to request up to, we need to query finalized block number. // Otherwise, we know that there's more finalized blocks than we want to request up to @@ -64,25 +64,25 @@ func (ds *CalldataBlobSource) NextData() (Entries, error) { to = min(to, ds.l1Finalized) } - if ds.l1height > to { + if ds.l1Height > to { return nil, ErrSourceExhausted } - rollupEvents, err := ds.l1Reader.FetchRollupEventsInRange(ds.l1height, to) + rollupEvents, err := ds.l1Reader.FetchRollupEventsInRange(ds.l1Height, to) if err != nil { - return nil, serrors.NewTemporaryError(fmt.Errorf("cannot get rollup events, l1height: %d, error: %v", ds.l1height, err)) + return nil, serrors.NewTemporaryError(fmt.Errorf("cannot get rollup events, l1Height: %d, error: %v", ds.l1Height, err)) } da, err := ds.processRollupEventsToDA(rollupEvents) if err != nil { return nil, serrors.NewTemporaryError(fmt.Errorf("failed to process rollup events to DA, error: %v", err)) } - ds.l1height = to + 1 + ds.l1Height = to + 1 return da, nil } func (ds *CalldataBlobSource) L1Height() uint64 { - return ds.l1height + return ds.l1Height } func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEvents) (Entries, error) { From ab3e8732f99126c143050fd132387b44a5a43144 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Thu, 26 Dec 2024 15:00:45 +0800 Subject: [PATCH 12/17] refactor rollup sync service / verifier to use CalldataBlobSource to retrieve data from L1 --- rollup/da_syncer/da/calldata_blob_source.go | 24 +- rollup/da_syncer/da/commitV0.go | 27 +- rollup/da_syncer/da/commitV1.go | 21 +- rollup/da_syncer/da/da.go | 8 + rollup/da_syncer/da/finalize.go | 20 +- rollup/da_syncer/da/revert.go | 20 +- rollup/l1/reader.go | 13 +- .../rollup_sync_service.go | 340 ++++++------------ 8 files changed, 210 insertions(+), 263 deletions(-) diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index 5b665aa0160f..bf4a2a24ef2c 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -81,10 +81,18 @@ func (ds *CalldataBlobSource) NextData() (Entries, error) { return da, nil } +func (ds *CalldataBlobSource) SetL1Height(l1Height uint64) { + ds.l1Height = l1Height +} + func (ds *CalldataBlobSource) L1Height() uint64 { return ds.l1Height } +func (ds *CalldataBlobSource) L1Finalized() uint64 { + return ds.l1Finalized +} + func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEvents) (Entries, error) { var entries Entries var entry Entry @@ -102,10 +110,22 @@ func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEven } case l1.RevertEventType: - entry = NewRevertBatch(rollupEvent.BatchIndex().Uint64()) + revertEvent, ok := rollupEvent.(*l1.RevertBatchEvent) + // this should never happen because we just check event type + if !ok { + return nil, fmt.Errorf("unexpected type of rollup event: %T", rollupEvent) + } + + entry = NewRevertBatch(revertEvent) case l1.FinalizeEventType: - entry = NewFinalizeBatch(rollupEvent.BatchIndex().Uint64()) 
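+		// Carry the full FinalizeBatchEvent (not just the batch index) so the DA entry can expose tx hash, block hash and block number via Event().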
+ finalizeEvent, ok := rollupEvent.(*l1.FinalizeBatchEvent) + // this should never happen because we just check event type + if !ok { + return nil, fmt.Errorf("unexpected type of rollup event: %T", rollupEvent) + } + + entry = NewFinalizeBatch(finalizeEvent) default: return nil, fmt.Errorf("unknown rollup event, type: %v", rollupEvent.Type()) diff --git a/rollup/da_syncer/da/commitV0.go b/rollup/da_syncer/da/commitV0.go index 2c4f07869da1..960151e6cda4 100644 --- a/rollup/da_syncer/da/commitV0.go +++ b/rollup/da_syncer/da/commitV0.go @@ -6,6 +6,7 @@ import ( "github.com/scroll-tech/da-codec/encoding" + "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/ethdb" @@ -21,7 +22,7 @@ type CommitBatchDAV0 struct { chunks []*encoding.DAChunkRawTx l1Txs []*types.L1MessageTx - l1BlockNumber uint64 + event *l1.CommitBatchEvent } func NewCommitBatchDAV0(db ethdb.Database, @@ -36,7 +37,7 @@ func NewCommitBatchDAV0(db ethdb.Database, return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", commitEvent.BatchIndex().Uint64(), err) } - return NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent.BlockNumber()) + return NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent) } func NewCommitBatchDAV0WithChunks(db ethdb.Database, @@ -45,7 +46,7 @@ func NewCommitBatchDAV0WithChunks(db ethdb.Database, parentBatchHeader []byte, decodedChunks []*encoding.DAChunkRawTx, skippedL1MessageBitmap []byte, - l1BlockNumber uint64, + event *l1.CommitBatchEvent, ) (*CommitBatchDAV0, error) { parentTotalL1MessagePopped := getBatchTotalL1MessagePopped(parentBatchHeader) l1Txs, err := getL1Messages(db, parentTotalL1MessagePopped, skippedL1MessageBitmap, getTotalMessagesPoppedFromChunks(decodedChunks)) @@ -60,7 +61,7 @@ func NewCommitBatchDAV0WithChunks(db ethdb.Database, skippedL1MessageBitmap: skippedL1MessageBitmap, chunks: decodedChunks, l1Txs: l1Txs, - l1BlockNumber: l1BlockNumber, + event: event, }, nil } @@ -70,12 +71,28 @@ func NewCommitBatchDAV0Empty() *CommitBatchDAV0 { } } +func (c *CommitBatchDAV0) Version() uint8 { + return c.version +} + +func (c *CommitBatchDAV0) Chunks() []*encoding.DAChunkRawTx { + return c.chunks +} + +func (c *CommitBatchDAV0) BlobVersionedHashes() []common.Hash { + return nil +} + func (c *CommitBatchDAV0) Type() Type { return CommitBatchV0Type } func (c *CommitBatchDAV0) L1BlockNumber() uint64 { - return c.l1BlockNumber + return c.event.BlockNumber() +} + +func (c *CommitBatchDAV0) Event() l1.RollupEvent { + return c.event } func (c *CommitBatchDAV0) BatchIndex() uint64 { diff --git a/rollup/da_syncer/da/commitV1.go b/rollup/da_syncer/da/commitV1.go index 0433479c950b..29eb065ed3e8 100644 --- a/rollup/da_syncer/da/commitV1.go +++ b/rollup/da_syncer/da/commitV1.go @@ -17,6 +17,8 @@ import ( type CommitBatchDAV1 struct { *CommitBatchDAV0 + + versionedHashes []common.Hash } func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, @@ -33,11 +35,17 @@ func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, return nil, fmt.Errorf("failed to unpack chunks: %v, err: %w", commitEvent.BatchIndex().Uint64(), err) } - versionedHash, err := l1Reader.FetchTxBlobHash(commitEvent.TxHash(), commitEvent.BlockHash()) + versionedHashes, err := 
l1Reader.FetchTxBlobHashes(commitEvent.TxHash(), commitEvent.BlockHash()) if err != nil { return nil, fmt.Errorf("failed to fetch blob hash, err: %w", err) } + // with CommitBatchDAV1 we expect only one versioned hash as we commit only one blob per batch submission + if len(versionedHashes) != 1 { + return nil, fmt.Errorf("unexpected number of versioned hashes: %d", len(versionedHashes)) + } + versionedHash := versionedHashes[0] + header, err := l1Reader.FetchBlockHeaderByNumber(commitEvent.BlockNumber()) if err != nil { return nil, fmt.Errorf("failed to get header by number, err: %w", err) @@ -70,14 +78,21 @@ func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, return nil, fmt.Errorf("decodedChunks is nil after decoding") } - v0, err := NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent.BlockNumber()) + v0, err := NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent) if err != nil { return nil, err } - return &CommitBatchDAV1{v0}, nil + return &CommitBatchDAV1{ + CommitBatchDAV0: v0, + versionedHashes: versionedHashes, + }, nil } func (c *CommitBatchDAV1) Type() Type { return CommitBatchWithBlobType } + +func (c *CommitBatchDAV1) BlobVersionedHashes() []common.Hash { + return c.versionedHashes +} diff --git a/rollup/da_syncer/da/da.go b/rollup/da_syncer/da/da.go index 1ad618d7ba3d..2773da2951be 100644 --- a/rollup/da_syncer/da/da.go +++ b/rollup/da_syncer/da/da.go @@ -3,7 +3,11 @@ package da import ( "math/big" + "github.com/scroll-tech/da-codec/encoding" + + "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) type Type int @@ -25,11 +29,15 @@ type Entry interface { BatchIndex() uint64 L1BlockNumber() uint64 CompareTo(Entry) int + Event() l1.RollupEvent } type EntryWithBlocks interface { Entry Blocks() []*PartialBlock + Version() uint8 + Chunks() []*encoding.DAChunkRawTx + BlobVersionedHashes() []common.Hash } type Entries []Entry diff --git a/rollup/da_syncer/da/finalize.go b/rollup/da_syncer/da/finalize.go index 14d6c2a644cb..eab805d52482 100644 --- a/rollup/da_syncer/da/finalize.go +++ b/rollup/da_syncer/da/finalize.go @@ -1,14 +1,16 @@ package da -type FinalizeBatch struct { - batchIndex uint64 +import ( + "github.com/scroll-tech/go-ethereum/rollup/l1" +) - l1BlockNumber uint64 +type FinalizeBatch struct { + event *l1.FinalizeBatchEvent } -func NewFinalizeBatch(batchIndex uint64) *FinalizeBatch { +func NewFinalizeBatch(event *l1.FinalizeBatchEvent) *FinalizeBatch { return &FinalizeBatch{ - batchIndex: batchIndex, + event: event, } } @@ -17,11 +19,15 @@ func (f *FinalizeBatch) Type() Type { } func (f *FinalizeBatch) L1BlockNumber() uint64 { - return f.l1BlockNumber + return f.event.BlockNumber() } func (f *FinalizeBatch) BatchIndex() uint64 { - return f.batchIndex + return f.event.BatchIndex().Uint64() +} + +func (f *FinalizeBatch) Event() l1.RollupEvent { + return f.event } func (f *FinalizeBatch) CompareTo(other Entry) int { diff --git a/rollup/da_syncer/da/revert.go b/rollup/da_syncer/da/revert.go index d84f22ebaa7b..f8120fd3f150 100644 --- a/rollup/da_syncer/da/revert.go +++ b/rollup/da_syncer/da/revert.go @@ -1,14 +1,16 @@ package da -type RevertBatch struct { - batchIndex uint64 +import ( + "github.com/scroll-tech/go-ethereum/rollup/l1" +) - l1BlockNumber 
uint64 +type RevertBatch struct { + event *l1.RevertBatchEvent } -func NewRevertBatch(batchIndex uint64) *RevertBatch { +func NewRevertBatch(event *l1.RevertBatchEvent) *RevertBatch { return &RevertBatch{ - batchIndex: batchIndex, + event: event, } } @@ -17,10 +19,14 @@ func (r *RevertBatch) Type() Type { } func (r *RevertBatch) L1BlockNumber() uint64 { - return r.l1BlockNumber + return r.event.BlockNumber() } func (r *RevertBatch) BatchIndex() uint64 { - return r.batchIndex + return r.event.BatchIndex().Uint64() +} + +func (r *RevertBatch) Event() l1.RollupEvent { + return r.event } func (r *RevertBatch) CompareTo(other Entry) int { diff --git a/rollup/l1/reader.go b/rollup/l1/reader.go index eddc77d71350..2902b48caefa 100644 --- a/rollup/l1/reader.go +++ b/rollup/l1/reader.go @@ -139,20 +139,23 @@ func (r *Reader) FetchTxData(txHash, blockHash common.Hash) ([]byte, error) { if err != nil { return nil, err } + return tx.Data(), nil } -// FetchTxBlobHash fetches tx blob hash corresponding to given event log -func (r *Reader) FetchTxBlobHash(txHash, blockHash common.Hash) (common.Hash, error) { +// FetchTxBlobHashes fetches tx blob hash corresponding to given event log +func (r *Reader) FetchTxBlobHashes(txHash, blockHash common.Hash) ([]common.Hash, error) { tx, err := r.fetchTx(txHash, blockHash) if err != nil { - return common.Hash{}, err + return nil, fmt.Errorf("failed to fetch tx, tx hash: %v, block hash: %v, err: %w", txHash.Hex(), blockHash.Hex(), err) } + blobHashes := tx.BlobHashes() if len(blobHashes) == 0 { - return common.Hash{}, fmt.Errorf("transaction does not contain any blobs, tx hash: %v", txHash.Hex()) + return nil, fmt.Errorf("transaction does not contain any blobs, tx hash: %v", txHash.Hex()) } - return blobHashes[0], nil + + return blobHashes, nil } // FetchRollupEventsInRange retrieves and parses commit/revert/finalize rollup events between block numbers: [from, to]. diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index bbb2b4940393..58ba26bf21e4 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -3,25 +3,23 @@ package rollup_sync_service import ( "context" "encoding/json" + "errors" "fmt" "os" - "reflect" "sync" "time" "github.com/scroll-tech/da-codec/encoding" - "github.com/scroll-tech/go-ethereum/accounts/abi" - "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core/rawdb" - "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/node" "github.com/scroll-tech/go-ethereum/params" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" + "github.com/scroll-tech/go-ethereum/rollup/l1" "github.com/scroll-tech/go-ethereum/rollup/rcfg" - "github.com/scroll-tech/go-ethereum/rollup/sync_service" "github.com/scroll-tech/go-ethereum/rollup/withdrawtrie" ) @@ -46,41 +44,21 @@ const ( // RollupSyncService collects ScrollChain batch commit/revert/finalize events and stores metadata into db. 
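+// After the refactor below it no longer filters L1 logs itself: it drains a
+// da.CalldataBlobSource, whose internal L1 height also serves as the service's
+// sync progress marker.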
type RollupSyncService struct { - ctx context.Context - cancel context.CancelFunc - client *L1Client - db ethdb.Database - latestProcessedBlock uint64 - scrollChainABI *abi.ABI - l1CommitBatchEventSignature common.Hash - l1RevertBatchEventSignature common.Hash - l1FinalizeBatchEventSignature common.Hash - bc *core.BlockChain - stack *node.Node - stateMu sync.Mutex + ctx context.Context + cancel context.CancelFunc + db ethdb.Database + bc *core.BlockChain + stack *node.Node + stateMu sync.Mutex + + callDataBlobSource *da.CalldataBlobSource } -func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig, db ethdb.Database, l1Client sync_service.EthClient, bc *core.BlockChain, stack *node.Node) (*RollupSyncService, error) { - // terminate if the caller does not provide an L1 client (e.g. in tests) - if l1Client == nil || (reflect.ValueOf(l1Client).Kind() == reflect.Ptr && reflect.ValueOf(l1Client).IsNil()) { - log.Warn("No L1 client provided, L1 rollup sync service will not run") - return nil, nil - } - +func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig, db ethdb.Database, l1Client l1.Client, bc *core.BlockChain, stack *node.Node) (*RollupSyncService, error) { if genesisConfig.Scroll.L1Config == nil { return nil, fmt.Errorf("missing L1 config in genesis") } - scrollChainABI, err := ScrollChainMetaData.GetAbi() - if err != nil { - return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) - } - - client, err := NewL1Client(ctx, l1Client, genesisConfig.Scroll.L1Config.L1ChainId, genesisConfig.Scroll.L1Config.ScrollChainAddress, scrollChainABI) - if err != nil { - return nil, fmt.Errorf("failed to initialize l1 client: %w", err) - } - // Initialize the latestProcessedBlock with the block just before the L1 deployment block. // This serves as a default value when there's no L1 rollup events synced in the database. 
var latestProcessedBlock uint64 @@ -94,20 +72,31 @@ func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig latestProcessedBlock = *block } + l1Reader, err := l1.NewReader(ctx, l1.Config{ + ScrollChainAddress: genesisConfig.Scroll.L1Config.ScrollChainAddress, + L1MessageQueueAddress: genesisConfig.Scroll.L1Config.L1MessageQueueAddress, + }, l1Client) + if err != nil { + return nil, fmt.Errorf("failed to initialize l1.Reader, err = %w", err) + } + + // TODO: create blob clients based on new config parameters + + calldataBlobSource, err := da.NewCalldataBlobSource(ctx, latestProcessedBlock, l1Reader, nil, db) + if err != nil { + return nil, fmt.Errorf("failed to create calldata blob source: %w", err) + } + ctx, cancel := context.WithCancel(ctx) service := RollupSyncService{ - ctx: ctx, - cancel: cancel, - client: client, - db: db, - latestProcessedBlock: latestProcessedBlock, - scrollChainABI: scrollChainABI, - l1CommitBatchEventSignature: scrollChainABI.Events["CommitBatch"].ID, - l1RevertBatchEventSignature: scrollChainABI.Events["RevertBatch"].ID, - l1FinalizeBatchEventSignature: scrollChainABI.Events["FinalizeBatch"].ID, - bc: bc, - stack: stack, + ctx: ctx, + cancel: cancel, + db: db, + bc: bc, + stack: stack, + + callDataBlobSource: calldataBlobSource, } return &service, nil @@ -118,7 +107,7 @@ func (s *RollupSyncService) Start() { return } - log.Info("Starting rollup event sync background service", "latest processed block", s.latestProcessedBlock) + log.Info("Starting rollup event sync background service", "latest processed block", s.callDataBlobSource.L1Height()) go func() { syncTicker := time.NewTicker(defaultSyncInterval) @@ -132,9 +121,12 @@ func (s *RollupSyncService) Start() { case <-s.ctx.Done(): return case <-syncTicker.C: - s.fetchRollupEvents() + err := s.fetchRollupEvents() + if err != nil { + log.Error("failed to fetch rollup events", "err", err) + } case <-logTicker.C: - log.Info("Sync rollup events progress update", "latestProcessedBlock", s.latestProcessedBlock) + log.Info("Sync rollup events progress update", "latestProcessedBlock", s.callDataBlobSource.L1Height()) } } }() @@ -161,90 +153,79 @@ func (s *RollupSyncService) ResetStartSyncHeight(height uint64) { s.stateMu.Lock() defer s.stateMu.Unlock() - s.latestProcessedBlock = height + s.callDataBlobSource.SetL1Height(height) log.Info("Reset sync service", "height", height) } -func (s *RollupSyncService) fetchRollupEvents() { +func (s *RollupSyncService) fetchRollupEvents() error { s.stateMu.Lock() defer s.stateMu.Unlock() - latestConfirmed, err := s.client.GetLatestFinalizedBlockNumber() - if err != nil { - log.Warn("failed to get latest confirmed block number", "err", err) - return - } - - log.Trace("Sync service fetch rollup events", "latest processed block", s.latestProcessedBlock, "latest confirmed", latestConfirmed) + for { + prevL1Height := s.callDataBlobSource.L1Height() - // query in batches - for from := s.latestProcessedBlock + 1; from <= latestConfirmed; from += defaultFetchBlockRange { - if s.ctx.Err() != nil { - log.Info("Context canceled", "reason", s.ctx.Err()) - return - } + daEntries, err := s.callDataBlobSource.NextData() + if err != nil { + if errors.Is(err, da.ErrSourceExhausted) { + log.Trace("Sync service exhausted data source, waiting for next data") + return nil + } - to := from + defaultFetchBlockRange - 1 - if to > latestConfirmed { - to = latestConfirmed + return fmt.Errorf("failed to get next data: %w", err) } - logs, err := s.client.FetchRollupEventsInRange(from, 
to) - if err != nil { - log.Error("failed to fetch rollup events in range", "from block", from, "to block", to, "err", err) - return + if err = s.updateRollupEvents(daEntries); err != nil { + // Reset the L1 height to the previous value to retry fetching the same data. + s.callDataBlobSource.SetL1Height(prevL1Height) + return fmt.Errorf("failed to parse and update rollup event logs: %w", err) } - if err := s.parseAndUpdateRollupEventLogs(logs, to); err != nil { - log.Error("failed to parse and update rollup event logs", "err", err) - return - } + log.Trace("Sync service fetched rollup events", "latest processed L1 block", s.callDataBlobSource.L1Height(), "latest finalized L1 block", s.callDataBlobSource.L1Finalized()) - s.latestProcessedBlock = to + // note: the batch updates in updateRollupEvents are idempotent, if we crash + // before this line and re-execute the previous steps, we will get the same result. + rawdb.WriteRollupEventSyncedL1BlockNumber(s.db, s.callDataBlobSource.L1Height()) } } -func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endBlockNumber uint64) error { - for _, vLog := range logs { - switch vLog.Topics[0] { - case s.l1CommitBatchEventSignature: - event := &L1CommitBatchEvent{} - if err := UnpackLog(s.scrollChainABI, event, "CommitBatch", vLog); err != nil { - return fmt.Errorf("failed to unpack commit rollup event log, err: %w", err) +func (s *RollupSyncService) updateRollupEvents(daEntries da.Entries) error { + for _, entry := range daEntries { + switch entry.Type() { + case da.CommitBatchV0Type, da.CommitBatchWithBlobType: + log.Trace("found new CommitBatch event", "batch index", entry.BatchIndex()) + + entryWithBlocks, ok := entry.(da.EntryWithBlocks) + if !ok { + return fmt.Errorf("failed to cast to EntryWithBlocks, batch index: %v", entry.BatchIndex()) } - batchIndex := event.BatchIndex.Uint64() - log.Trace("found new CommitBatch event", "batch index", batchIndex) - committedBatchMeta, err := s.getCommittedBatchMeta(batchIndex, &vLog) + committedBatchMeta, err := s.getCommittedBatchMeta(entryWithBlocks) if err != nil { - return fmt.Errorf("failed to get chunk ranges, batch index: %v, err: %w", batchIndex, err) + return fmt.Errorf("failed to get committed batch meta, batch index: %v, err: %w", entry.BatchIndex(), err) } - rawdb.WriteCommittedBatchMeta(s.db, batchIndex, committedBatchMeta) - case s.l1RevertBatchEventSignature: - event := &L1RevertBatchEvent{} - if err := UnpackLog(s.scrollChainABI, event, "RevertBatch", vLog); err != nil { - return fmt.Errorf("failed to unpack revert rollup event log, err: %w", err) - } - batchIndex := event.BatchIndex.Uint64() - log.Trace("found new RevertBatch event", "batch index", batchIndex) + rawdb.WriteCommittedBatchMeta(s.db, entry.BatchIndex(), committedBatchMeta) - rawdb.DeleteCommittedBatchMeta(s.db, batchIndex) + case da.RevertBatchType: + log.Trace("found new RevertBatch event", "batch index", entry.BatchIndex()) + rawdb.DeleteCommittedBatchMeta(s.db, entry.BatchIndex()) - case s.l1FinalizeBatchEventSignature: - event := &L1FinalizeBatchEvent{} - if err := UnpackLog(s.scrollChainABI, event, "FinalizeBatch", vLog); err != nil { - return fmt.Errorf("failed to unpack finalized rollup event log, err: %w", err) + case da.FinalizeBatchType: + event, ok := entry.Event().(*l1.FinalizeBatchEvent) + // This should never happen because we just checked the batch type + if !ok { + return fmt.Errorf("failed to cast to FinalizeBatchEvent, batch index: %v", entry.BatchIndex()) } - batchIndex := 
event.BatchIndex.Uint64()
+
+			batchIndex := entry.BatchIndex()
 			log.Trace("found new FinalizeBatch event", "batch index", batchIndex)
 
 			lastFinalizedBatchIndex := rawdb.ReadLastFinalizedBatchIndex(s.db)
 
-			// After darwin, FinalizeBatch event emitted every bundle, which contains multiple batches.
-			// Therefore there are a range of finalized batches need to be saved into db.
+			// After Darwin, the FinalizeBatch event is emitted once per bundle, which contains multiple batches.
+			// Therefore, a range of finalized batches needs to be saved into the db.
 			//
-			// The range logic also applies to the batches before darwin when FinalizeBatch event emitted
+			// The range logic also applies to the batches before Darwin, when the FinalizeBatch event was emitted
 			// per single batch. In this situation, `batchIndex` just equals to `*lastFinalizedBatchIndex + 1`
 			// and only one batch is processed through the for loop.
 			startBatchIndex := batchIndex
@@ -293,14 +274,10 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB
 			log.Debug("write finalized l2 block number", "batch index", batchIndex, "finalized l2 block height", highestFinalizedBlockNumber)
 
 		default:
-			return fmt.Errorf("unknown event, topic: %v, tx hash: %v", vLog.Topics[0].Hex(), vLog.TxHash.Hex())
+			return fmt.Errorf("unknown daEntry, type: %d, batch index: %d", entry.Type(), entry.BatchIndex())
 		}
 	}
 
-	// note: the batch updates above are idempotent, if we crash
-	// before this line and reexecute the previous steps, we will
-	// get the same result.
-	rawdb.WriteRollupEventSyncedL1BlockNumber(s.db, endBlockNumber)
 	return nil
 }
 
@@ -355,8 +332,8 @@ func (s *RollupSyncService) getLocalChunksForBatch(chunkBlockRanges []*rawdb.Chu
 	return chunks, nil
 }
 
-func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types.Log) (*rawdb.CommittedBatchMeta, error) {
-	if batchIndex == 0 {
+func (s *RollupSyncService) getCommittedBatchMeta(commitedBatch da.EntryWithBlocks) (*rawdb.CommittedBatchMeta, error) {
+	if commitedBatch.BatchIndex() == 0 {
 		return &rawdb.CommittedBatchMeta{
 			Version:             0,
 			BlobVersionedHashes: nil,
@@ -364,111 +341,16 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types
 		}, nil
 	}
 
-	tx, _, err := s.client.client.TransactionByHash(s.ctx, vLog.TxHash)
-	if err != nil {
-		log.Debug("failed to get transaction by hash, probably an unindexed transaction, fetching the whole block to get the transaction",
-			"tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err)
-		block, err := s.client.client.BlockByHash(s.ctx, vLog.BlockHash)
-		if err != nil {
-			return nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err)
-		}
-
-		if block == nil {
-			return nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex())
-		}
-
-		found := false
-		for _, txInBlock := range block.Transactions() {
-			if txInBlock.Hash() == vLog.TxHash {
-				tx = txInBlock
-				found = true
-				break
-			}
-		}
-		if !found {
-			return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex())
-		}
-	}
-
-	var commitBatchMeta rawdb.CommittedBatchMeta
-
-	if tx.Type() == types.BlobTxType {
-		blobVersionedHashes := tx.BlobHashes()
-		if blobVersionedHashes == nil {
-			return nil, fmt.Errorf("invalid blob transaction, blob hashes is nil, tx
hash: %v", tx.Hash().Hex()) - } - commitBatchMeta.BlobVersionedHashes = blobVersionedHashes - } - - version, ranges, err := s.decodeBatchVersionAndChunkBlockRanges(tx.Data()) - if err != nil { - return nil, fmt.Errorf("failed to decode chunk block ranges, batch index: %v, err: %w", batchIndex, err) - } - - commitBatchMeta.Version = version - commitBatchMeta.ChunkBlockRanges = ranges - return &commitBatchMeta, nil -} - -// decodeBatchVersionAndChunkBlockRanges decodes version and chunks' block ranges in a batch based on the commit batch transaction's calldata. -func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte) (uint8, []*rawdb.ChunkBlockRange, error) { - const methodIDLength = 4 - if len(txData) < methodIDLength { - return 0, nil, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength) - } - - method, err := s.scrollChainABI.MethodById(txData[:methodIDLength]) - if err != nil { - return 0, nil, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err) - } - - values, err := method.Inputs.Unpack(txData[methodIDLength:]) + chunkRanges, err := blockRangesFromChunks(commitedBatch.Chunks()) if err != nil { - return 0, nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err) - } - - if method.Name == "commitBatch" { - type commitBatchArgs struct { - Version uint8 - ParentBatchHeader []byte - Chunks [][]byte - SkippedL1MessageBitmap []byte - } - - var args commitBatchArgs - if err = method.Inputs.Copy(&args, values); err != nil { - return 0, nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) - } - - chunkRanges, err := decodeBlockRangesFromEncodedChunks(encoding.CodecVersion(args.Version), args.Chunks) - if err != nil { - return 0, nil, fmt.Errorf("failed to decode block ranges from encoded chunks, version: %v, chunks: %+v, err: %w", args.Version, args.Chunks, err) - } - - return args.Version, chunkRanges, nil - } else if method.Name == "commitBatchWithBlobProof" { - type commitBatchWithBlobProofArgs struct { - Version uint8 - ParentBatchHeader []byte - Chunks [][]byte - SkippedL1MessageBitmap []byte - BlobDataProof []byte - } - - var args commitBatchWithBlobProofArgs - if err = method.Inputs.Copy(&args, values); err != nil { - return 0, nil, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err) - } - - chunkRanges, err := decodeBlockRangesFromEncodedChunks(encoding.CodecVersion(args.Version), args.Chunks) - if err != nil { - return 0, nil, fmt.Errorf("failed to decode block ranges from encoded chunks, version: %v, chunks: %+v, err: %w", args.Version, args.Chunks, err) - } - - return args.Version, chunkRanges, nil + return nil, fmt.Errorf("failed to decode block ranges from chunks, batch index: %v, err: %w", commitedBatch.BatchIndex(), err) } - return 0, nil, fmt.Errorf("unexpected method name: %v", method.Name) + return &rawdb.CommittedBatchMeta{ + Version: commitedBatch.Version(), + ChunkBlockRanges: chunkRanges, + BlobVersionedHashes: commitedBatch.BlobVersionedHashes(), + }, nil } // validateBatch verifies the consistency between the L1 contract and L2 node data. @@ -494,7 +376,7 @@ func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte) // Note: This function is compatible with both "finalize by batch" and "finalize by bundle" methods. 
// In "finalize by bundle", only the last batch of each bundle is fully verified. // This check still ensures the correctness of all batch hashes in the bundle due to the parent-child relationship between batch hashes. -func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { +func validateBatch(batchIndex uint64, event *l1.FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { if len(chunks) == 0 { return 0, nil, fmt.Errorf("invalid argument: length of chunks is 0, batch index: %v", batchIndex) } @@ -540,15 +422,15 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz // Only check when batch index matches the index of the event. This is compatible with both "finalize by batch" and "finalize by bundle": // - finalize by batch: check all batches // - finalize by bundle: check the last batch, because only one event (containing the info of the last batch) is emitted per bundle - if batchIndex == event.BatchIndex.Uint64() { - if localStateRoot != event.StateRoot { - log.Error("State root mismatch", "batch index", event.BatchIndex.Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "l1 finalized state root", event.StateRoot.Hex(), "l2 state root", localStateRoot.Hex()) + if batchIndex == event.BatchIndex().Uint64() { + if localStateRoot != event.StateRoot() { + log.Error("State root mismatch", "batch index", event.BatchIndex().Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "l1 finalized state root", event.StateRoot().Hex(), "l2 state root", localStateRoot.Hex()) stack.Close() os.Exit(1) } - if localWithdrawRoot != event.WithdrawRoot { - log.Error("Withdraw root mismatch", "batch index", event.BatchIndex.Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "l1 finalized withdraw root", event.WithdrawRoot.Hex(), "l2 withdraw root", localWithdrawRoot.Hex()) + if localWithdrawRoot != event.WithdrawRoot() { + log.Error("Withdraw root mismatch", "batch index", event.BatchIndex().Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "l1 finalized withdraw root", event.WithdrawRoot().Hex(), "l2 withdraw root", localWithdrawRoot.Hex()) stack.Close() os.Exit(1) } @@ -556,8 +438,8 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz // Verify batch hash // This check ensures the correctness of all batch hashes in the bundle // due to the parent-child relationship between batch hashes - if localBatchHash != event.BatchHash { - log.Error("Batch hash mismatch", "batch index", event.BatchIndex.Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "parent TotalL1MessagePopped", parentFinalizedBatchMeta.TotalL1MessagePopped, "l1 
finalized batch hash", event.BatchHash.Hex(), "l2 batch hash", localBatchHash.Hex()) + if localBatchHash != event.BatchHash() { + log.Error("Batch hash mismatch", "batch index", event.BatchIndex().Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "parent TotalL1MessagePopped", parentFinalizedBatchMeta.TotalL1MessagePopped, "l1 finalized batch hash", event.BatchHash().Hex(), "l2 batch hash", localBatchHash.Hex()) chunksJson, err := json.Marshal(chunks) if err != nil { log.Error("marshal chunks failed", "err", err) @@ -581,22 +463,12 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz return endBlock.Header.Number.Uint64(), finalizedBatchMeta, nil } -// decodeBlockRangesFromEncodedChunks decodes the provided chunks into a list of block ranges. -func decodeBlockRangesFromEncodedChunks(codecVersion encoding.CodecVersion, chunks [][]byte) ([]*rawdb.ChunkBlockRange, error) { - codec, err := encoding.CodecFromVersion(codecVersion) - if err != nil { - return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err) - } - - daChunksRawTx, err := codec.DecodeDAChunksRawTx(chunks) - if err != nil { - return nil, fmt.Errorf("failed to decode DA chunks, version: %v, err: %w", codecVersion, err) - } - +// blockRangesFromChunks decodes the provided chunks into a list of block ranges. +func blockRangesFromChunks(chunks []*encoding.DAChunkRawTx) ([]*rawdb.ChunkBlockRange, error) { var chunkBlockRanges []*rawdb.ChunkBlockRange - for _, daChunkRawTx := range daChunksRawTx { + for _, daChunkRawTx := range chunks { if len(daChunkRawTx.Blocks) == 0 { - return nil, fmt.Errorf("no blocks found in DA chunk, version: %v", codecVersion) + return nil, fmt.Errorf("no blocks found in DA chunk, chunk: %+v", daChunkRawTx) } chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ From 4ced6f29f512e5ac4f8da40ebe6ebe90321d35b0 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Fri, 27 Dec 2024 08:32:07 +0800 Subject: [PATCH 13/17] add configuration and initialize blob clients --- cmd/utils/flags.go | 18 ++++---- eth/backend.go | 2 +- .../rollup_sync_service.go | 41 +++++++++++++++---- 3 files changed, 43 insertions(+), 18 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index bccd6017b36e..090f16d55c27 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1629,15 +1629,15 @@ func setEnableRollupVerify(ctx *cli.Context, cfg *ethconfig.Config) { func setDA(ctx *cli.Context, cfg *ethconfig.Config) { if ctx.IsSet(DASyncEnabledFlag.Name) { cfg.EnableDASyncing = ctx.Bool(DASyncEnabledFlag.Name) - if ctx.IsSet(DABlobScanAPIEndpointFlag.Name) { - cfg.DA.BlobScanAPIEndpoint = ctx.String(DABlobScanAPIEndpointFlag.Name) - } - if ctx.IsSet(DABlockNativeAPIEndpointFlag.Name) { - cfg.DA.BlockNativeAPIEndpoint = ctx.String(DABlockNativeAPIEndpointFlag.Name) - } - if ctx.IsSet(DABeaconNodeAPIEndpointFlag.Name) { - cfg.DA.BeaconNodeAPIEndpoint = ctx.String(DABeaconNodeAPIEndpointFlag.Name) - } + } + if ctx.IsSet(DABlobScanAPIEndpointFlag.Name) { + cfg.DA.BlobScanAPIEndpoint = ctx.String(DABlobScanAPIEndpointFlag.Name) + } + if ctx.IsSet(DABlockNativeAPIEndpointFlag.Name) { + cfg.DA.BlockNativeAPIEndpoint = ctx.String(DABlockNativeAPIEndpointFlag.Name) + } + if ctx.IsSet(DABeaconNodeAPIEndpointFlag.Name) { + cfg.DA.BeaconNodeAPIEndpoint = ctx.String(DABeaconNodeAPIEndpointFlag.Name) } } 
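Editor's note: the rest of this patch (eth/backend.go and rollup_sync_service.go below) turns these endpoints into an ordered list of blob clients, preferring a beacon node and falling back to the BlobScan and BlockNative APIs. The following self-contained sketch illustrates that fallback pattern. It is illustrative only: AddBlobClient and Size mirror methods used in the diff, but the BlobClient interface and GetBlobByVersionedHash signature here are simplified stand-ins, not the actual blob_client package API.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// BlobClient is a simplified stand-in for a single blob source
// (beacon node, BlobScan, BlockNative, ...).
type BlobClient interface {
	GetBlobByVersionedHash(ctx context.Context, versionedHash string) ([]byte, error)
}

// BlobClients tries each registered client in order, so registration
// order doubles as priority order.
type BlobClients struct {
	clients []BlobClient
}

func (bc *BlobClients) AddBlobClient(c BlobClient) { bc.clients = append(bc.clients, c) }

func (bc *BlobClients) Size() int { return len(bc.clients) }

// GetBlobByVersionedHash returns the first successful response, or the
// last error if every configured client fails.
func (bc *BlobClients) GetBlobByVersionedHash(ctx context.Context, versionedHash string) ([]byte, error) {
	if len(bc.clients) == 0 {
		return nil, errors.New("no blob client is configured")
	}
	var lastErr error
	for _, c := range bc.clients {
		blob, err := c.GetBlobByVersionedHash(ctx, versionedHash)
		if err == nil {
			return blob, nil
		}
		lastErr = err // remember the failure, fall through to the next client
	}
	return nil, fmt.Errorf("all blob clients failed, last err: %w", lastErr)
}

// stubClient lets the example run without any network access.
type stubClient struct{ err error }

func (s *stubClient) GetBlobByVersionedHash(ctx context.Context, versionedHash string) ([]byte, error) {
	if s.err != nil {
		return nil, s.err
	}
	return []byte("blob"), nil
}

func main() {
	clients := &BlobClients{}
	clients.AddBlobClient(&stubClient{err: errors.New("beacon node unavailable")}) // preferred client fails
	clients.AddBlobClient(&stubClient{})                                           // fallback succeeds
	blob, err := clients.GetBlobByVersionedHash(context.Background(), "0x01...")
	fmt.Println(string(blob), err) // blob <nil>
}
```

This matches the registration order used in NewRollupSyncService below, where the beacon node client is added before the API-based clients, and explains the guard that rejects a configuration with zero blob clients.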
diff --git a/eth/backend.go b/eth/backend.go index a119708e52be..bd432cb7131c 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -245,7 +245,7 @@ func New(stack *node.Node, config *ethconfig.Config, l1Client l1.Client) (*Ether if config.EnableRollupVerify { // initialize and start rollup event sync service - eth.rollupSyncService, err = rollup_sync_service.NewRollupSyncService(context.Background(), chainConfig, eth.chainDb, l1Client, eth.blockchain, stack) + eth.rollupSyncService, err = rollup_sync_service.NewRollupSyncService(context.Background(), chainConfig, eth.chainDb, l1Client, eth.blockchain, stack, config.DA) if err != nil { return nil, fmt.Errorf("cannot initialize rollup event sync service: %w", err) } diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index 58ba26bf21e4..ec782a60f535 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -17,6 +17,8 @@ import ( "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/node" "github.com/scroll-tech/go-ethereum/params" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" "github.com/scroll-tech/go-ethereum/rollup/l1" "github.com/scroll-tech/go-ethereum/rollup/rcfg" @@ -54,7 +56,7 @@ type RollupSyncService struct { callDataBlobSource *da.CalldataBlobSource } -func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig, db ethdb.Database, l1Client l1.Client, bc *core.BlockChain, stack *node.Node) (*RollupSyncService, error) { +func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig, db ethdb.Database, l1Client l1.Client, bc *core.BlockChain, stack *node.Node, config da_syncer.Config) (*RollupSyncService, error) { if genesisConfig.Scroll.L1Config == nil { return nil, fmt.Errorf("missing L1 config in genesis") } @@ -72,6 +74,14 @@ func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig latestProcessedBlock = *block } + var success bool + ctx, cancel := context.WithCancel(ctx) + defer func() { + if !success { + cancel() + } + }() + l1Reader, err := l1.NewReader(ctx, l1.Config{ ScrollChainAddress: genesisConfig.Scroll.L1Config.ScrollChainAddress, L1MessageQueueAddress: genesisConfig.Scroll.L1Config.L1MessageQueueAddress, @@ -80,16 +90,33 @@ func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig return nil, fmt.Errorf("failed to initialize l1.Reader, err = %w", err) } - // TODO: create blob clients based on new config parameters + blobClientList := blob_client.NewBlobClients() + if config.BeaconNodeAPIEndpoint != "" { + beaconNodeClient, err := blob_client.NewBeaconNodeClient(config.BeaconNodeAPIEndpoint) + if err != nil { + log.Warn("failed to create BeaconNodeClient", "err", err) + } else { + blobClientList.AddBlobClient(beaconNodeClient) + } + } + if config.BlobScanAPIEndpoint != "" { + blobClientList.AddBlobClient(blob_client.NewBlobScanClient(config.BlobScanAPIEndpoint)) + } + if config.BlockNativeAPIEndpoint != "" { + blobClientList.AddBlobClient(blob_client.NewBlockNativeClient(config.BlockNativeAPIEndpoint)) + } + if blobClientList.Size() == 0 { + return nil, errors.New("DA syncing is enabled but no blob client is configured. 
Please provide at least one blob client via command line flag") + } - calldataBlobSource, err := da.NewCalldataBlobSource(ctx, latestProcessedBlock, l1Reader, nil, db) + calldataBlobSource, err := da.NewCalldataBlobSource(ctx, latestProcessedBlock, l1Reader, blobClientList, db) if err != nil { return nil, fmt.Errorf("failed to create calldata blob source: %w", err) } - ctx, cancel := context.WithCancel(ctx) + success = true - service := RollupSyncService{ + return &RollupSyncService{ ctx: ctx, cancel: cancel, db: db, @@ -97,9 +124,7 @@ func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig stack: stack, callDataBlobSource: calldataBlobSource, - } - - return &service, nil + }, nil } func (s *RollupSyncService) Start() { From 6aafa74c11e89cc11b4c1e8de4b853908563edba Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Fri, 27 Dec 2024 11:19:25 +0800 Subject: [PATCH 14/17] fix unit tests --- rollup/da_syncer/da/commitV0.go | 8 +- rollup/da_syncer/da/commitV1.go | 2 +- rollup/da_syncer/da/da.go | 2 +- rollup/l1/abi.go | 20 + rollup/l1/types.go | 38 + .../rollup_sync_service.go | 4 +- .../rollup_sync_service_test.go | 870 ++++++------------ 7 files changed, 363 insertions(+), 581 deletions(-) diff --git a/rollup/da_syncer/da/commitV0.go b/rollup/da_syncer/da/commitV0.go index 960151e6cda4..c8e34ec01a7e 100644 --- a/rollup/da_syncer/da/commitV0.go +++ b/rollup/da_syncer/da/commitV0.go @@ -15,7 +15,7 @@ import ( ) type CommitBatchDAV0 struct { - version uint8 + version encoding.CodecVersion batchIndex uint64 parentTotalL1MessagePopped uint64 skippedL1MessageBitmap []byte @@ -37,11 +37,11 @@ func NewCommitBatchDAV0(db ethdb.Database, return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", commitEvent.BatchIndex().Uint64(), err) } - return NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent) + return NewCommitBatchDAV0WithChunks(db, codec.Version(), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent) } func NewCommitBatchDAV0WithChunks(db ethdb.Database, - version uint8, + version encoding.CodecVersion, batchIndex uint64, parentBatchHeader []byte, decodedChunks []*encoding.DAChunkRawTx, @@ -71,7 +71,7 @@ func NewCommitBatchDAV0Empty() *CommitBatchDAV0 { } } -func (c *CommitBatchDAV0) Version() uint8 { +func (c *CommitBatchDAV0) Version() encoding.CodecVersion { return c.version } diff --git a/rollup/da_syncer/da/commitV1.go b/rollup/da_syncer/da/commitV1.go index 29eb065ed3e8..6fdcf45b6d14 100644 --- a/rollup/da_syncer/da/commitV1.go +++ b/rollup/da_syncer/da/commitV1.go @@ -78,7 +78,7 @@ func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, return nil, fmt.Errorf("decodedChunks is nil after decoding") } - v0, err := NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent) + v0, err := NewCommitBatchDAV0WithChunks(db, codec.Version(), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent) if err != nil { return nil, err } diff --git a/rollup/da_syncer/da/da.go b/rollup/da_syncer/da/da.go index 2773da2951be..cd7320f1c04f 100644 --- a/rollup/da_syncer/da/da.go +++ b/rollup/da_syncer/da/da.go @@ -35,7 +35,7 @@ type Entry interface { type EntryWithBlocks interface { Entry Blocks() []*PartialBlock - 
Version() uint8 + Version() encoding.CodecVersion Chunks() []*encoding.DAChunkRawTx BlobVersionedHashes() []common.Hash } diff --git a/rollup/l1/abi.go b/rollup/l1/abi.go index c16123aa5e8b..dcf09f25fd13 100644 --- a/rollup/l1/abi.go +++ b/rollup/l1/abi.go @@ -158,6 +158,26 @@ type FinalizeBatchEvent struct { blockNumber uint64 } +func NewFinalizeBatchEvent( + batchIndex *big.Int, + batchHash common.Hash, + stateRoot common.Hash, + withdrawRoot common.Hash, + txHash common.Hash, + blockHash common.Hash, + blockNumber uint64, +) *FinalizeBatchEvent { + return &FinalizeBatchEvent{ + batchIndex: batchIndex, + batchHash: batchHash, + stateRoot: stateRoot, + withdrawRoot: withdrawRoot, + txHash: txHash, + blockHash: blockHash, + blockNumber: blockNumber, + } +} + func (f *FinalizeBatchEvent) TxHash() common.Hash { return f.txHash } diff --git a/rollup/l1/types.go b/rollup/l1/types.go index 8c030815ec28..0adb734bac09 100644 --- a/rollup/l1/types.go +++ b/rollup/l1/types.go @@ -20,3 +20,41 @@ type Client interface { BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) } + +type MockNopClient struct{} + +func (m *MockNopClient) BlockNumber(ctx context.Context) (uint64, error) { + return 0, nil +} + +func (m *MockNopClient) ChainID(ctx context.Context) (*big.Int, error) { + return big.NewInt(0), nil +} + +func (m *MockNopClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { + return nil, nil +} + +func (m *MockNopClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { + return nil, nil +} + +func (m *MockNopClient) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + return nil, nil +} + +func (m *MockNopClient) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { + return nil, nil +} + +func (m *MockNopClient) TransactionByHash(ctx context.Context, txHash common.Hash) (tx *types.Transaction, isPending bool, err error) { + return nil, false, nil +} + +func (m *MockNopClient) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + return nil, nil +} + +func (m *MockNopClient) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + return nil, nil +} diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index ec782a60f535..3380b7dcff0a 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -106,7 +106,7 @@ func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig blobClientList.AddBlobClient(blob_client.NewBlockNativeClient(config.BlockNativeAPIEndpoint)) } if blobClientList.Size() == 0 { - return nil, errors.New("DA syncing is enabled but no blob client is configured. Please provide at least one blob client via command line flag") + return nil, errors.New("no blob client is configured for rollup verifier. 
Please provide at least one blob client via command line flag") } calldataBlobSource, err := da.NewCalldataBlobSource(ctx, latestProcessedBlock, l1Reader, blobClientList, db) @@ -372,7 +372,7 @@ func (s *RollupSyncService) getCommittedBatchMeta(commitedBatch da.EntryWithBloc } return &rawdb.CommittedBatchMeta{ - Version: commitedBatch.Version(), + Version: uint8(commitedBatch.Version()), ChunkBlockRanges: chunkRanges, BlobVersionedHashes: commitedBatch.BlobVersionedHashes(), }, nil diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go index f1b09a37a1f2..c34f9385b515 100644 --- a/rollup/rollup_sync_service/rollup_sync_service_test.go +++ b/rollup/rollup_sync_service/rollup_sync_service_test.go @@ -2,12 +2,10 @@ package rollup_sync_service import ( "context" - "encoding/hex" "encoding/json" "math/big" "os" "testing" - "time" "github.com/scroll-tech/da-codec/encoding" "github.com/stretchr/testify/assert" @@ -16,415 +14,86 @@ import ( "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core/rawdb" - "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/ethdb/memorydb" "github.com/scroll-tech/go-ethereum/node" "github.com/scroll-tech/go-ethereum/params" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) -func TestRollupSyncServiceStartAndStop(t *testing.T) { - genesisConfig := ¶ms.ChainConfig{ - Scroll: params.ScrollConfig{ - L1Config: ¶ms.L1Config{ - L1ChainId: 11155111, - ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), - }, - }, - } - db := rawdb.NewDatabase(memorydb.New()) - l1Client := &mockEthClient{} - bc := &core.BlockChain{} - stack, err := node.New(&node.DefaultConfig) - if err != nil { - t.Fatalf("Failed to new P2P node: %v", err) - } - defer stack.Close() - service, err := NewRollupSyncService(context.Background(), genesisConfig, db, l1Client, bc, stack) - if err != nil { - t.Fatalf("Failed to new rollup sync service: %v", err) - } - - assert.NotNil(t, service) - service.Start() - time.Sleep(10 * time.Millisecond) - service.Stop() -} - -func TestDecodeBatchVersionAndChunkBlockRangesCodecv0(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - require.NoError(t, err) - - service := &RollupSyncService{ - scrollChainABI: scrollChainABI, - } - - data, err := os.ReadFile("./testdata/commitBatch_input_codecv0.json") - require.NoError(t, err, "Failed to read json file") - - type tx struct { - Input string `json:"input"` - } - var commitBatch tx - err = json.Unmarshal(data, &commitBatch) - require.NoError(t, err, "Failed to unmarshal transaction json") - - testTxData, err := hex.DecodeString(commitBatch.Input[2:]) - if err != nil { - t.Fatalf("Failed to decode string: %v", err) - } - - version, ranges, err := service.decodeBatchVersionAndChunkBlockRanges(testTxData) - if err != nil { - t.Fatalf("Failed to decode chunk ranges: %v", err) - } - - assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(version)) - - expectedRanges := []*rawdb.ChunkBlockRange{ - {StartBlockNumber: 4435142, EndBlockNumber: 4435142}, - {StartBlockNumber: 4435143, EndBlockNumber: 4435144}, - {StartBlockNumber: 4435145, EndBlockNumber: 4435145}, - {StartBlockNumber: 4435146, EndBlockNumber: 4435146}, - {StartBlockNumber: 4435147, EndBlockNumber: 4435147}, - {StartBlockNumber: 
4435148, EndBlockNumber: 4435148}, - {StartBlockNumber: 4435149, EndBlockNumber: 4435150}, - {StartBlockNumber: 4435151, EndBlockNumber: 4435151}, - {StartBlockNumber: 4435152, EndBlockNumber: 4435152}, - {StartBlockNumber: 4435153, EndBlockNumber: 4435153}, - {StartBlockNumber: 4435154, EndBlockNumber: 4435154}, - {StartBlockNumber: 4435155, EndBlockNumber: 4435155}, - {StartBlockNumber: 4435156, EndBlockNumber: 4435156}, - {StartBlockNumber: 4435157, EndBlockNumber: 4435157}, - {StartBlockNumber: 4435158, EndBlockNumber: 4435158}, - } - - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) - } - - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Errorf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) - } - } -} - -func TestDecodeBatchVersionAndChunkBlockRangesCodecv1(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - require.NoError(t, err) - - service := &RollupSyncService{ - scrollChainABI: scrollChainABI, - } - - data, err := os.ReadFile("./testdata/commitBatch_input_codecv1.json") - require.NoError(t, err, "Failed to read json file") - - type tx struct { - Input string `json:"input"` - } - var commitBatch tx - err = json.Unmarshal(data, &commitBatch) - require.NoError(t, err, "Failed to unmarshal transaction json") - - testTxData, err := hex.DecodeString(commitBatch.Input[2:]) - if err != nil { - t.Fatalf("Failed to decode string: %v", err) - } - - version, ranges, err := service.decodeBatchVersionAndChunkBlockRanges(testTxData) - if err != nil { - t.Fatalf("Failed to decode chunk ranges: %v", err) - } - - assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(version)) - - expectedRanges := []*rawdb.ChunkBlockRange{ - {StartBlockNumber: 1690, EndBlockNumber: 1780}, - {StartBlockNumber: 1781, EndBlockNumber: 1871}, - {StartBlockNumber: 1872, EndBlockNumber: 1962}, - {StartBlockNumber: 1963, EndBlockNumber: 2053}, - {StartBlockNumber: 2054, EndBlockNumber: 2144}, - {StartBlockNumber: 2145, EndBlockNumber: 2235}, - {StartBlockNumber: 2236, EndBlockNumber: 2326}, - {StartBlockNumber: 2327, EndBlockNumber: 2417}, - {StartBlockNumber: 2418, EndBlockNumber: 2508}, - } - - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) - } - - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Errorf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) - } - } -} - -func TestDecodeBatchVersionAndChunkBlockRangesCodecv2(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - require.NoError(t, err) - - service := &RollupSyncService{ - scrollChainABI: scrollChainABI, - } - - data, err := os.ReadFile("./testdata/commitBatch_input_codecv2.json") - require.NoError(t, err, "Failed to read json file") - - type tx struct { - Input string `json:"input"` - } - var commitBatch tx - err = json.Unmarshal(data, &commitBatch) - require.NoError(t, err, "Failed to unmarshal transaction json") - - testTxData, err := hex.DecodeString(commitBatch.Input[2:]) - if err != nil { - t.Fatalf("Failed to decode string: %v", err) - } - - version, ranges, err := service.decodeBatchVersionAndChunkBlockRanges(testTxData) - if err != nil { - t.Fatalf("Failed to decode chunk ranges: %v", err) - } - - assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(version)) - - expectedRanges := []*rawdb.ChunkBlockRange{ - {StartBlockNumber: 200, EndBlockNumber: 290}, - 
{StartBlockNumber: 291, EndBlockNumber: 381}, - {StartBlockNumber: 382, EndBlockNumber: 472}, - {StartBlockNumber: 473, EndBlockNumber: 563}, - {StartBlockNumber: 564, EndBlockNumber: 654}, - {StartBlockNumber: 655, EndBlockNumber: 745}, - {StartBlockNumber: 746, EndBlockNumber: 836}, - {StartBlockNumber: 837, EndBlockNumber: 927}, - {StartBlockNumber: 928, EndBlockNumber: 1018}, - } - - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) - } - - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Errorf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) - } - } -} - -func TestDecodeBatchVersionAndChunkBlockRangesCodecv3(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - require.NoError(t, err) - - service := &RollupSyncService{ - scrollChainABI: scrollChainABI, - } - - data, err := os.ReadFile("./testdata/commitBatchWithBlobProof_input_codecv3.json") - require.NoError(t, err, "Failed to read json file") - - type tx struct { - Input string `json:"input"` - } - var commitBatch tx - err = json.Unmarshal(data, &commitBatch) - require.NoError(t, err, "Failed to unmarshal transaction json") - - testTxData, err := hex.DecodeString(commitBatch.Input[2:]) - if err != nil { - t.Fatalf("Failed to decode string: %v", err) - } - - version, ranges, err := service.decodeBatchVersionAndChunkBlockRanges(testTxData) - if err != nil { - t.Fatalf("Failed to decode chunk ranges: %v", err) - } - - assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(version)) - - expectedRanges := []*rawdb.ChunkBlockRange{ - {StartBlockNumber: 1, EndBlockNumber: 9}, - {StartBlockNumber: 10, EndBlockNumber: 20}, - {StartBlockNumber: 21, EndBlockNumber: 21}, - {StartBlockNumber: 22, EndBlockNumber: 22}, - {StartBlockNumber: 23, EndBlockNumber: 23}, - {StartBlockNumber: 24, EndBlockNumber: 24}, - {StartBlockNumber: 25, EndBlockNumber: 25}, - {StartBlockNumber: 26, EndBlockNumber: 26}, - {StartBlockNumber: 27, EndBlockNumber: 27}, - {StartBlockNumber: 28, EndBlockNumber: 28}, - {StartBlockNumber: 29, EndBlockNumber: 29}, - {StartBlockNumber: 30, EndBlockNumber: 30}, - {StartBlockNumber: 31, EndBlockNumber: 31}, - {StartBlockNumber: 32, EndBlockNumber: 32}, - {StartBlockNumber: 33, EndBlockNumber: 33}, - {StartBlockNumber: 34, EndBlockNumber: 34}, - {StartBlockNumber: 35, EndBlockNumber: 35}, - {StartBlockNumber: 36, EndBlockNumber: 36}, - {StartBlockNumber: 37, EndBlockNumber: 37}, - {StartBlockNumber: 38, EndBlockNumber: 38}, - {StartBlockNumber: 39, EndBlockNumber: 39}, - {StartBlockNumber: 40, EndBlockNumber: 40}, - } - - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) - } - - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Errorf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) - } - } -} - func TestGetCommittedBatchMetaCodecv0(t *testing.T) { genesisConfig := ¶ms.ChainConfig{ Scroll: params.ScrollConfig{ L1Config: ¶ms.L1Config{ - L1ChainId: 11155111, - ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), + L1ChainId: 11155111, + ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), + L1MessageQueueAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a1"), }, }, } db := rawdb.NewDatabase(memorydb.New()) - rlpData, err := os.ReadFile("./testdata/commitBatch_codecv0.rlp") - if err != nil { 
- t.Fatalf("Failed to read RLP data: %v", err) - } - l1Client := &mockEthClient{ - txRLP: rlpData, - } - bc := &core.BlockChain{} stack, err := node.New(&node.DefaultConfig) - if err != nil { - t.Fatalf("Failed to new P2P node: %v", err) - } + require.NoError(t, err, "Failed to create new P2P node") defer stack.Close() - service, err := NewRollupSyncService(context.Background(), genesisConfig, db, l1Client, bc, stack) - if err != nil { - t.Fatalf("Failed to new rollup sync service: %v", err) - } - vLog := &types.Log{ - TxHash: common.HexToHash("0x0"), - } - metadata, err := service.getCommittedBatchMeta(1, vLog) + service, err := NewRollupSyncService(context.Background(), genesisConfig, db, &l1.MockNopClient{}, &core.BlockChain{}, stack, da_syncer.Config{ + BlobScanAPIEndpoint: "http://localhost:8080", + }) require.NoError(t, err) - assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(metadata.Version)) - expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 911145, EndBlockNumber: 911151}, {StartBlockNumber: 911152, EndBlockNumber: 911155}, {StartBlockNumber: 911156, EndBlockNumber: 911159}, } - if len(expectedRanges) != len(metadata.ChunkBlockRanges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) - } - - for i := range metadata.ChunkBlockRanges { - if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) + var chunks []*encoding.DAChunkRawTx + for _, r := range expectedRanges { + var blocks []encoding.DABlock + for i := r.StartBlockNumber; i <= r.EndBlockNumber; i++ { + blocks = append(blocks, &mockDABlock{number: i}) } + chunks = append(chunks, &encoding.DAChunkRawTx{Blocks: blocks}) } -} -func TestGetCommittedBatchMetaCodecv1(t *testing.T) { - genesisConfig := ¶ms.ChainConfig{ - Scroll: params.ScrollConfig{ - L1Config: ¶ms.L1Config{ - L1ChainId: 11155111, - ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), - }, - }, - } - db := rawdb.NewDatabase(memorydb.New()) - - rlpData, err := os.ReadFile("./testdata/commitBatch_codecv1.rlp") - if err != nil { - t.Fatalf("Failed to read RLP data: %v", err) - } - l1Client := &mockEthClient{ - txRLP: rlpData, - } - bc := &core.BlockChain{} - stack, err := node.New(&node.DefaultConfig) - if err != nil { - t.Fatalf("Failed to new P2P node: %v", err) - } - defer stack.Close() - service, err := NewRollupSyncService(context.Background(), genesisConfig, db, l1Client, bc, stack) - if err != nil { - t.Fatalf("Failed to new rollup sync service: %v", err) + committedBatch := mockEntryWithBlocks{ + batchIndex: 1, + version: encoding.CodecV0, + chunks: chunks, } - vLog := &types.Log{ - TxHash: common.HexToHash("0x1"), - } - metadata, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(committedBatch) require.NoError(t, err) - assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(metadata.Version)) - - expectedRanges := []*rawdb.ChunkBlockRange{ - {StartBlockNumber: 1, EndBlockNumber: 11}, - } - - if len(expectedRanges) != len(metadata.ChunkBlockRanges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) - } - - for i := range metadata.ChunkBlockRanges { - if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) - } - } + require.Equal(t, 
encoding.CodecV0, encoding.CodecVersion(metadata.Version)) + require.EqualValues(t, expectedRanges, metadata.ChunkBlockRanges) } -func TestGetCommittedBatchMetaCodecv2(t *testing.T) { +func TestGetCommittedBatchMetaCodecV1(t *testing.T) { genesisConfig := ¶ms.ChainConfig{ Scroll: params.ScrollConfig{ L1Config: ¶ms.L1Config{ - L1ChainId: 11155111, - ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), + L1ChainId: 11155111, + ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), + L1MessageQueueAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a1"), }, }, } db := rawdb.NewDatabase(memorydb.New()) - rlpData, err := os.ReadFile("./testdata/commitBatch_codecv2.rlp") - if err != nil { - t.Fatalf("Failed to read RLP data: %v", err) - } - l1Client := &mockEthClient{ - txRLP: rlpData, - } - bc := &core.BlockChain{} stack, err := node.New(&node.DefaultConfig) - if err != nil { - t.Fatalf("Failed to new P2P node: %v", err) - } + require.NoError(t, err, "Failed to create new P2P node") defer stack.Close() - service, err := NewRollupSyncService(context.Background(), genesisConfig, db, l1Client, bc, stack) - if err != nil { - t.Fatalf("Failed to new rollup sync service: %v", err) - } - vLog := &types.Log{ - TxHash: common.HexToHash("0x2"), - } - metadata, err := service.getCommittedBatchMeta(1, vLog) + service, err := NewRollupSyncService(context.Background(), genesisConfig, db, &l1.MockNopClient{}, &core.BlockChain{}, stack, da_syncer.Config{ + BlobScanAPIEndpoint: "http://localhost:8080", + }) require.NoError(t, err) - assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(metadata.Version)) - expectedRanges := []*rawdb.ChunkBlockRange{ + {StartBlockNumber: 100, EndBlockNumber: 142}, {StartBlockNumber: 143, EndBlockNumber: 143}, {StartBlockNumber: 144, EndBlockNumber: 144}, {StartBlockNumber: 145, EndBlockNumber: 145}, @@ -456,96 +125,112 @@ func TestGetCommittedBatchMetaCodecv2(t *testing.T) { {StartBlockNumber: 174, EndBlockNumber: 174}, } - if len(expectedRanges) != len(metadata.ChunkBlockRanges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) - } - - for i := range metadata.ChunkBlockRanges { - if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) + var chunks []*encoding.DAChunkRawTx + for _, r := range expectedRanges { + var blocks []encoding.DABlock + for i := r.StartBlockNumber; i <= r.EndBlockNumber; i++ { + blocks = append(blocks, &mockDABlock{number: i}) } + chunks = append(chunks, &encoding.DAChunkRawTx{Blocks: blocks}) } -} -func TestGetCommittedBatchMetaCodecv3(t *testing.T) { - genesisConfig := ¶ms.ChainConfig{ - Scroll: params.ScrollConfig{ - L1Config: ¶ms.L1Config{ - L1ChainId: 11155111, - ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), - }, - }, + expectedVersionedHashes := []common.Hash{ + common.HexToHash("0x1"), + common.HexToHash("0x2"), } - db := rawdb.NewDatabase(memorydb.New()) - rlpData, err := os.ReadFile("./testdata/commitBatchWithBlobProof_codecv3.rlp") - if err != nil { - t.Fatalf("Failed to read RLP data: %v", err) - } - l1Client := &mockEthClient{ - txRLP: rlpData, - } - bc := &core.BlockChain{} - stack, err := node.New(&node.DefaultConfig) - if err != nil { - t.Fatalf("Failed to new P2P node: %v", err) - } - defer stack.Close() - service, err := 
NewRollupSyncService(context.Background(), genesisConfig, db, l1Client, bc, stack)
-    if err != nil {
-        t.Fatalf("Failed to new rollup sync service: %v", err)
+    committedBatch := mockEntryWithBlocks{
+        batchIndex:      1,
+        version:         encoding.CodecV1,
+        chunks:          chunks,
+        versionedHashes: expectedVersionedHashes,
     }
-    vLog := &types.Log{
-        TxHash: common.HexToHash("0x3"),
-    }
-    metadata, err := service.getCommittedBatchMeta(1, vLog)
+    metadata, err := service.getCommittedBatchMeta(committedBatch)
     require.NoError(t, err)
-    assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(metadata.Version))
+    require.Equal(t, encoding.CodecV1, encoding.CodecVersion(metadata.Version))
+    require.EqualValues(t, expectedRanges, metadata.ChunkBlockRanges)
+    require.EqualValues(t, expectedVersionedHashes, metadata.BlobVersionedHashes)
+}
-    expectedRanges := []*rawdb.ChunkBlockRange{
-        {StartBlockNumber: 41, EndBlockNumber: 41},
-        {StartBlockNumber: 42, EndBlockNumber: 42},
-        {StartBlockNumber: 43, EndBlockNumber: 43},
-        {StartBlockNumber: 44, EndBlockNumber: 44},
-        {StartBlockNumber: 45, EndBlockNumber: 45},
-        {StartBlockNumber: 46, EndBlockNumber: 46},
-        {StartBlockNumber: 47, EndBlockNumber: 47},
-        {StartBlockNumber: 48, EndBlockNumber: 48},
-        {StartBlockNumber: 49, EndBlockNumber: 49},
-        {StartBlockNumber: 50, EndBlockNumber: 50},
-        {StartBlockNumber: 51, EndBlockNumber: 51},
-        {StartBlockNumber: 52, EndBlockNumber: 52},
-        {StartBlockNumber: 53, EndBlockNumber: 53},
-        {StartBlockNumber: 54, EndBlockNumber: 54},
-        {StartBlockNumber: 55, EndBlockNumber: 55},
-        {StartBlockNumber: 56, EndBlockNumber: 56},
-        {StartBlockNumber: 57, EndBlockNumber: 57},
-        {StartBlockNumber: 58, EndBlockNumber: 58},
-        {StartBlockNumber: 59, EndBlockNumber: 59},
-        {StartBlockNumber: 60, EndBlockNumber: 60},
-        {StartBlockNumber: 61, EndBlockNumber: 61},
-        {StartBlockNumber: 62, EndBlockNumber: 62},
-        {StartBlockNumber: 63, EndBlockNumber: 63},
-        {StartBlockNumber: 64, EndBlockNumber: 64},
-        {StartBlockNumber: 65, EndBlockNumber: 65},
-        {StartBlockNumber: 66, EndBlockNumber: 66},
-        {StartBlockNumber: 67, EndBlockNumber: 67},
-        {StartBlockNumber: 68, EndBlockNumber: 68},
-        {StartBlockNumber: 69, EndBlockNumber: 69},
-        {StartBlockNumber: 70, EndBlockNumber: 70},
-    }
-
-    if len(expectedRanges) != len(metadata.ChunkBlockRanges) {
-        t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges))
-    }
-
-    for i := range metadata.ChunkBlockRanges {
-        if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] {
-            t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i])
-        }
-    }
+// mockEntryWithBlocks is a test stub of a committed batch entry: only the
+// accessors exercised by getCommittedBatchMeta return data, the rest panic.
+type mockEntryWithBlocks struct {
+    batchIndex      uint64
+    version         encoding.CodecVersion
+    chunks          []*encoding.DAChunkRawTx
+    versionedHashes []common.Hash
+}
+
+func (m mockEntryWithBlocks) Type() da.Type {
+    panic("implement me")
+}
+
+func (m mockEntryWithBlocks) BatchIndex() uint64 {
+    return m.batchIndex
+}
+
+func (m mockEntryWithBlocks) L1BlockNumber() uint64 {
+    panic("implement me")
+}
+
+func (m mockEntryWithBlocks) CompareTo(entry da.Entry) int {
+    panic("implement me")
+}
+
+func (m mockEntryWithBlocks) Event() l1.RollupEvent {
+    panic("implement me")
+}
+
+func (m mockEntryWithBlocks) Blocks() []*da.PartialBlock {
+    panic("implement me")
+}
+
+func (m mockEntryWithBlocks) Version() encoding.CodecVersion {
+    return m.version
+}
+
+func (m mockEntryWithBlocks) Chunks() []*encoding.DAChunkRawTx {
+    return m.chunks
+}
+
+func (m mockEntryWithBlocks) BlobVersionedHashes() []common.Hash {
+    return m.versionedHashes
+}
+
+// mockDABlock is a minimal DA block stub that only reports its block number.
+type mockDABlock struct {
+    number uint64
+}
+
+func (b *mockDABlock) Encode() []byte {
+    panic("implement me")
+}
+
+func (b *mockDABlock) Decode(bytes []byte) error {
+    panic("implement me")
+}
+
+func (b *mockDABlock) NumTransactions() uint16 {
+    panic("implement me")
+}
+
+func (b *mockDABlock) NumL1Messages() uint16 {
+    panic("implement me")
+}
+
+func (b *mockDABlock) Timestamp() uint64 {
+    panic("implement me")
+}
+
+func (b *mockDABlock) BaseFee() *big.Int {
+    panic("implement me")
+}
+
+func (b *mockDABlock) GasLimit() uint64 {
+    panic("implement me")
+}
+
+func (b *mockDABlock) Number() uint64 {
+    return b.number
 }
 
 func TestValidateBatchCodecv0(t *testing.T) {
@@ -559,18 +244,21 @@ func TestValidateBatchCodecv0(t *testing.T) {
     chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}}
 
     parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{}
-    event1 := &L1FinalizeBatchEvent{
-        BatchIndex:   big.NewInt(0),
-        BatchHash:    common.HexToHash("0xfd3ecf106ce993adc6db68e42ce701bfe638434395abdeeb871f7bd395ae2368"),
-        StateRoot:    chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
-        WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
-    }
+    event1 := l1.NewFinalizeBatchEvent(
+        big.NewInt(0),
+        common.HexToHash("0xfd3ecf106ce993adc6db68e42ce701bfe638434395abdeeb871f7bd395ae2368"),
+        chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
+        chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
+        common.HexToHash("0x1"),
+        common.HexToHash("0x1"),
+        1,
+    )
     committedBatchMeta1 := &rawdb.CommittedBatchMeta{
         Version:             uint8(encoding.CodecV0),
         BlobVersionedHashes: nil,
     }
-    endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil)
+    endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil)
     assert.NoError(t, err)
     assert.Equal(t, uint64(13), endBlock1)
 
@@ -578,32 +266,36 @@ func TestValidateBatchCodecv0(t *testing.T) {
     chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}}
 
     parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{
-        BatchHash:            event1.BatchHash,
+        BatchHash:            event1.BatchHash(),
         TotalL1MessagePopped: 11,
-        StateRoot:            event1.StateRoot,
-        WithdrawRoot:         event1.WithdrawRoot,
+        StateRoot:            event1.StateRoot(),
+        WithdrawRoot:         event1.WithdrawRoot(),
     }
     assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1)
-    event2 := &L1FinalizeBatchEvent{
-        BatchIndex:   big.NewInt(1),
-        BatchHash:    common.HexToHash("0xadb8e526c3fdc2045614158300789cd66e7a945efe5a484db00b5ef9a26016d7"),
-        StateRoot:    chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
-        WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
-    }
+
+    event2 := l1.NewFinalizeBatchEvent(
+        big.NewInt(1),
+        common.HexToHash("0xadb8e526c3fdc2045614158300789cd66e7a945efe5a484db00b5ef9a26016d7"),
+        chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
+        chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
+        common.HexToHash("0x1"),
+        common.HexToHash("0x1"),
+        2,
+    )
     committedBatchMeta2 := &rawdb.CommittedBatchMeta{
         Version:             uint8(encoding.CodecV0),
         BlobVersionedHashes: nil,
     }
-    endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil)
+    endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil)
     assert.NoError(t, err)
     assert.Equal(t, uint64(17), endBlock2)
 
     parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{
-        BatchHash:            event2.BatchHash,
+        BatchHash:            event2.BatchHash(),
         TotalL1MessagePopped: 42,
-        StateRoot:            event2.StateRoot,
-        WithdrawRoot:         event2.WithdrawRoot,
+        StateRoot:            event2.StateRoot(),
+        WithdrawRoot:         event2.WithdrawRoot(),
     }
     assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 }
@@ -619,18 +311,21 @@ func TestValidateBatchCodecv1(t *testing.T) {
     chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}}
 
     parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{}
-    event1 := &L1FinalizeBatchEvent{
-        BatchIndex:   big.NewInt(0),
-        BatchHash:    common.HexToHash("0x73cb3310646716cb782702a0ec4ad33cf55633c85daf96b641953c5defe58031"),
-        StateRoot:    chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
-        WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
-    }
+    event1 := l1.NewFinalizeBatchEvent(
+        big.NewInt(0),
+        common.HexToHash("0x73cb3310646716cb782702a0ec4ad33cf55633c85daf96b641953c5defe58031"),
+        chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
+        chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
+        common.HexToHash("0x1"),
+        common.HexToHash("0x1"),
+        1,
+    )
     committedBatchMeta1 := &rawdb.CommittedBatchMeta{
         Version:             uint8(encoding.CodecV1),
         BlobVersionedHashes: []common.Hash{common.HexToHash("0x0129554070e4323800ca0e5ddd17bc447854601b306a70870002a058741214b3")},
     }
-    endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil)
+    endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil)
     assert.NoError(t, err)
     assert.Equal(t, uint64(13), endBlock1)
 
@@ -638,31 +333,34 @@ func TestValidateBatchCodecv1(t *testing.T) {
     chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}}
 
     parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{
-        BatchHash:            event1.BatchHash,
+        BatchHash:            event1.BatchHash(),
         TotalL1MessagePopped: 11,
-        StateRoot:            event1.StateRoot,
-        WithdrawRoot:         event1.WithdrawRoot,
+        StateRoot:            event1.StateRoot(),
+        WithdrawRoot:         event1.WithdrawRoot(),
     }
     assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1)
-    event2 := &L1FinalizeBatchEvent{
-        BatchIndex:   big.NewInt(1),
-        BatchHash:    common.HexToHash("0x7f230ce84b4bf86f8ee22ffb5c145e3ef3ddf2a76da4936a33f33cebdb63a48a"),
-        StateRoot:    chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
-        WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
-    }
+    event2 := l1.NewFinalizeBatchEvent(
+        big.NewInt(1),
+        common.HexToHash("0x7f230ce84b4bf86f8ee22ffb5c145e3ef3ddf2a76da4936a33f33cebdb63a48a"),
+        chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
+        chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
+        common.HexToHash("0x1"),
+        common.HexToHash("0x1"),
+        1,
+    )
     committedBatchMeta2 := &rawdb.CommittedBatchMeta{
         Version:             uint8(encoding.CodecV1),
         BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a327088bb2b13151449d8313c281d0006d12e8453e863637b746898b6ad5a6")},
     }
-    endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil)
+    endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil)
     assert.NoError(t, err)
     assert.Equal(t, uint64(17), endBlock2)
 
     parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{
-        BatchHash:            event2.BatchHash,
+        BatchHash:            event2.BatchHash(),
         TotalL1MessagePopped: 42,
-        StateRoot:            event2.StateRoot,
-        WithdrawRoot:         event2.WithdrawRoot,
+        StateRoot:            event2.StateRoot(),
+        WithdrawRoot:         event2.WithdrawRoot(),
     }
     assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 }
@@ -678,18 +376,21 @@ func TestValidateBatchCodecv2(t *testing.T) {
     chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}}
 
     parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{}
-    event1 := &L1FinalizeBatchEvent{
-        BatchIndex:   big.NewInt(0),
-        BatchHash:    common.HexToHash("0xaccf37a0b974f2058692d366b2ea85502c99db4a0bcb9b77903b49bf866a463b"),
-        StateRoot:    chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
-        WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
-    }
+    event1 := l1.NewFinalizeBatchEvent(
+        big.NewInt(0),
+        common.HexToHash("0xaccf37a0b974f2058692d366b2ea85502c99db4a0bcb9b77903b49bf866a463b"),
+        chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
+        chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
+        common.HexToHash("0x1"),
+        common.HexToHash("0x1"),
+        1,
+    )
     committedBatchMeta1 := &rawdb.CommittedBatchMeta{
         Version:             uint8(encoding.CodecV2),
         BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")},
     }
-    endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil)
+    endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil)
     assert.NoError(t, err)
     assert.Equal(t, uint64(13), endBlock1)
 
@@ -697,31 +398,34 @@ func TestValidateBatchCodecv2(t *testing.T) {
     chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}}
 
     parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{
-        BatchHash:            event1.BatchHash,
+        BatchHash:            event1.BatchHash(),
         TotalL1MessagePopped: 11,
-        StateRoot:            event1.StateRoot,
-        WithdrawRoot:         event1.WithdrawRoot,
+        StateRoot:            event1.StateRoot(),
+        WithdrawRoot:         event1.WithdrawRoot(),
     }
     assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1)
-    event2 := &L1FinalizeBatchEvent{
-        BatchIndex:   big.NewInt(1),
-        BatchHash:    common.HexToHash("0x62ec61e1fdb334868ffd471df601f6858e692af01d42b5077c805a9fd4558c91"),
-        StateRoot:    chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
-        WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
-    }
+    event2 := l1.NewFinalizeBatchEvent(
+        big.NewInt(1),
+        common.HexToHash("0x62ec61e1fdb334868ffd471df601f6858e692af01d42b5077c805a9fd4558c91"),
+        chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
+        chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
+        common.HexToHash("0x1"),
+        common.HexToHash("0x1"),
+        1,
+    )
     committedBatchMeta2 := &rawdb.CommittedBatchMeta{
         Version:             uint8(encoding.CodecV2),
         BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")},
     }
-    endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil)
+    endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil)
     assert.NoError(t, err)
     assert.Equal(t, uint64(17), endBlock2)
 
     parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{
-        BatchHash:            event2.BatchHash,
+        BatchHash:            event2.BatchHash(),
         TotalL1MessagePopped: 42,
-        StateRoot:            event2.StateRoot,
-        WithdrawRoot:         event2.WithdrawRoot,
+        StateRoot:            event2.StateRoot(),
+        WithdrawRoot:         event2.WithdrawRoot(),
     }
     assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 }
@@ -737,19 +441,22 @@ func TestValidateBatchCodecv3(t *testing.T) {
    chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}}
 
     parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{}
-    event1 := &L1FinalizeBatchEvent{
-        BatchIndex:   big.NewInt(0),
-        BatchHash:    common.HexToHash("0x015eb56fb95bf9a06157cfb8389ba7c2b6b08373e22581ac2ba387003708265d"),
-        StateRoot:    chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
-        WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
-    }
+    event1 := l1.NewFinalizeBatchEvent(
+        big.NewInt(0),
+        common.HexToHash("0x015eb56fb95bf9a06157cfb8389ba7c2b6b08373e22581ac2ba387003708265d"),
+        chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
+        chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
+        common.HexToHash("0x1"),
+        common.HexToHash("0x1"),
+        1,
+    )
     committedBatchMeta1 := &rawdb.CommittedBatchMeta{
         Version:             uint8(encoding.CodecV3),
         BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")},
     }
-    endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil)
+    endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil)
     assert.NoError(t, err)
     assert.Equal(t, uint64(13), endBlock1)
 
@@ -757,31 +464,34 @@ func TestValidateBatchCodecv3(t *testing.T) {
     chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}}
 
     parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{
-        BatchHash:            event1.BatchHash,
+        BatchHash:            event1.BatchHash(),
         TotalL1MessagePopped: 11,
-        StateRoot:            event1.StateRoot,
-        WithdrawRoot:         event1.WithdrawRoot,
+        StateRoot:            event1.StateRoot(),
+        WithdrawRoot:         event1.WithdrawRoot(),
     }
     assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1)
-    event2 := &L1FinalizeBatchEvent{
-        BatchIndex:   big.NewInt(1),
-        BatchHash:    common.HexToHash("0x382cb0d507e3d7507f556c52e05f76b05e364ad26205e7f62c95967a19c2f35d"),
-        StateRoot:    chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
-        WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
-    }
+    event2 := l1.NewFinalizeBatchEvent(
+        big.NewInt(1),
+        common.HexToHash("0x382cb0d507e3d7507f556c52e05f76b05e364ad26205e7f62c95967a19c2f35d"),
+        chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
+        chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
+        common.HexToHash("0x1"),
+        common.HexToHash("0x1"),
+        1,
+    )
     committedBatchMeta2 := &rawdb.CommittedBatchMeta{
         Version:             uint8(encoding.CodecV3),
         BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")},
     }
-    endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil)
+    endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil)
     assert.NoError(t, err)
     assert.Equal(t, uint64(17), endBlock2)
 
     parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{
-        BatchHash:            event2.BatchHash,
+        BatchHash:            event2.BatchHash(),
         TotalL1MessagePopped: 42,
-        StateRoot:            event2.StateRoot,
-        WithdrawRoot:         event2.WithdrawRoot,
+        StateRoot:            event2.StateRoot(),
+        WithdrawRoot:         event2.WithdrawRoot(),
    }
     assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 }
@@ -791,19 +501,22 @@ func TestValidateBatchUpgrades(t *testing.T) {
     chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
 
     parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{}
-    event1 := &L1FinalizeBatchEvent{
-        BatchIndex:   big.NewInt(0),
-        BatchHash:    common.HexToHash("0x4605465b7470c8565b123330d7186805caf9a7f2656d8e9e744b62e14ca22c3d"),
-        StateRoot:    chunk1.Blocks[len(chunk1.Blocks)-1].Header.Root,
-        WithdrawRoot: chunk1.Blocks[len(chunk1.Blocks)-1].WithdrawRoot,
-    }
+    event1 := l1.NewFinalizeBatchEvent(
+        big.NewInt(0),
+        common.HexToHash("0x4605465b7470c8565b123330d7186805caf9a7f2656d8e9e744b62e14ca22c3d"),
+        chunk1.Blocks[len(chunk1.Blocks)-1].Header.Root,
+        chunk1.Blocks[len(chunk1.Blocks)-1].WithdrawRoot,
+        common.HexToHash("0x1"),
+        common.HexToHash("0x1"),
+        1,
+    )
     committedBatchMeta1 := &rawdb.CommittedBatchMeta{
         Version:             uint8(encoding.CodecV0),
         BlobVersionedHashes: nil,
     }
-    endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil)
+    endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil)
     assert.NoError(t, err)
     assert.Equal(t, uint64(2), endBlock1)
 
@@ -811,23 +524,26 @@ func TestValidateBatchUpgrades(t *testing.T) {
     chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
 
     parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{
-        BatchHash:            event1.BatchHash,
+        BatchHash:            event1.BatchHash(),
         TotalL1MessagePopped: 0,
-        StateRoot:            event1.StateRoot,
-        WithdrawRoot:         event1.WithdrawRoot,
+        StateRoot:            event1.StateRoot(),
+        WithdrawRoot:         event1.WithdrawRoot(),
     }
     assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1)
-    event2 := &L1FinalizeBatchEvent{
-        BatchIndex:   big.NewInt(1),
-        BatchHash:    common.HexToHash("0xc4af33bce87aa702edc3ad4b7d34730d25719427704e250787f99e0f55049252"),
-        StateRoot:    chunk2.Blocks[len(chunk2.Blocks)-1].Header.Root,
-        WithdrawRoot: chunk2.Blocks[len(chunk2.Blocks)-1].WithdrawRoot,
-    }
+    event2 := l1.NewFinalizeBatchEvent(
+        big.NewInt(1),
+        common.HexToHash("0xc4af33bce87aa702edc3ad4b7d34730d25719427704e250787f99e0f55049252"),
+        chunk2.Blocks[len(chunk2.Blocks)-1].Header.Root,
+        chunk2.Blocks[len(chunk2.Blocks)-1].WithdrawRoot,
+        common.HexToHash("0x1"),
+        common.HexToHash("0x1"),
+        1,
+    )
     committedBatchMeta2 := &rawdb.CommittedBatchMeta{
         Version:             uint8(encoding.CodecV1),
         BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a688c6e137310df38a62f5ad1e5119b8cb0455c386a9a4079b14fe92a239aa")},
     }
-    endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil)
+    endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil)
     assert.NoError(t, err)
     assert.Equal(t, uint64(3), endBlock2)
 
@@ -835,23 +551,26 @@ func TestValidateBatchUpgrades(t *testing.T) {
     chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}}
 
     parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{
-        BatchHash:            event2.BatchHash,
+        BatchHash:            event2.BatchHash(),
         TotalL1MessagePopped: 0,
-        StateRoot:            event2.StateRoot,
-        WithdrawRoot:         event2.WithdrawRoot,
+        StateRoot:            event2.StateRoot(),
+        WithdrawRoot:         event2.WithdrawRoot(),
     }
     assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
-    event3 := &L1FinalizeBatchEvent{
-        BatchIndex:   big.NewInt(2),
-        BatchHash:    common.HexToHash("0x9f87f2de2019ed635f867b1e61be6a607c3174ced096f370fd18556c38833c62"),
-        StateRoot:    chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
-        WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
-    }
+    event3 := l1.NewFinalizeBatchEvent(
+        big.NewInt(2),
+        common.HexToHash("0x9f87f2de2019ed635f867b1e61be6a607c3174ced096f370fd18556c38833c62"),
+        chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root,
+        chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot,
+        common.HexToHash("0x1"),
+        common.HexToHash("0x1"),
+        1,
+    )
     committedBatchMeta3 := &rawdb.CommittedBatchMeta{
         Version:             uint8(encoding.CodecV1),
         BlobVersionedHashes: []common.Hash{common.HexToHash("0x01ea66c4de196d36e2c3a5d7c0045100b9e46ef65be8f7a921ef20e6f2e99ebd")},
     }
-    endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex.Uint64(), event3, parentFinalizedBatchMeta3, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil)
+    endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex().Uint64(), event3, parentFinalizedBatchMeta3, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil)
     assert.NoError(t, err)
     assert.Equal(t, uint64(13), endBlock3)
 
@@ -859,31 +578,34 @@ func TestValidateBatchUpgrades(t *testing.T) {
     chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}}
 
     parentFinalizedBatchMeta4 := &rawdb.FinalizedBatchMeta{
-        BatchHash:            event3.BatchHash,
+        BatchHash:            event3.BatchHash(),
         TotalL1MessagePopped: 11,
-        StateRoot:            event3.StateRoot,
-        WithdrawRoot:         event3.WithdrawRoot,
+        StateRoot:            event3.StateRoot(),
+        WithdrawRoot:         event3.WithdrawRoot(),
     }
     assert.Equal(t, parentFinalizedBatchMeta4, finalizedBatchMeta3)
-    event4 := &L1FinalizeBatchEvent{
-        BatchIndex:   big.NewInt(3),
-        BatchHash:    common.HexToHash("0xd33332aef8efbc9a0be4c4694088ac0dd052d2d3ad3ffda5e4c2010825e476bc"),
-        StateRoot:    chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
-        WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
-    }
+    event4 := l1.NewFinalizeBatchEvent(
+        big.NewInt(3),
+        common.HexToHash("0xd33332aef8efbc9a0be4c4694088ac0dd052d2d3ad3ffda5e4c2010825e476bc"),
+        chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
+        chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
+        common.HexToHash("0x1"),
+        common.HexToHash("0x1"),
+        1,
+    )
     committedBatchMeta4 := &rawdb.CommittedBatchMeta{
         Version:             uint8(encoding.CodecV3),
         BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")},
     }
-    endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex.Uint64(), event4, parentFinalizedBatchMeta4, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil)
+    endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex().Uint64(), event4, parentFinalizedBatchMeta4, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil)
     assert.NoError(t, err)
     assert.Equal(t, uint64(17), endBlock4)
 
     parentFinalizedBatchMeta5 := &rawdb.FinalizedBatchMeta{
-        BatchHash:            event4.BatchHash,
+        BatchHash:            event4.BatchHash(),
         TotalL1MessagePopped: 42,
-        StateRoot:            event4.StateRoot,
-        WithdrawRoot:         event4.WithdrawRoot,
+        StateRoot:            event4.StateRoot(),
+        WithdrawRoot:         event4.WithdrawRoot(),
     }
     assert.Equal(t, parentFinalizedBatchMeta5, finalizedBatchMeta4)
 }
@@ -898,13 +620,15 @@ func TestValidateBatchInFinalizeByBundle(t *testing.T) {
     chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
     chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}}
     chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}}
-
-    event := &L1FinalizeBatchEvent{
-        BatchIndex:   big.NewInt(3),
-        BatchHash:    common.HexToHash("0xaa6dc7cc432c8d46a9373e1e96d829a1e24e52fe0468012ff062793ea8f5b55e"),
-        StateRoot:    chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
-        WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
-    }
+    event := l1.NewFinalizeBatchEvent(
+        big.NewInt(3),
+        common.HexToHash("0xaa6dc7cc432c8d46a9373e1e96d829a1e24e52fe0468012ff062793ea8f5b55e"),
+        chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root,
+        chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot,
+        common.HexToHash("0x1"),
+        common.HexToHash("0x1"),
+        1,
+    )
 
     committedBatchMeta1 := &rawdb.CommittedBatchMeta{
         Version:             uint8(encoding.CodecV3),
@@ -943,10 +667,10 @@ func TestValidateBatchInFinalizeByBundle(t *testing.T) {
     assert.Equal(t, uint64(17), endBlock4)
 
     parentFinalizedBatchMeta5 := &rawdb.FinalizedBatchMeta{
-        BatchHash:            event.BatchHash,
+        BatchHash:            event.BatchHash(),
         TotalL1MessagePopped: 42,
-        StateRoot:            event.StateRoot,
-        WithdrawRoot:         event.WithdrawRoot,
+        StateRoot:            event.StateRoot(),
+        WithdrawRoot:         event.WithdrawRoot(),
     }
     assert.Equal(t, parentFinalizedBatchMeta5, finalizedBatchMeta4)
 }

From da81a2ea6cb6b9183190be63181f74dd7d1fe57b Mon Sep 17 00:00:00 2001
From: jonastheis <4181434+jonastheis@users.noreply.github.com>
Date: Fri, 27 Dec 2024 11:19:35 +0800
Subject: [PATCH 15/17] remove unused code

---
 rollup/rollup_sync_service/abi.go           |  55 ------
 rollup/rollup_sync_service/abi_test.go      |  82 ---------
 rollup/rollup_sync_service/l1client.go      | 158 ------------------
 rollup/rollup_sync_service/l1client_test.go |  74 --------
 .../rollup_sync_service.go                  |   3 -
 5 files changed, 372 deletions(-)
 delete mode 100644 rollup/rollup_sync_service/abi.go
 delete mode 100644 rollup/rollup_sync_service/abi_test.go
 delete mode 100644 rollup/rollup_sync_service/l1client.go
 delete mode 100644 rollup/rollup_sync_service/l1client_test.go

diff --git a/rollup/rollup_sync_service/abi.go b/rollup/rollup_sync_service/abi.go
deleted file mode 100644
index 428413dec9c2..000000000000
--- a/rollup/rollup_sync_service/abi.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package rollup_sync_service
-
-import (
-    "fmt"
-    "math/big"
-
-    "github.com/scroll-tech/go-ethereum/accounts/abi"
-    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
-    "github.com/scroll-tech/go-ethereum/common"
-    "github.com/scroll-tech/go-ethereum/core/types"
-)
-
-// ScrollChainMetaData contains ABI of the ScrollChain contract.
-var ScrollChainMetaData = &bind.MetaData{ - ABI: "[{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"CommitBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"stateRoot\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"FinalizeBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"RevertBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"oldMaxNumTxInChunk\",\"type\": \"uint256\"},{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"newMaxNumTxInChunk\",\"type\": \"uint256\"}],\"name\": \"UpdateMaxNumTxInChunk\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateProver\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateSequencer\",\"type\": \"event\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"}],\"name\": \"commitBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"commitBatchWithBlobProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"committedBatches\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBatch\",\"outputs\": [],\"stateMutability\": 
\"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatch4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBundle\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBundleWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"finalizedStateRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"_batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"_stateRoot\",\"type\": \"bytes32\"}],\"name\": \"importGenesisBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"isBatchFinalized\",\"outputs\": [{\"internalType\": \"bool\",\"name\": \"\",\"type\": \"bool\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [],\"name\": \"lastFinalizedBatchIndex\",\"outputs\": [{\"internalType\": \"uint256\",\"name\": \"\",\"type\": \"uint256\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": 
\"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"uint256\",\"name\": \"count\",\"type\": \"uint256\"}],\"name\": \"revertBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"withdrawRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"}]", -} - -// L1CommitBatchEvent represents a CommitBatch event raised by the ScrollChain contract. -type L1CommitBatchEvent struct { - BatchIndex *big.Int - BatchHash common.Hash -} - -// L1RevertBatchEvent represents a RevertBatch event raised by the ScrollChain contract. -type L1RevertBatchEvent struct { - BatchIndex *big.Int - BatchHash common.Hash -} - -// L1FinalizeBatchEvent represents a FinalizeBatch event raised by the ScrollChain contract. -type L1FinalizeBatchEvent struct { - BatchIndex *big.Int - BatchHash common.Hash - StateRoot common.Hash - WithdrawRoot common.Hash -} - -// UnpackLog unpacks a retrieved log into the provided output structure. -func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error { - if log.Topics[0] != c.Events[event].ID { - return fmt.Errorf("event signature mismatch") - } - if len(log.Data) > 0 { - if err := c.UnpackIntoInterface(out, event, log.Data); err != nil { - return err - } - } - var indexed abi.Arguments - for _, arg := range c.Events[event].Inputs { - if arg.Indexed { - indexed = append(indexed, arg) - } - } - return abi.ParseTopics(out, indexed, log.Topics[1:]) -} diff --git a/rollup/rollup_sync_service/abi_test.go b/rollup/rollup_sync_service/abi_test.go deleted file mode 100644 index 550c950bb337..000000000000 --- a/rollup/rollup_sync_service/abi_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package rollup_sync_service - -import ( - "math/big" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/crypto" -) - -func TestEventSignatures(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - if err != nil { - t.Fatal("failed to get scroll chain abi", "err", err) - } - - assert.Equal(t, crypto.Keccak256Hash([]byte("CommitBatch(uint256,bytes32)")), scrollChainABI.Events["CommitBatch"].ID) - assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), scrollChainABI.Events["RevertBatch"].ID) - assert.Equal(t, crypto.Keccak256Hash([]byte("FinalizeBatch(uint256,bytes32,bytes32,bytes32)")), scrollChainABI.Events["FinalizeBatch"].ID) -} - -func TestUnpackLog(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - require.NoError(t, err) - - mockBatchIndex := big.NewInt(123) - mockBatchHash := crypto.Keccak256Hash([]byte("mockBatch")) - mockStateRoot := crypto.Keccak256Hash([]byte("mockStateRoot")) - mockWithdrawRoot := crypto.Keccak256Hash([]byte("mockWithdrawRoot")) - - tests := []struct { - eventName string - mockLog types.Log - expected interface{} - out interface{} - }{ - { - "CommitBatch", - types.Log{ - Data: []byte{}, - Topics: []common.Hash{scrollChainABI.Events["CommitBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, - }, - &L1CommitBatchEvent{BatchIndex: mockBatchIndex, BatchHash: mockBatchHash}, - &L1CommitBatchEvent{}, - }, - { - "RevertBatch", - types.Log{ - Data: []byte{}, - Topics: 
[]common.Hash{scrollChainABI.Events["RevertBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, - }, - &L1RevertBatchEvent{BatchIndex: mockBatchIndex, BatchHash: mockBatchHash}, - &L1RevertBatchEvent{}, - }, - { - "FinalizeBatch", - types.Log{ - Data: append(mockStateRoot.Bytes(), mockWithdrawRoot.Bytes()...), - Topics: []common.Hash{scrollChainABI.Events["FinalizeBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, - }, - &L1FinalizeBatchEvent{ - BatchIndex: mockBatchIndex, - BatchHash: mockBatchHash, - StateRoot: mockStateRoot, - WithdrawRoot: mockWithdrawRoot, - }, - &L1FinalizeBatchEvent{}, - }, - } - - for _, tt := range tests { - t.Run(tt.eventName, func(t *testing.T) { - err := UnpackLog(scrollChainABI, tt.out, tt.eventName, tt.mockLog) - assert.NoError(t, err) - assert.Equal(t, tt.expected, tt.out) - }) - } -} diff --git a/rollup/rollup_sync_service/l1client.go b/rollup/rollup_sync_service/l1client.go deleted file mode 100644 index b6be3e0bc611..000000000000 --- a/rollup/rollup_sync_service/l1client.go +++ /dev/null @@ -1,158 +0,0 @@ -package rollup_sync_service - -import ( - "context" - "errors" - "fmt" - "math/big" - - "github.com/scroll-tech/go-ethereum" - "github.com/scroll-tech/go-ethereum/accounts/abi" - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/log" - "github.com/scroll-tech/go-ethereum/rpc" - - "github.com/scroll-tech/go-ethereum/rollup/sync_service" -) - -// L1Client is a wrapper around EthClient that adds -// methods for conveniently collecting rollup events of ScrollChain contract. -type L1Client struct { - ctx context.Context - client sync_service.EthClient - scrollChainAddress common.Address - l1CommitBatchEventSignature common.Hash - l1RevertBatchEventSignature common.Hash - l1FinalizeBatchEventSignature common.Hash -} - -// NewL1Client initializes a new L1Client instance with the provided configuration. -// It checks for a valid scrollChainAddress and verifies the chain ID. -func NewL1Client(ctx context.Context, l1Client sync_service.EthClient, l1ChainId uint64, scrollChainAddress common.Address, scrollChainABI *abi.ABI) (*L1Client, error) { - if scrollChainAddress == (common.Address{}) { - return nil, errors.New("must pass non-zero scrollChainAddress to L1Client") - } - - // sanity check: compare chain IDs - got, err := l1Client.ChainID(ctx) - if err != nil { - return nil, fmt.Errorf("failed to query L1 chain ID, err: %w", err) - } - if got.Cmp(big.NewInt(0).SetUint64(l1ChainId)) != 0 { - return nil, fmt.Errorf("unexpected chain ID, expected: %v, got: %v", l1ChainId, got) - } - - client := L1Client{ - ctx: ctx, - client: l1Client, - scrollChainAddress: scrollChainAddress, - l1CommitBatchEventSignature: scrollChainABI.Events["CommitBatch"].ID, - l1RevertBatchEventSignature: scrollChainABI.Events["RevertBatch"].ID, - l1FinalizeBatchEventSignature: scrollChainABI.Events["FinalizeBatch"].ID, - } - - return &client, nil -} - -// FetchRollupEventsInRange retrieves and parses commit/revert/finalize rollup events between block numbers: [from, to]. 
-func (c *L1Client) FetchRollupEventsInRange(from, to uint64) ([]types.Log, error) { - log.Trace("L1Client FetchRollupEventsInRange", "fromBlock", from, "toBlock", to) - - query := ethereum.FilterQuery{ - FromBlock: big.NewInt(int64(from)), // inclusive - ToBlock: big.NewInt(int64(to)), // inclusive - Addresses: []common.Address{ - c.scrollChainAddress, - }, - Topics: make([][]common.Hash, 1), - } - query.Topics[0] = make([]common.Hash, 3) - query.Topics[0][0] = c.l1CommitBatchEventSignature - query.Topics[0][1] = c.l1RevertBatchEventSignature - query.Topics[0][2] = c.l1FinalizeBatchEventSignature - - logs, err := c.client.FilterLogs(c.ctx, query) - if err != nil { - return nil, fmt.Errorf("failed to filter logs, err: %w", err) - } - return logs, nil -} - -// GetLatestFinalizedBlockNumber fetches the block number of the latest finalized block from the L1 chain. -func (c *L1Client) GetLatestFinalizedBlockNumber() (uint64, error) { - header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) - if err != nil { - return 0, err - } - if !header.Number.IsInt64() { - return 0, fmt.Errorf("received unexpected block number in L1Client: %v", header.Number) - } - return header.Number.Uint64(), nil -} - -// FetchTxData fetches tx data corresponding to given event log -func (c *L1Client) FetchTxData(vLog *types.Log) ([]byte, error) { - tx, _, err := c.client.TransactionByHash(c.ctx, vLog.TxHash) - if err != nil { - log.Debug("failed to get transaction by hash, probably an unindexed transaction, fetching the whole block to get the transaction", - "tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err) - block, err := c.client.BlockByHash(c.ctx, vLog.BlockHash) - if err != nil { - return nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) - } - - found := false - for _, txInBlock := range block.Transactions() { - if txInBlock.Hash() == vLog.TxHash { - tx = txInBlock - found = true - break - } - } - if !found { - return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) - } - } - - return tx.Data(), nil -} - -// FetchTxBlobHash fetches tx blob hash corresponding to given event log -func (c *L1Client) FetchTxBlobHash(vLog *types.Log) (common.Hash, error) { - tx, _, err := c.client.TransactionByHash(c.ctx, vLog.TxHash) - if err != nil { - log.Debug("failed to get transaction by hash, probably an unindexed transaction, fetching the whole block to get the transaction", - "tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err) - block, err := c.client.BlockByHash(c.ctx, vLog.BlockHash) - if err != nil { - return common.Hash{}, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) - } - - found := false - for _, txInBlock := range block.Transactions() { - if txInBlock.Hash() == vLog.TxHash { - tx = txInBlock - found = true - break - } - } - if !found { - return common.Hash{}, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) - } - } - blobHashes := tx.BlobHashes() - if len(blobHashes) == 0 { - return common.Hash{}, fmt.Errorf("transaction does not contain any blobs, tx hash: %v", vLog.TxHash.Hex()) - } - 
return blobHashes[0], nil -} - -// GetHeaderByNumber fetches the block header by number -func (c *L1Client) GetHeaderByNumber(blockNumber uint64) (*types.Header, error) { - header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(0).SetUint64(blockNumber)) - if err != nil { - return nil, err - } - return header, nil -} diff --git a/rollup/rollup_sync_service/l1client_test.go b/rollup/rollup_sync_service/l1client_test.go deleted file mode 100644 index 394f455b80c5..000000000000 --- a/rollup/rollup_sync_service/l1client_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package rollup_sync_service - -import ( - "context" - "math/big" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/scroll-tech/go-ethereum" - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/rlp" -) - -func TestL1Client(t *testing.T) { - ctx := context.Background() - mockClient := &mockEthClient{} - - scrollChainABI, err := ScrollChainMetaData.GetAbi() - if err != nil { - t.Fatal("failed to get scroll chain abi", "err", err) - } - scrollChainAddress := common.HexToAddress("0x0123456789abcdef") - l1Client, err := NewL1Client(ctx, mockClient, 11155111, scrollChainAddress, scrollChainABI) - require.NoError(t, err, "Failed to initialize L1Client") - - blockNumber, err := l1Client.GetLatestFinalizedBlockNumber() - assert.NoError(t, err, "Error getting latest confirmed block number") - assert.Equal(t, uint64(36), blockNumber, "Unexpected block number") - - logs, err := l1Client.FetchRollupEventsInRange(0, blockNumber) - assert.NoError(t, err, "Error fetching rollup events in range") - assert.Empty(t, logs, "Expected no logs from FetchRollupEventsInRange") -} - -type mockEthClient struct { - txRLP []byte -} - -func (m *mockEthClient) BlockNumber(ctx context.Context) (uint64, error) { - return 11155111, nil -} - -func (m *mockEthClient) ChainID(ctx context.Context) (*big.Int, error) { - return big.NewInt(11155111), nil -} - -func (m *mockEthClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { - return []types.Log{}, nil -} - -func (m *mockEthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { - return &types.Header{ - Number: big.NewInt(100 - 64), - }, nil -} - -func (m *mockEthClient) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { - return nil, nil -} - -func (m *mockEthClient) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, bool, error) { - var tx types.Transaction - if err := rlp.DecodeBytes(m.txRLP, &tx); err != nil { - return nil, false, err - } - return &tx, false, nil -} - -func (m *mockEthClient) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { - return nil, nil -} diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index 3380b7dcff0a..15b9c55ad633 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -26,9 +26,6 @@ import ( ) const ( - // defaultFetchBlockRange is the number of blocks that we collect in a single eth_getLogs query. - defaultFetchBlockRange = uint64(100) - // defaultSyncInterval is the frequency at which we query for new rollup event. 
     defaultSyncInterval = 60 * time.Second

From 875004525ff705af02b55818a884567c6e28913a Mon Sep 17 00:00:00 2001
From: jonastheis <4181434+jonastheis@users.noreply.github.com>
Date: Thu, 2 Jan 2025 14:03:22 +0800
Subject: [PATCH 16/17] address review comments

---
 .../rollup_sync_service.go                    |   6 ++--
 .../rollup_sync_service_test.go               |  16 ++++-----
 .../commitBatchWithBlobProof_codecv3.rlp      | Bin 4693 -> 0 bytes
 ...ommitBatchWithBlobProof_input_codecv3.json |  31 ------------------
 .../testdata/commitBatch_codecv0.rlp          | Bin 88636 -> 0 bytes
 .../testdata/commitBatch_codecv1.rlp          | Bin 1237 -> 0 bytes
 .../testdata/commitBatch_codecv2.rlp          | Bin 4437 -> 0 bytes
 .../testdata/commitBatch_input_codecv0.json   |  27 ---------------
 .../testdata/commitBatch_input_codecv1.json   |  31 ------------------
 .../testdata/commitBatch_input_codecv2.json   |  31 ------------------
 10 files changed, 11 insertions(+), 131 deletions(-)
 delete mode 100644 rollup/rollup_sync_service/testdata/commitBatchWithBlobProof_codecv3.rlp
 delete mode 100644 rollup/rollup_sync_service/testdata/commitBatchWithBlobProof_input_codecv3.json
 delete mode 100644 rollup/rollup_sync_service/testdata/commitBatch_codecv0.rlp
 delete mode 100644 rollup/rollup_sync_service/testdata/commitBatch_codecv1.rlp
 delete mode 100644 rollup/rollup_sync_service/testdata/commitBatch_codecv2.rlp
 delete mode 100644 rollup/rollup_sync_service/testdata/commitBatch_input_codecv0.json
 delete mode 100644 rollup/rollup_sync_service/testdata/commitBatch_input_codecv1.json
 delete mode 100644 rollup/rollup_sync_service/testdata/commitBatch_input_codecv2.json

diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go
index 15b9c55ad633..406895be1120 100644
--- a/rollup/rollup_sync_service/rollup_sync_service.go
+++ b/rollup/rollup_sync_service/rollup_sync_service.go
@@ -27,7 +27,7 @@ import (
 
 const (
     // defaultSyncInterval is the frequency at which we query for new rollup event.
-    defaultSyncInterval = 60 * time.Second
+    defaultSyncInterval = 30 * time.Second
 
     // defaultMaxRetries is the maximum number of retries allowed when the local node is not synced up to the required block height.
     defaultMaxRetries = 20
@@ -37,7 +37,7 @@ const (
     // of a specific L1 batch finalize event.
     defaultGetBlockInRangeRetryDelay = 60 * time.Second
 
-    // defaultLogInterval is the frequency at which we print the latestProcessedBlock.
+    // defaultLogInterval is the frequency at which we print the latest processed block.
     defaultLogInterval = 5 * time.Minute
 )
 
@@ -148,7 +148,7 @@ func (s *RollupSyncService) Start() {
                 log.Error("failed to fetch rollup events", "err", err)
             }
         case <-logTicker.C:
-            log.Info("Sync rollup events progress update", "latestProcessedBlock", s.callDataBlobSource.L1Height())
+            log.Info("Sync rollup events progress update", "latest processed block", s.callDataBlobSource.L1Height())
         }
     }
 }()
diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go
index c34f9385b515..dca18285d2c0 100644
--- a/rollup/rollup_sync_service/rollup_sync_service_test.go
+++ b/rollup/rollup_sync_service/rollup_sync_service_test.go
@@ -22,13 +22,13 @@ import (
     "github.com/scroll-tech/go-ethereum/rollup/l1"
 )
 
-func TestGetCommittedBatchMetaCodecv0(t *testing.T) {
+func TestGetCommittedBatchMetaCodecV0(t *testing.T) {
     genesisConfig := &params.ChainConfig{
         Scroll: params.ScrollConfig{
             L1Config: &params.L1Config{
                 L1ChainId:             11155111,
                 ScrollChainAddress:    common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"),
-                L1MessageQueueAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a1"),
+                L1MessageQueueAddress: common.HexToAddress("0x0000000000000000000000000000000000000001"),
             },
         },
     }
@@ -39,7 +39,7 @@ func TestGetCommittedBatchMetaCodecv0(t *testing.T) {
     defer stack.Close()
 
     service, err := NewRollupSyncService(context.Background(), genesisConfig, db, &l1.MockNopClient{}, &core.BlockChain{}, stack, da_syncer.Config{
-        BlobScanAPIEndpoint: "http://localhost:8080",
+        BlobScanAPIEndpoint: "http://dummy-endpoint:1234",
     })
     require.NoError(t, err)
 
@@ -77,7 +77,7 @@ func TestGetCommittedBatchMetaCodecV1(t *testing.T) {
             L1Config: &params.L1Config{
                 L1ChainId:             11155111,
                 ScrollChainAddress:    common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"),
-                L1MessageQueueAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a1"),
+                L1MessageQueueAddress: common.HexToAddress("0x0000000000000000000000000000000000000001"),
             },
         },
     }
@@ -233,7 +233,7 @@ func (b *mockDABlock) Number() uint64 {
     return b.number
 }
 
-func TestValidateBatchCodecv0(t *testing.T) {
+func TestValidateBatchCodecV0(t *testing.T) {
     block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json")
     chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
 
@@ -300,7 +300,7 @@ func TestValidateBatchCodecv0(t *testing.T) {
     assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 }
 
-func TestValidateBatchCodecv1(t *testing.T) {
+func TestValidateBatchCodecV1(t *testing.T) {
     block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json")
     chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
 
@@ -365,7 +365,7 @@ func TestValidateBatchCodecv1(t *testing.T) {
     assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 }
 
-func TestValidateBatchCodecv2(t *testing.T) {
+func TestValidateBatchCodecV2(t *testing.T) {
     block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json")
     chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
 
@@ -430,7 +430,7 @@ func TestValidateBatchCodecv2(t *testing.T) {
     assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2)
 }
 
-func TestValidateBatchCodecv3(t *testing.T) {
+func TestValidateBatchCodecV3(t *testing.T) {
     block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json")
     chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
 
diff --git a/rollup/rollup_sync_service/testdata/commitBatchWithBlobProof_codecv3.rlp b/rollup/rollup_sync_service/testdata/commitBatchWithBlobProof_codecv3.rlp
deleted file mode 100644
index 4640f84687416f77ecd3e0d0207f1bc46d75edbb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4693 zcmcJSe@qi+7{{;eC|00Qx@ct66`003eu!+A7GcM*5(cRWC1q@qbWWKV2aXNm9Dxql z+}OZhT@?_$i38e-#WEHM8|kXf%waN+A<1y*7BgfzwL7}$H4yUX3>c#lgn{@{Jz z=kx7{hd0k%(t*f4*~3U*szjj>~3^ zm%ol^pLlmy&DP?=i+^<4Jl~8sXWwr1Cw+zgz2YX{DH;?Aq! zahVs+AJYhMs{r@Ic@QeVF*ujQF(S9X19C5%2WtelRe<~9d$y(mndKMv{voCr^nZZqk)AOH#Q{ZplOHnnJ zUtIshv;lu3<>Ffd@au6O@E_vyQndj5siglX<>Fge{tUhZw?^MT@w9;c?c4ce zF_ZG`X+JRyz~9W5qG~Mv3se=h(tctZfd2$6&GL(zPof*(KecGTxcfnIFYupUv|n8R z#54f^7ypz0MY4bB_c7M}i+g@ENxy5+{iCoX#pwCjj90+@oTCWe8bJRB(tn=MOVwC@ z;)$ooLMz?>7CZs^|8k)uUT66i^?bo>IKx(#uZZnG`Pt~QlcPQB4Z5i{-E$4ty z4hSn8M9AbX*<+1kJfbr4Gk68Z#Fw`%y};4 zs$#IlUc=t_C1^st@y@-?Im1ZCtK~iOrkWk{n)XYcw;J3tir$}ckC+Vg+io|mNi*in zAe$!C-Y9gs`gl>*quu{Zm)a&({gK1VO3m7z^P8`a+tOOr-Y5!GUmWO@3@*uWVx!Z9@(jGS`omW;ro-|RJIJn&Y{r$doPMsZ# I*)@{*A9z%^Qvd(} diff --git a/rollup/rollup_sync_service/testdata/commitBatchWithBlobProof_input_codecv3.json b/rollup/rollup_sync_service/testdata/commitBatchWithBlobProof_input_codecv3.json deleted file mode 100644 index ca13d9a749fc..000000000000 --- a/rollup/rollup_sync_service/testdata/commitBatchWithBlobProof_input_codecv3.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "blockHash": "0xb7f00b3235ef6066d61e5e5be5472cdd1edc0a64537b84f110f860e6614a4759", - "blockNumber": "0x187", - "from": "0xf472086186382fca55cd182de196520abd76f69d", - "gas": "0x3e052", - "gasPrice": "0x3b9aca07", - "maxFeePerGas": "0x3b9aca0e", - "maxPriorityFeePerGas": "0x3b9aca00", - "maxFeePerBlobGas": "0x2", - "hash": "0x8de0573f1f72ced727838df60088d9084333d27384a77bf331d520cac0e6a298", - "input": "0x86b053a9000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000108000000000000000000000000000000000000000000000000000000000000010a0000000000000000000000000000000000000000000000000000000000000005900000000000000000000000000000000000000000000000000e5a938b077b60c939e58eeede33d4385228b532cf73e54aca76731a27acc86b7000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000002c0000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000007c00000000000000000000000000000000000000000000000000000000000000820000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000008e0000000000000000000000000000000000000000000000000000000000000094000000000000000000000000000000000000000000000000000000000000009a00000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000a600000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b200000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000be00000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000ca00000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000d600000000000000000000000000000000000000000000000000000000000000dc000000000000000000
00000000000000000000000000000000000000000000e200000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000ee0000000000000000000000000000000000000000000000000000000000000021d09000000000000000100000000668ebf810000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000200000000668ec0650000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000300000000668ec0680000000000000000000000000000000000000000000000000000000002e71a60000000000098968000070000000000000000000400000000668ec06b0000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000500000000668ec08e0000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000600000000668ec0910000000000000000000000000000000000000000000000000000000002e71a60000000000098968000080000000000000000000700000000668ec0940000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000800000000668ec0970000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000900000000668ec0b50000000000000000000000000000000000000000000000000000000002e71a6000000000009896800001000000000000000000000000000000000000000000000000000000000000000000000002950b000000000000000a00000000668ec0b80000000000000000000000000000000000000000000000000000000002e71a60000000000098968000020000000000000000000b00000000668ec0bb0000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000c00000000668ec1630000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000d00000000668ec1660000000000000000000000000000000000000000000000000000000002e71a60000000000098968000120000000000000000000e00000000668ec1690000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000f00000000668ec16e0000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000001000000000668ec1710000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000001100000000668ec1740000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000001200000000668ec17c0000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000001300000000668ec21b0000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000001400000000668ec21e0000000000000000000000000000000000000000000000000000000002e71a600000000000989680003400000000000000000000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001500000000668ec2210000000000000000000000000000000000000000000000000000000002e71a60000000000098968000400000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001600000000668ec2240000000000000000000000000000000000000000000000000000000002e71a600000000000989680003d0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001700000000668ec2270000000000000000000000000000000000000000000000000000000002e71a600000000000989680003f0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001800000000668ec22a0000000000000000000000000000000000000000000000000000000002e71a600000000000989680004000000000000000000000000000000000000000000000000000000000000000
00000000003d01000000000000001900000000668ec22d0000000000000000000000000000000000000000000000000000000002e71a600000000000989680003d0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001a00000000668ec2300000000000000000000000000000000000000000000000000000000002e71a600000000000989680003e0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001b00000000668ec2330000000000000000000000000000000000000000000000000000000002e71a60000000000098968000400000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001c00000000668ec2360000000000000000000000000000000000000000000000000000000002e71a600000000000989680003f0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001d00000000668ec2390000000000000000000000000000000000000000000000000000000002e71a60000000000098968000410000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001e00000000668ec23c0000000000000000000000000000000000000000000000000000000002e71a600000000000989680003e0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001f00000000668ec23f0000000000000000000000000000000000000000000000000000000002e71a600000000000989680003f0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002000000000668ec2420000000000000000000000000000000000000000000000000000000002e71a600000000000989680003d0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002100000000668ec2450000000000000000000000000000000000000000000000000000000002e71a600000000000989680003a0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002200000000668ec2480000000000000000000000000000000000000000000000000000000002e71a600000000000989680003d0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002300000000668ec24b0000000000000000000000000000000000000000000000000000000002e71a600000000000989680003e0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002400000000668ec24e0000000000000000000000000000000000000000000000000000000002e71a600000000000989680003c0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002500000000668ec2510000000000000000000000000000000000000000000000000000000002e71a600000000000989680003b0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002600000000668ec2540000000000000000000000000000000000000000000000000000000002e71a600000000000989680003d0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002700000000668ec2570000000000000000000000000000000000000000000000000000000002e71a60000000000098968000370000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002800000000668ec25a0000000000000000000000000000000000000000000000000000000002e71a60000000000098968000390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00b3a20d1a749d0917ef837a9973e549be83321ecafc3b8388f25c5c247078e383e4b062167826f53032aa9bb0fc2a8ef610e1c4e5d0f70ee133a76b5020224a8b2428ba7ec725c61950716b2758aa21f1b0cc3f0bf1551474537093924829c170e3d74228be4c07acf24b5c1adb1adac92d25a32f79ba51731343fc0f52cb11e32bbcedb1969853cf854f61862e915744fb5fbc3d4ec8701fc0f626f5c97211b", - "nonce": "0x1", - "to": "0x475652655309fa7cb1397537bee9a7fbafdc11ca", - 
"transactionIndex": "0x0", - "value": "0x0", - "type": "0x3", - "accessList": [ - { - "address": "0x3d9a28f7692cb94740bb64f08cb5cd19aa5cd3dd", - "storageKeys": [] - } - ], - "chainId": "0x1b207", - "blobVersionedHashes": [ - "0x0132d1238782d359051322a61d997a57f5b2f86d6f36c2aad4eda0118e3a213a" - ], - "v": "0x0", - "r": "0xe8f6ca4ef76a295dc6aa0099e0e9c4a2902cb0ffaeff0c4bae258575d00cae94", - "s": "0x20682493a94948fb97b8cb67e8ae86f444a96e39c5945f10d1008cf1a0508851", - "yParity": "0x0" -} diff --git a/rollup/rollup_sync_service/testdata/commitBatch_codecv0.rlp b/rollup/rollup_sync_service/testdata/commitBatch_codecv0.rlp deleted file mode 100644 index e0b42dc0a5815a67dbf48d3d7d6d6c55b368ba24..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88636 zcmd>n2Ut@}({KvCgH&k|dj|w*(gj68Lyd^#EKN}a1-l|CG<#4KyRl)v_Flnq?Y&p* zy;p4fv*#osfsnxUeZTL1&!2lu_Uvq#nVp@Po!ztSq4#lTe5d#6Bv{*d6+gc_Up<09 zxRJn;wWYXy?8LLFi`PBgc*fN3*0^5n_0yh-xO?bvM;bL>FQWa~hpW6gT?IH!q`Uwv zP7KezC|6Lov%}zFE?!k5`Z?13Is48SOT16vrni1kyhF{Ue>Q9K*{1_|MA^Qfe{Wsb z_eiitMZuu~E;?yh+vCPnJ-!go_-vC$o^tKg`M|6bLIDWn@D*I;@Nz;qeEEA#vVi{6 z*7!GB=dVS|D~O?U=WuBf-s?YYBd#l|cU=kD`cK=6>n{HXb=!&Sme#wjy11nN)Ar)J zYwBIM8s`q;y6fv*x0+WS#dSBY0`583Ov&xdNl(6#{j=e#NKu^;_9tStfsM ztp3RvJr7MAz@L}L-L0;+!|}4Sinjff!BnG-+tF- z)Vy|ojaAyddg&9Ta2lPZ94?i76~*cV8)PMBW~8OH&q^Mak(P?;EcZ_V6{`4i{>I|_ zmyh)bod2}{Df*t0^|r(8jyFvlr15x6+3UW?%G8v0tX}$5DV#=UDTn{-`S+*tt%7`& z^sC~ZzoSlO@%?@p-*xgzw8!t%zuIYyCQE+_{nYOdV?G-_JLBBEOMks1+wTQFJrdj7 z@b83&eTjw3I^MtRyFt@!+n4US%WYI)s2ncQW_6WPiNCPN$RSTA_tV?ac!H*0#lFCo zKQym6IPhj&wQh9hTJ!yhk>4-m%H)7x!$t-<5`YrMG30# z7Z~*{7x7-CR=v3IadqXvlT-KQy15En9zMUf@W6v<6E>~fZ#-;^h{>7K-ff&6|CihP z@NBOI>gr+Rw@(^Qf8J!F_3{O;U4c3VyOI&k$n~0kk|r<=@GD;M+VA0>A#sAeOL`pp zWN@&3yCH%4+}-p?5q)m+>SBgCpOuj4=ZYaeiDKQ~A zrQC0YkH__wMu*NN`|mYMe_lTSU98rm%qLw=#ZHbJwBl0B=)mX8{qo<&kIcOLX72ob zm*ZCG>W!=lX`N0Nd@jlDqoRMCc8h-Z(OeY2F!_1Xms9R-XB@ZMG`DipndH;W*^7e> z+iCl7Kf9m!J=E&Zmv6(~{J5I5`SA45w7!4m{g_s=qN~P;#j4t`T65-dsKt&CP5P&d z_6cX+h-92&M18hOMU4~Nuk`*wJ)o4dm5CM?XAkl)>(OZ5{B?~zv)P4HEek9c3vb%?CMK4aG&vq3@>{rm zbitg;mOb}5ocHZnR1h~OZGF_G=)kNmi+jCa#z>*j#$2f!b5Vyh;}Q0WT<-2M6A_&t z{32brW(hQ!35TF@f5o#2zX&ejM~8eaq0NgVSoEC0jP$JJ^sKR2a(Gog@FLkzrMeoO zs-}uCs3u#h{+-FzVv13WimJv(K??ff5lkTu2>n$gOrV*@#Kr^CYB+~(D9vHB;n5lX zwMd{b!U>i&!Lo^shjff2P0$E!!W7YII=pBeQN^$_7YfYG7#6}9Ty+qkBS@ezp_+99 z&C=6CSR^D{Ky(%lP{2i|5W0NDMf}JjJZ=Qkp|gkz@{uckgdWM~U_vBm=_E8WMv51g z%y!JGQt&Y+4)GKNFjDXV3cGcJFI#BxinK``$Z3mtV}2T#0IoMYd*NpbTg(*ZT}jCV zn(cwuc!JhR$QI&XU=d9iEyP?BswwVR2;%{z#Eeu}hUBe6h&jpcx=2!7C1*;7=E5lR zIMYU5&)A<|@gdhLB&Wn-`OS?Fw^?QkN#az1RAIp?VhWeD*(4P#L4*-Z4W>T_zC7^2 zR!S%q0?U}0vGI5)djmu*1t2iw6D`Q`CTMglSkYpuKNTr}m0mewZ1BsE9 z8HEvKmn6_M7D%`|UV#itv&^CNQ7C;-QW{gBuFb&&XdNCaQZW0aSz-dzX@F>Bx(xo% zLMy@uT0KD`)_8~o464R`Nf&sIV6qg87GH5T6B-pY2(1!~R+cxa@1r?sF7#YR6r1BJ!-vo_CHcJ<`RmbeAiw8M(K16P? 
zesFw2YWb!?Ki#ISo!xfT`@|zl&0Br$6aKmB*0%x}A7JTt66Nz!8ka*}iI~Jr5)vt9 zE0qF?q%)$$DIPFMNw;tf^N(&~ur-1g7zgv45Zy#la9C;8?PRW69Kljl1Pm?*XbN^v9YfzlF`T}Ys5|BQ{#uO3gZTiP)2T0aya z4|(q>BwtZT%5W9xx#riRLOBY+X(5-O^P=foehe2>AVC*0g#-;eDVSnds*3P`n6dN- z%sHtMBs(UFq);G@9Kn(l`;YR7&5mcYXR&9I)(9vBrK%e&8CQ7jAizpT7i;7MnzgUQ zaDWm7U?>UHE2e;7Wq?2wP|Bl7t)7EWCp$2KruT@fVdFs65KmmC?BtsBv3l2e)(}0a za}U&_b)rwc1u?aN&E)xC$!n*Ss)U*(qwiznZ zH0HDJR+7*9Q54rkQ(UV{0SgpscqXvj5T>JUC750P-VzicR~w%9$~@12?i+02^oBs& zdNCj?a&0HqXnQ)QM{h#SV$P%A*eb-Je+=?|4i1uhD@8tC|S} zq}OAZN&4-!Eu~_#3>0u(#Ma?Xf(0`gOc85jjET)xOqSx@n#5K+y0wU{gW2^Z)@Xnt zVkNQ&?pPh|*9UL#Dj+ZR6fXr$Z@gl_B%IVpC{BY?SNI?R7 zC;o(5B+-nZ{fe(waR+H}Snf0UYT#be`Q|VNCjB@>4&S^8AYOrM>ny$j_2{P z&^FouOl!0j94h!34T#ctbSxE(ig7u3wkI7UQ*~VI9JFRKmTc`Nv}FS3mPaR8d1ON` zIDjT}WXgb&l`z{eSU|_^&cba;q?pgQspd5lZ$r?matSS{WvUY!Z^aeBm`cY2GJ~*( zXlfA~U&Q7U3z8F97@o%w2q5*NU#~fI80LXk7hypx+ z7ONQYnPRKC0f%5<{+e4shvvP2VZa=59il21qh;O!gW5v_7#9J-&;eUW3Z8j)4Cy9@ zh-(2;1xAS8Nnm77^vpFPbaRc&EpRiFIDEd2kS!nyG#B8i7)#tJBmqbGo|~JSOVA;A z431}jcF3jpVZH?y-z+Z9!}P~XXD?C3H<$7hirs)_@pLW17(D?2H9v@3gOH_}O0|Kf zq1h#Q1#Zzsra=ogJjtQ;nQT6v0c~~z)>6Ik`D(eQ0y7K9M1ZaZAx2j^VX9SyMK;v3 z7z8RI5{nU}r;D4RGU{*(eY0hj?@UiZJ@DnI*+`dFN z#|y}rW;qbV6qdEQbbwRy%d!WqkPA7~v;oZQt+1At9XHD&pa&}q=yMSd`nT*H8;?T~ zmK%9_1S_8oDNvB`y$8s&gb-OSmQ_jb6fQg-6O$SV1@7|zY0F&Mv6NavQsq-M#B&jX zmL_}%PT@hpEB5N_R)Ch}b27|p<$|01gqwu&{(y#+8kPiX7HU=du(_`a7&|~AK&?8_~caK>9 z8;41IBxvxs3~EXya^sCM^A6hKk{YP-N#J#h_p&{<@0@V$MoSVl5d#CFIwT}FJFVAx12 zXgdkS*B10LX$8d+S|3NpfVZ%)v z&bBNyMszO@H*v0Yv-8G?whNM1)YqSb4q^SAWe1jzzOuHckkgXB&~u2EmO7m`mQdHBpq z7Sy$n)Sb5SR6L+i1t6R>Me2tkIX!CeR7l>3WYL`spP;TWBxhe=;19`;kSsAC-UpK5 zl4dY{P{9{KV^0qhLNWxB6VD!?K{6SVxv{6>AUPW+H(8}aa*3pFz?`%}z?%~Q>RZxh zFC?dllcq=w;bhhGyO6|OK+ZAiAc?t zhGfqNXC^}OF(i9WxN8S>%^=y=YE55A21)96?)uow7%_|hgp*J~6Otb6SrL$I4oT18 z&8s0914-Y|)t->dgQQ=Xp%o-&NSe|3b(d+17~ufY?!Rg)B!@uK(p>)zBo{!k>8?zi z+zm;mE{_L7@;L`AKL`(E#2`&7aEcfSMMC7s!xNC)YyAk&JVK4znF;O!VsHWK=_SmUkDIpMZ1ZXkku6 z+Taw?;t_LAb}WOY5Wu=D0W5Tw6zm+*fkh0aj(Hnk@| zRbEXG=g8rs)=^qstqtx8&IKS{SoPxe)w)2Aw-)(;H-%wj)dp0T?;|Zgk;F_b73t0t z*Bpg=k{e^SdDS(Gal3#A(p@7i50R0YDqo89`@wY}BtqTk>P0{-e6@FSu_kHAb+a z#N{=#YW1yQg^bRNp)COF*Fv^04L4*23u&@$SAfdPnMMpUhSdjfLu0tKSB*4Dug0Wj z0=-VG-Rsio@(Xc!EOEv=#6A60yZnXf@~1HrbKB&Axcpt5BPVB?u=gm|JsLkH<1;6M!{q}7`P zd%9dfv>HPYSp?;Z00avudXY48DU64}CY)ZO(}e)%3XG9f3WqOb687d$jEb{}_XxR2 zYc8?@#Ux}St(CmIJY=&0Xp>t5N>2hn;iJ;R7xVae$ZiyHL+ef+)U#)Z>q)&ef;ub% zK9bf?)C7VL(mu8IB+Y2I$9-zs;1B#O{v!l+Z>$we%g&X_T3VjKGd7iY0{+p$&+E23#ela zM1?i5UyY9RG9`8^2wVaP{evV3n6N2O(C0mD4kEoGjyL#^KJW$SQsDyyyvg7ZpAH%V zFHzXoY1G>elQHOREcp6*5a+D7DC)fuqF{)H<9^9QY`{ysD%1kh6cDd(QRtXE3F(_i zW;#|Pu>LgVBK=6bX<3J4Db9v-F-U(n2kEC33JQ__KuM!`ULpMg{58I?fShJXKfjQl zODZbRa9H|g7h~%2^rO{rzO$|^L!^HZR&2;X19xzfbemO1sE~m=U>E!s#Ac--8)VQ2 z>!3QeK^2oMKgaN%7Ha$ovsN5<`p@hHB7@D8?lRa5gtjOo^_jsrJ~B8#&L07#{0(kk z&4!g2$wp+@g3$XTdXPhh+2Fq|Oq1(VZ!H&?OH9?czb3uleImqx53VwjM62N$K`Eab8g-to1RJmrzh{1vL zI$+}Y3~pzE8yD}8fO?H*=NKwQm`06A+COTRpM3A){M?$i@K=8t#-J)WC5udRu=d$Jkoxj#^41}7D_s) z{(PFk-W!c)icDs6{&3F2b{iA#T_ck|9ApxQx`C~c*|^4M-|BmM5}C}!k`k}|d!VP$FVyIB&Lcr<_3^)$SNex+@7Ab; z$GKk%Em<4JFA9s&Of_wOq0hZZlO_SqFt<^oo8-xH)ZK?{N5R2y)Czom>>szfKw3Xj zEgow16Lv;Tk!eT+ur>7qY|qJHi%g?sWHeye_rK^ZjC+_bOv>$tTAU~>EJUVfr;=1i_+a`-;&FsvC5@S* z;0Qf1*h~{9A#jA09y1GmK|z6-Gn48tc8ar2ky$|l=ro%sL_Mk_C=fX_n>l&%WKz&5 z?y2wt>Nr|situJ{y&WTSbwC4L;%bREw-g9a>pqwvlpKS4hl8ts;156;T`|iP=gK=1 z;7f?tGUgkR$5z1f&$J$&vb&_Nve4il3-wyMO1erBf`baSv-SmF)_^9hJ3y1A3YtXLJ>Up4vVky5 zT~o0R^~Xfk#T;Z^z(?)ggV0;gK<(jt6tezMcT07OpY>g6=>iB4+0qShOXR~-N$xwU zBk|d5WWylwo&a6i*diN)Tr#|mY%HjR0JSHhL*zV{JywluG8(`)n-n2pyDQ)uInBg# 
z4WzDmcp)i68~LLpi8v<9s-l&y=EDU$-7Lg=nD4sKqSMeG>n@X$~1aP&~${ z=^}DRS1}Y&{n!AN(DVh&z!!j?WCp^RHvNU1tTD#;%_i|HneulWHp?7H&V+MZG4FHL^&Xj9f&)Xblx0E>IGp1@I=UFW6HMY9C(=Pe;ji=`bk z8wO8KNfJj8!dsM($dkkM?j5Jktzkb#<&BPVd9@oq?SwT86x16nGVX$fSvxlLXk z$R~1|B@>*5AQI%^fHsebntOosac8o@9Y@V!r2}sj;}=NCrDXF64r

xs;413xfpD z6q0TfYCb|hdRb(!p*b&?+Ec1CyXDRLhq_y7NJHJNqNq@Jt5&r_ z-L0Zjg}PgHt{v)@-hhEx4JEWtCmmo54Ye9gPFf*5hwJ#vOYQpjw^7{LhA)gxtp$piXdLLb_JKvGi6C zAd**rIzfCL@AslsZ+XZi5Eum0mt9OMVvG*=4>yN^?X5$2p z`;x&t7=w1N$mg$;=l1@8_`)`7-$g*38*yC&jY1dgV`1a2eFU{thciLZC^W|Abu23J zJJP6qF+Od8Uy()~!@${SSPQ5_CU#Iz2WuQ7uj8DC;wI%|?#PChN5(?`_7}v& z908y<{5qZ%p9qavMlxC9J{M^(xqK$-SP7g2o8@>5<4Mqwqed?3+zf1|4ZKBY4Sp$| zm&-$rEDmzCM=`L-idTd8Ti|6zF~9==?ur1B0Ekco7y^L7iU7F)5GVpH0>DB=fE@tX zE(bu4*LeIqK==ZH&Wl0FO&uG3>ATW=MHiL8o6NbE$VmglGZyIQ=b9s@M(}`_g_0N~ za%xJRTt-d_4Om?`#X*y>01`|5FVF?ZDIHcnZm_qDoF>3#gAGJHWj7m;({ylY&$UYh{7 z#0gX%UQ;{a^PkdnuhTYMjt_VKAm`>BHn@@Yu=dSE&W)iIUb{+ArJjj|Qf@P)+j8~_H zFr);NbXZ+H@$e9I?8f4`4rd{;hvOFs52#Zawaw3I4~6h?I+*rMAvc#P%#~JaN`w z$V#QMm}bbe6r@NDf%)$ukS|;US#!jNk;}CPh>*)!5TVyHA_Ti=6izRp%@^|B{D!Pw zl5SiZa5WHebrquCakAYoscu|B|G7rUb>o^0SxM4vAVOB}ZF$#)&<&o(k_kbb5kjt8 z#cL-naxKRfU?7ivvGEWY>ue9J3fh11;tLpbau#xPg;-EK2Xo+kVWADY6wAdg|6)n? z6N3;CN`d?&AeI<3fdf0nWG(WQVO+~d3@YKkIW7`pB&nq(25sTMeGnu_O9E08gW$dZ zH43CA0WriNxJQaXffy1HT@3mOLP+M}wWN4|i3hK5*L0~PI>JP5 z(;6_&Zn;7fmn}2S$ZZ~sqfE5Ejv%z+$tv&0fvF&)_3ri!z{uTPi24}HU_?dh>xlZ) z2MsD(&mbf2Owb|txh}5O<&&wpagYqTyI%y;BoirEj@+MsUX{k&fhc%O0ek^IC_@?x zq!xLo@sJ02>$19okvOl6zYt+Un7ie1P?8pq$0Wi)G|WeD@3T$f7K{ z#|ki+;+tQPN1oW4l0ig|IUM9MyH*epW|u@pB@qtjM{n1WS%QNAZxSo6?v1k>>)4 zjCyvj_QPP@e&NBxUkXJ|=Eel%d7Y0uH{i-7*O2Ek=nkMNL zc^#rKtXcAwxa3YrNgx^YERmZ}V)z{}tdwHdMJpffSs^imkQiXt#Y&oiQT9V!+K?I5 zD6?l`?#bN7fx7hIp)M}9aPE>!VbaCF8WSmhyG(>SLu%KVC$2NHx(=`)7o*-~I|p^y zR*m}MTz(!MNe}~ml6Xl9KY+sbs|&v)3*(WH&UNI?fWZgJsKG&RTQLrm)i@yU4j@%f zRx(cBK0M^zl#~x|I5jCRRo?LcbgiykExlAqK-lNfO^|m!2YKfqZv@i92WgaZaJ`oU za2fJW!J-6jp2Hn7;D0j&bkIrO2LW~fc?18dHy2=#@i=qy3o*02fqw65`n}&3@+Z-e zH$Dr_xDT2-de zK<21fMCk*cZ}Rg7`Rc$5$_HL5!EBZ+{eY9OI1rCEOl4mff1kskz|?;18ZS;v+uQEcRpZkRL3hB`g+eB0n!` z;QTBk1Bd*?^B;7jUrf!p;0NRHC)EJR5A;8t_qZ&tR@qrlcBrIXQl3evK2$IX%d{V? 
z|NXX0=d|CEN&Lx_?wSMKWg)*SWCj*tEaZO3?};=63ps)OzLFV!aIy&tIT!hBQ5m_^ z2#Jutl{5n@3M|*~KV}d$zwi{RD4dMZ@rU>B{kuxh@sFC!pF*Jnj0JT3lcX6KKR@I@ zlFX3cMv(hp$hKw90 zR;rj&)OA59@E2hPkrnXIp!u#^d=&UM)_yYjj(Q+{f2=jSw&S6|cT(#o*H=Pj#*pN6 zRw90hlZD^sg#Kju>Y7T}R`)f6kGghJ(brra>I$dg#JZR2D>w~h4GM>Qi9A_oXvRpm zpD1}uVsdg)7HcTkR}LCO775Eh>0)~2e=tE}>bw3R+Db7B(g2K>07iawQy1E3Ceikh;AK1+d`tbltQAReJN1| z-QoOyw;$5Jz#MW(=cRiTmfr%>+d$n3EWctur29}Tzj>60(tRA3Us%Y1yCQwrRSsc^ zh^1!$2gNLsMR>Q2?&^MokGg|BP>L|D|GKZ2B24wgPR<;f3d%&x6Bs;{l$M%aLwAKV zmtjtbBlJajiFIp=5ZM0@v6S`&%sE6dCuEp-a)T>|Lb7EhcL=;LO|lJpY#}AplN*Js zz>{0dwvf%(m!sGwGsESHk#J5rZ|9GZnC1q%IzLO(-iS0Vh88;wH23zoa97KNFhF!1?%2F9Z>e2|A)%!I*8f?Pk~J8DrFENH_Zgc{lcH$p%YUf}6d*VCe~4Dxnb6ed7n zPgom07+X;Ii3g1VR%g-xk-FD(iyhjm-nTx_o7Z>2?Hx2QOj+yXZ2 z6Cn4I+A%_5zu^TNeCG)Y*M{3@QCJX=1WP_4ME$7CXi<0<@PTv9PU6H6~8ZcGb*JwWK*%|s$VqMb;T)#5nObt^do~&L@?nX zm7|CVz=%9%UAG)XjO3vRk6IX|re}>zNl8siO@@_?)O`djNQ@{?EnE`FKQT@ld2&-5Avz z5DFuF^Q^LuEJjy}rqc0Fp_#gi69r08Nyrf?wBOy_j!LgKRwWxd0 z*J9-o63v9ZVCgUEi=;j(9SzGGlmX5$tna;&CPrmtu}!d%){bSNK1W2ZogPLIyu zp=dC;b@T<^e)5>qtPywuq$WX^vVEC#?o5*Qem0ljh#Z6N<}Gl4j;wFC?DR~JvZ_= z0ezJz?~Bs{`U0gc6(#X9??X~ z{G6b`u2KFl9mTO1ndKcdHD&1ITq7*2>3hIOoTbX?>#e~gw5R(qu%-|`pV>?-r)L4RX`ndv07%e#P@cSbmb@yEy3u5(h*1YdS8(-$Gs~6 zqp^SyImyJcpPKs8xMm#e|MxyFgAu-%-IJAoV_P-NBQ}f_M#{~R97b^ii6+wVh|A=o zI9Q@7#R&F)N4q4;rXo1$<5W>qqAMD3cN1Or_tyxm2@X<@(&v`JG&KsdNU#PPT-gAkF{B$ZvAJQ zJTlfZ%V*mCoNm8h*O8Cv*Q}`dL4Cj7>uz+!CC7hgn`Up^HZ2GqlD^(d7A}|8Sv~$u z6jv@w?m=h%wK1!Na#`{Z%*MZNcAg-c7roUbYJlq&HV9govx6+WW;$7C zYcTl-p&O6vHE7)o7uTankKWm>+??aL`h?rWl=G}ZJI&CwuP4kz<87SW-~U`%?sMO& z@eyWgoki7uD~|x(R9d=NeUIb7-hL^5-v_Nxg_YL+FPVXvb z{?5?+#9bj8-hPm5=L*tvQm{5vwEyrR>4eenPp7^*9vm`1D9&rmahs8zRWX%oKbj=Y z61Cg#HL6Fxh=1+d6^C899b3*Cu{UPp!S1QX&v)}3d}As>u!dLC)A_EkBua+xXV?{q z7W^4@HTTyR9tscfD?E=*pMr+|>_`zVIpt?hNwQ2Qxq-dw5MM!cIvEJ~m|Ng1mMXoasTlQV8adhRD&~jkT##rs5@uAU$FIy?8N@{ zd8JGLff|vix}|6ASBJIKsMvOE-M}l?D<(P52<5cd^sqF@y~Wc_?vD?z8ZqV38f)3s z6r{-^X)4fQs-4c-xNJdYtEJO#wW#{Y`8jFYw8}L)2hQ~07o08{m$mcJHT|-A+TFhx zet#n{U-dUVySG+b-!sXPshj(Bo(2t1`8J8vulzG-ir3pO*>Jeap|K0UK2*0GFr?Lq zNhxo*E1pgfc2ah)CRf|NY+tlB=`F9J-U!S<`+3wBWoD;ns?OYw#l2JLw>4Nq5 zqqjx<&J~?9o7^X~%e1Ll+pb*S9apgQ%4OS%nGtWE51#$3!{5;4wr^W>DRqA+r+EIQ zqiEvEm9bavr;Hg_qGhFNVd7X|t?lo;Uqsv0_3tw;+@wg%&z%N;yv+nY-GFg zxE5oK&u}c?yg0Bd&|wv)Z$!+`1s=C!8GjKXvvybCUz%vAd8@=G$bA2sfrj?0pIrDj z_~p`_>?fWRd?6aHxusHbqkz739*MhV+hfJ2_VruYVOb`BY^?st89fh88^E8J$K9>j zb%*0+=bBj5c=oIDT;oxVTO*_9BeP~kjc209bB#y3s76MOM@B`BjGB)X=Wi^&fB9IC z!1+)6pQ7(6S#LYc?s(J0K^l+8l)dhItV}I<@de+BWmm7iQa9c8J#*VhW7~I3k(SmzD|uK(S}JOAeN`1I zRPjd@TxkdDrO%bZX>^uyIPL#D|MK^;`ySZIvMD&Udr{e8>m^OU&zy61_{gEcB}4Ds zZ=L&4DgElDFObr5A+4*MpmPnYa9nnbz>mO|2J|p0iPb zpbD_6`Jod3qmjFJJ6hf9Zk+1+Xmhvyrh)E<7$&8QPrRG+wtvvGVV;RkPmMf2!tqd5 zjOTdY(~DU*L;bVf=Re;{-_`$^Q{p&;J_<{G=#}Ci!o6Mg4vbVvI z$$AT|))%K+Tej&tW7?q1;wgnYwe|*#Qh$BT*TV0|mqFcF+HOAg1_$e?!XK=5xkxAK z#=Qg+ojD#~({80#{J4D9?DnCjZ8Cco&fNHF(I@MLB6e7tT@P*t_h`THcjy=EqtE7A zcWJNTW45pG{m|x@pSgvJ{ZTV&RdOlhpZlZCkV09JxZzV^+QX0e!)?;bvFsn?3)}VG zvqlzPGp#r3VUL=5a?dr|mxECcA_aM}S^gI{N11JCzM=Uw-=AYkjk_d`ig9WjK3Sh((R)|%!mVp#1u=eh9Qq+Yj7njM|-aHByKbE9{Lj`QwL z9S50r{2Br+r+-U>)hR5uBeja1A}2H}j7XYt_Z&m#%tCjUbIdQIf~NT&GRkN)E-AeM z<_`xfHeY(7N0Tv+y6;`&dgDymKBCtUJqttDj~J)pUqtRbY5VdLk7avxTQ+RFZ&%y1 z=6@UZ-o4U$V+$V3Lew9>o09QusI!&BKPdmPxUdW6KGGT}N ziM!moc4sF)lZ!jrmyi8&t1Pecokg$vy!O|)(`w=4Wv*HsI*S~f`k(LF*>~R4MDuLp z()ZJL^RgY%9y8CL(phak%h9iGwVOL%?B;&?|Juz}*DR!Tu@)Er!w>;5%?V9HYe-5(Vd~e~+Uy9Kd z=c<>!QVOThS<2!6dN-{?6@LVYKeu|6UCtW!mqoc~Kf~gSNbQ|*(UGUFlg9tHGE{4! zlz#Qn*Gl0uI!ihHU(dh)^KNdq?W(4_X5I8FcN9E|n>uZ8-j19twC=C`14A8G9wgj! 
z6UKB=pLQ>y)#`cK@5Zj1#u?r1-nH>hb-b6IHp1|84x;MIBcSv2FJ3{xi)4;$+v`di;f>J^fmw`yV8NcEogj;B#~k zDxa2nXK5Dql>Ja%iQvo89HM);laqh5bFr<)&Qpay=_ou}pMB+8knVoH6W6VEY&Lbe zqLWh9Mt#hF;y7<*Rb!uQk)Q6Bbt~p_w$Gs_-1o36p8GJ*=I7F!Ve_1>KmQUKlRpgH z++LORFwI=A`6p=tdO%U}ytm<((h4UhT|JxibHXoMuSF-7?$PP zv1399CuheFaH?T+N4W|lOVx>Bx|{8G*6MdBhh%u0Bo4lOc(2u&jJr1;EWF@da;L2* z=(rL4)9b&QuQ-0$x7*?*Vua)Li+vae44lvE?_XqkBc(XuS|nial#V*F0(fZ_MV#UX<9ESzcB0d#lCNstX|f_#&gwetE>xW z_TKNbFaOP9UrYbrPiLML4O-l&P16~-BR9U%`D>JBJLAWV_deWq`p}#~F6C3M?3=*@ zl!tvw=bN7tm^SmnF1Xf|EuVMNHF@fmrB80P`|M}6eOCv07u@!C>X$`(y0x1->-f8+ zefsTt{!ZfAf-x@dX1G`AoYyAjCTzZWCNgmw_ej-+0isP& zUq=T)pRlXdg<3z_oGd<)b^QGgozNYb8%LbiI`rCMfUC2Ay>_)2q*%5t6}))!GmNlX zXKLYiRqsvgjM$dM=l2Wuf4dy)di2p@(a_@j{kq$Z931U#ki(7Ta!cG7G;L9oknNCd z8MMpC_ZW!f#!C8nXzh4~fc~g)U1HfaeEKe>JCQV|5nj=hpCVZ^k&;iF8+c`l>RH z9~4&m)d-2P>x8p^gq~$TrFE) zLHa+j5m_$kv~lC4WxL-l_{Qje);shn+e>3qpE-s84p)x9S#)aZOok{Vs-Pvy`Jt`T zOuhSC9-O(eDZ6b}|I(2yZn(6ssx-RP8kpX*k{(K~odtULl8Pl-vr1x_gyZ5<_uaO6uW2Mx$Kam*wDsxorE>oz@{`={j9jMT}^c6rCA;c)!uX zCG#9E`TL%`S9z*%*rd#EqSmgPlC})k=>IG><%>~J&R+X3;c;l?!nJ2L^fWvoj>Cxp z@??!3sdbnwrYb!Ckl*K{(KtU^?UOY(Qaf67-}r|1{cQW?kpceM=lkT>d9p_Lz~I>Y zMT>6K#V*~A39Oj#(eLVl(`bDrmbR21sU?@*9P77YXWxbUt_@C0Z+VY z`{lmB+mE@D&~eYIT_zLOzbJS#T@>wE9RgcGvQD!d*PPH+Aq zrHvmqJj!5{?YNDcOP8nGIDDAY%)Rb01BiO5iuDGarL;Z>xeB-{@P9o7riwq|qlF7c zMF0NrY3XLx!TzqtH(fb$`=#@Q$0g4UTIrl-)j8`(!qiLODkqT6QV#$3^G_9jRKb`!xUCJJY5O8Ms6c-Oh6w zYCop@JEz!C&%%4Z{rk7$x;{!jJu3U6bL)V)s~-l<{cGojSI&*@p6EB;@*@2Raelpx z)z`DC+@t?C{_A`2CqMEz`7Xtz^3uB3afazTm+$c74)k=o?%VkO`%8w5X*ZLf2QDvr z;BsW|-m)Fqgh`Quc=z4{I~JH!^s>iO0WE3p8UIIo5IBrGg48S z$IPRa^8|v&%6!Mh-9<>t)$-Z)E=LUX9F}(r^Ln*9 zyz<4EPqrxsQwOA+*f#t&aAC){ZM+Sn$yPTzP(1I(Xu;ej27O%9oQm6>+u+)D$D{{^ z^u(Ys{U+03U-854Uvdt|SGHABV+(8R(JL*ro}0e@JIciPXkDC@v_h^Az1|MXGizz5 zR zKOY*c$wf!w%?u4pidUVh+HmmRlEGmYol8fM;?lxz1Z(Q2iPxSbY`5v>Gjwy`9VS^P z+F2Id-)-`JwdkmmsC)05SM6pU9^o-}MF~-OgteE*&v{j5s$Eg|WBu}@AK`>78AoYG zvYh_%$Nyp+$CNU+nUml9xn|oc{sEtB zbE3=6eg8Ps>ig!52YW>07MUA;c%^-8_xVvwt>t+(rmB5;oL$)*QNQF(C8KqN+PcWXYo=kvmq3nV!m6lFTfZGDVk{lj zAz}GTW2-W=DSLEgweFz5TZqQr7K~Qv-2f~XI7L^QBt6Z;jGb;q@>gmN68_&1!PSxIHKY#lA)SOXKO_P7jE@=Pui2DZ? 
zZ8v>IpGS0TaO(Ud>s%?T6G#F4RKQial`IwYAE`a+BR2q`@@jtMUJ7W)f}d)L{z775DBpD>y@z);NTMtjKTr{Ap^#s?e zt$Ti}Y9uI5DIi)7n9*(IwK31wzWb(bdFJULVm;axXFvPS-A!Yo?5$orYu&`>)Sl6P zdPfFi(>`?ZwZ&oqr=qAA8ekL1=lS0DhnBuDYyI+luI2&r$6aXqjSFp?ZMnBTk~)Z0 zCfcLEHphCB=V$k64rc4Mf`6XM?f3yX$2xTEpY`s@pvi|s-RNOSBe))wS9NbLU1Q?3 zj`p*#xV``KJ$eOgR-`WfxgEM(|81=Xspt=#C{{F8xVyS)_g}SDR#NwFFqmZJuzJO+ z=Z6=C9f~Qkpl8%|UZolPruL^6vpxJkYRg5>ysAQuPR#vz+IT1P`;+tYuD5J5|Kj5P zi?ts7RhWJtZJdbF=HYQmlw3NtJo99~y_2-E%Cxsy?^-kJ%A_Ng=O5|@y@28YI%w0WL(u(i4<{#<9rcRaWEP67TLI|gTGiI1@s}O{h!moh&0>u?8{(T&U_qu zG5oCVPVa(CA&Yw19M91`uWc_2ua&0fudbCRpCOWeto2k;D@Q&}JkFiM@kdLn?hZNiRFu#Zy0{|DBAZ{94h zZ}Ii_FZ3uYAg7Ur9r1~Xn6Tc#Z--SAtuf7po~F6Yo$-j{u=T`>Q~Dp-5loF)BOZ)y z{A^~=ir>vjt1g+~B{p z*|wLNj#J8J6!W{7LZ+1E**Cw1)|ve@K)E6pQQ-l5N3 zZ#^oQzPoFbr{i}KbJFez!s-0iF&npQv~bQE&}jMFE$pcU&5Bn2y{cvUM2xVi&+PY- z{atd2279FkR8=A2c&}7lTvGq3D^5Y(GI5UbA6!?mV$pxvSzPys@=9W8^&TyarvH?D zr4;(bFQcmdNc&}!R5FT3lA1zoYiRN4Mr{)VN-S@+9en*pz?()(=YCliQm9|CW9zCU zFd~d*>*uhqt7x620xsv-C@AZvAWtTXE|Pgrkfb*od3NIy1pwJBmh6Lqv`9A3X|==P z!HnDNBLT$S^g%!Mx(YkYk%h~qC9IOolSv0mFN;gLGNk7WnY>!bWSz~uYGul2wEaHV zZ$ene3)|q>g^fec4L=+>{&5&h^L&hP$rD%Y&Lu;ub_{9fwYlG|OlP)y%|CxqGihP~ z(dN28q~X1h;cL$N(r3w$;rEh6I_~KinO(6F8&yfqqIUYRvwwU0^cMcUT3IIL2ePMHPcf?0 zO&d_q(sa0K0Tr|%)~KRRhEg~C(U~vaElq1vW``7e_z0r%B#T#%N<7i3LEyazk1Nxh)i_W$qKWmN6O_8R^;_El;5 z#_lI>G#%P{W?Q{}&8C;0Ugg`%`KX)SoqpXF54Jd0z4X0OIE~Iy4p-@PtEf)E7Ob=VYffYCMP`Du2-@XyzyV>jvxRyLRdJW}qnh?W%1b zJ%1c~!E0-hrZIKErrrtc=GKo@tziCed{xPZ|H5f0dT!Hv-8J@==zdP#*rBn&wW-H4 ztsgJ!QV&dTd@3ZVU;2Q}SGTn{?f>aYK#Qu!qgERp@eeBE+K!sD)@>~NfBmg+?E7_;a@r24{8!pP<{k)wI%3vH{XpZRv@q)q{rPq?#Y z&n+>ITcx(M-Hft&MUYp{et*F7Ue?cJs7J4tC(pK6)$Z`Cyxj(43po`lLtd?&AhI}~ z%)9g0zN2T_l5J+EMs|!^awW`HcZSR1E@|&`H#Kh$qX-dX7Pi>$G6}sj`Rfj|Hy(ZCIS_{+qvc&)R}Phl2NO zS`Qi{>U_BDVaCj~KAARmxvXwkZ_kFUdM0pfmpo=U&**f^_tuMohe|mXk_p8uC|F6350F1I)_TSAWB&3mqR1#pNhE75cB&5(n0;Dex z(QiTmgpy!FQ-N$t0W3?mphi%!V8IG_1q8iU!3KzkTq%MD6-2~>h`ir9U)gVy-A(ei z@BiLEN^-ta=ggUzGiOejTHl`C-|o2sDq8WT7kjar((Zmvs}l=Xc?T4jdwEZ*G8{Pm z>ae8PyLKDY=?%l$GXsnb;~c-WdiA%n`$X#WmbR}h>mux1Trsn zTxa4c;FFtvn6dl7PtSj#Z@;Ai((fVFbs>214vp6&a|w_i+*VwWJW3Cs;hH~qfSzWO=8t=!dN!GkY`Kec&R zc!kfmqvL+{_8xs($OHHNQsO)EdY}67A5VI4=a&Os*}utjag1%@9Mc{3RdrW>%M8A+ z{zC3|?|z`sEIE30$zkz+eRjv`M&r&U%SkK1$Lv63Sdm5(Vi33&Am-Ol_BYs?JDIFX zD=(`oDXYBG6~0Y5Stw#JrWK3vW7@9K3?H{}lw>c}{KnL!bP|ZUxo%q0u(nKs&SpYK zudlWhX+rVrD1i24G1@jtOjZ^?xa9M(Iz8n@bLTe$Ev>IQyaTIQN zM1|WdVPPH-aCC`cvjksDkw%AN(M6ibgb3Se8?S-1diWsbwXYKV%+9kU^DHJ~Hp*!A z!fF0#ls?fS{~jaM%QYQ9HA@t@j6Ax(QKRb@K@Z0cz_0eG@~Bd!FJgKz z+=PDsB*5N*@8Rq_qCG>3?xdWvNYj2d9+uP$fWvC`7bw!$@@x!CHc#ge!CxuMB_Yyj z9*$e5Dd8O7uDayBNY$#--kWm6r-Zg0^AL* zgd2h;8s0r+HL(g=B85LIbpW6KycWlHRpCWrQ>sI8u914c8>xIT_ zbl1~>=%{E$0WlHiE__wlA7bVqKEfcxeD=JFN{E^b@iikaNYo_rxsSC@&{jL*U85B|rFp2f&5VfQiWBN8&uP%7w$m-rfo)e%*oRl z&H3z^c|mK_*<@!zjH*_%{}^Jm*$i{aB*+`)ofJXflN@`*TFmwz#S!ojcPEG* zLa6$Uk;fI^eJxDr_%VrGq=_CWPaF_}NX+1g{_DiV-&Kj|6nbiseQwK>9m*_AE7JI% z<0WD`i6tb)b&F18ID8GEI2^n;U%dOCf>?~6uE?0{g|dGniD#r*{gPTztKT42E4-mG z!s3&jTq_V&P^1YMA>$7K`#spAs%@w`W}d2QhG)N5IGzJ?MXW6rvF1tuyKtwj1KQhw z9sRCi+6|a2gNWGH_UKTB`OiV!{`sI4hq_A|)O{c8kN?t!+3t0K2@W)w z;77)7V#iO39bYPT9Py{{?`4!jI*K%Hma%m!{&o@C2%1{w#hu4C#CPy0k={%Z)j+AD`>oDeo#jHQsyexE*N2BM z6;3%h@Qa6Y5^tbicClDMHUO?-4O2|ocUQ=0GX$`!&br_> z%wR#Y#m)u=v(DBy@0Rn}hMl!5x{W&P0@=;&Y|!n_Ix9PUgWBSv{^r09Jx*Ft02x^S z2Hz@k1e6;@ov~sAG+Vy~kHqRmqct^aTFy5MTCKTxS>--4)iXGck8mFIonW8# zf)@%tNc&JEtc?!yp3%(IQXP`lU{u+HG8o<|OoGyATq7(ELHOPUQ5(DyWCTfF zXxAu^xlmqap__ITDSpzMKN_i$zrOWs^>LFsp>X8xnq>uffo;@cL%BS(K-9K~tn@I~f1^<6lqA zE^V<=#BX@>5W@-)$|Vq;gNE9ofA~Ec{|GR&P2|280l;$&oG&5`%jLGPAK>?W@V|&Z 
zVXIa3xSPijp3_D;j)_9tDa5JzJ_Bj5H z0G5y*&eC5t7|kB5vkHwS4<;PK_5oR8a|#OAu@5u*yKar7?f@@gUmLLOKvMV)kOIDc zl|&5x1}$MS2@f;~9y}0*2E!*Y8p8;SWrQ1621|HfmedCoFjy>thcD(rPI%$tSjt!_ zMJk20v?YkNZ~+r8Otc0Ge%7Gy2v*GsgW2qB%d+qehFe%s1|_Q$LBNRL4_K{ML5tWW z%$@=JpjE_2_{%K&b}74w^}o>WEbE5iFrKk(_;FOr*mX67=o=7d_#RdpY-V9iiVenw zVMiPVctlTCg%Lv-lcRR#o!tX~e#l%=Bawfs6~3g5AX1 zj~vf9(TW(5C-hc8)-j4X(hXO50p(VsKTKdC_=pLl9SPS_%oGv%qN!TY*Ju%ffW+@z z=w>8LWL8G2vNUq^^n?s&Oc={Zt9mSz*^o~?VhKf;hzc=7O=E+iatsGsW-P#pJkQO% zQ3F|(S6G!uNC6m8o{SPu6YF`D6~i7?)g+NwL`ij!ZIhFcyW-!R9T2xXo1AfL!;f;9_%iGY}$vP%_$O1U$;rA6l1DA{+ zqJBefqE=xDl3hBjgsmoqbq$W;rQvCT^H&sx5LUI=ZWdZ>*BLDWDrLCE57{plGBEmY zm#MVK7`feMl$05{-HR%|Lp??=f!ZAa#@oFwF`6SOAmLj^_82lo$zoztj$GqK495@D z7nLq(gjfa9Y79qbz_+X&!_kvjf4P~?O@E`8qrcIReKLldaCtjGg3CQvD&J_R{VY6D zs^Gc=xT33@82OlR2G^%{xa@xYn)0;=>qS1C?b%z5DME`e*d}s2@-cO2F=mXt#U?N@ zP{hR07GNUgf&(UE{$y}5Q-?bqXs>5*omAk0%;MRhdnYlnf`AeQWP6wz6e?3DC1m6J zpEM7(FH@1P4ig)n1>3Ix`L>5#=Chzgq0Lb#;_aHZF_G6S*S3!lTq*@a$$J2T$B*B0 z4#7zXepz%O3Bf7^e-2sxIf5@Dc=`I-RS2FkpaqCvUSlY=G9c-pmnXi2U@d}A81@}Q z@C^i?@!Iqzg5M&z|HCKsC>tS{z4yt3g#h6Ol2|a5y!In_U+$y#Ab1AB)n7h;1!aQ~ zT>tsQDG2sMaAWWtQxTjXSHt>;0wa-h=La)w2&N;r{G;6(1WORK795;{;CdE(DXI*? z$KaA5geKEbOM6a2qwSXCJMp(!zEvxnD#OGViCi7 zr~5b395JuEVK4coT&pkKNVEK&{w&4;9?yPCe$Sq)C7cT&KdgFr{t<{t^x+@OK8}&~ zyxpFEs*-;>hs?8-{MJY%7qONc#@O>gd&zaIUcf)~oIfx?%m_1hg0(uj%%Zx3PST zMgrdw)2i%gcPX`gDb;#U<60k~HhsjBTdB=RDc`P*8^n_VG}7b%^FaziV{B}^K1W@S zW8-zr-u7>-ZALXgSW&Y4t$Z4_-71QLK9N$pAvL+1x~*12CNh}^zT5Q-gf%;X7rze1sH|vrTm=!9rN4i-<#2Y>Ohu%tCXM7f2t_tBzpF? z)4#Vu{}FF6SOr1Q8S}`;*G6drFdGXUVM;Qt_VmE!F`%`X@H@u{v5W&ph=LW9dCK#3sonQXwGuF;a`>0!xPjeM(-goTuLwPLxn6~eHI(<@r74e3TpF!{_dm~A?t zV>q%gvs4z$05kic`_#ja0SYR{_=Cj6dQ0r9A zDDvjGIfz=XG5~SSJDftT%{Cg1Nmc#uQD|Jzu8G6zx&B= zg@pR9r6TJ2!NW$|jX;3ktJTD&I#}$;@3$6=h2I4mjSg0ig@*h+YHElHCUBa6U~LU2 zhpM#I<1W4VPxArqN5_2j)xJw_pWUJVdGqR*N+8f-)(y5v;M0FtXNUA zawUejRy@`qoQ?+iJ+-)pb*=$nu|ItQcU<+sM^8I|^(*Fi^gP20+10w4B zTUSs(h|K9sh+zF0YNUW1wrS~tvYbcbTnq)=VW5Dyb(T5`m?>Av_=*B**{{3nYB`%x zKuw+5$^{i99HTzPnY^HJalX^lmLUZk#)^#sTd@Yd;A(S|7Agw#0(4_ufNfSW*-+qA zCWBmTgF5fBBag8?Eo%28L#JPb@Pv zf&3zAx2ESY}zf%s%qmNVvVSDZpc#qXIs5y?Yi|4M(BBS4gvGooq`rROevT$ zgLb!ok%L}qIAsRC4{MY(>q+EBK_}8ErV|@9)~FzOLU9TDsrgDyaI~W21kVzcoZ!Sp zN>1>srj(rE45vyC1>YqSQLvd}G5c^j1p!g;daKY%HAMwKXQ1GxI4@H$v>kNe)GuLp%jcq{%g;Cp>7Eo8)wl*63GFq3X32NKJTvKD!NxGj}KQL0;WLaly zJAvtpioPi-UzSu%(rq7ZN%L!LG_HfHdGx34b!z)-)j3NKue-17j^393+Ss;zhkbl~ zQ>6a+yC2ES^)3zVdT8pY6)RQ%X7Dz({bJo(W=9#9#OrZ#aGYYH`!oGG_7I=_LVZjW z`xkabLn$=91<-~jqm_?U&{AlwsgAe~!=`X1emM?8A8Sdi8*G$5Tve-}sVckU!V4kW zcUDd4C=6-yQ&7n3 zsRy_?S`$iav$xrbQMebtfJ$tq`0z-Jg?db72odQR;vEN9Z$clq*<(P`S|x`_8n=I7sPN`cmB?}t2RL{@yQoD04z;C+~ zHkuTw!mmtY>O?N@w>FhPdcyPFvqRE){6ccFW=6#BZ z5CazK!>vP{&(+7ODW<#yc#D}~qpYD$@W$Cp;x$l3%>AGWwi2~d1*ESQOG!!1lP?@P z=S!C;Zr>99uRZj?EC?P&+PNB0m*ddBCHBAeu>WPlXsk4c%Op2t{ky4Ynu(Z}o$Ic; zr|`_zKm53?@X(|?QkQkG{_>?Rbnovk_vlcl_kHhQ`*(J=pl8$pHzOUSf;8yV!ECik zs(O=_3M^7O{ADmPjlRRh7LZ?uuYqD1)D;v{hif=|CTa9d2z!@A)$xuNfY5Q4jdGw* zDIm0%s5+i*Npr{09E>kYbJXz%#^`Kn;bw}Pur7le`4H$^(tkbrpUdv-J|cSnOqi2G;3G zW_;|FYo&hQ!aPl#*#4&|&a0`@oH`5jXM2V)131^FSruyyIFU!4wjiI@M4jqU1KW35 z0|QlIp%F*#sIwPuWf#b~KejV$S*ZJ8&8qt+vF;mE-E99)uA4fsX`PutI%gQDbGsTG z%hxI3Hdn2u&aehHX|-4j*(dAoovXO{!1)+c=STRCuJcrY`b!H$Lg(+nf!_l>nFBGV zov%||G=mtnJLhj4R88A?(Ko&D@;5g&t=?+MjDF(!;ZaY1_w2;nyN|5={@hEI&r+8Z z-UCMNT_ysfs$J*D(L|Rb=<|^<(=uwOF7xpfS1c=nFn3vlsmrbwbfe358|4+KxJ}GXk$~X%n65^Oa+oYqe!$5t;HCa%{sVi1G zxK+$9knp8s*K7lI&1$%m%;T9s!E0<>jiRm#EL_Xt#)hsYE8l#wWtrKvt(y>t0AGBi z!YR2l^|*oy@#iAih0v{v8~EMkZ;mft^U(4V-TPddQ*j&-n9UI%o)E%4Kl&L#*aAQ-q@&X3M&J%oSfG^OZq1eToX>D0m 
z#{S<8_x$|spK`|qcIy^Dy$$)A3r$L#3W1X(?Eb~QTaGKw#~_f zVh>R4JA5DCLIZGMLRl*?TYBtQu;kU!0NB^relNxTWTJs-KoHnBYaJKF3>@Q4ZVmzM zJcP%`s7~1M=QpUwn>NZ1Vc<7- ze8cTRTHFm`J5}me5~$~^2I?79%ic&3XTAU1_Alyr2_08pL~kUE9y_7Zgef5$1~gC! zw#Ax$8-aRt#nT94-Td%Be;t8(O)_LruLOrpEVfX!KZ-!T_8O?yG#ENenD%t-ageP%NRD02{R z>wn{I1L}Q;Fvd10ISjIfC3Mg~2DO3_500&Lv% z$riWjSR@3E)5$p8#iEkkk)}TD*l7cHMVk6$LfL3p4b-=SDJax8n%T%*oYQb#rQ-%Z z#h9wOE%c`ELsQ?GY%?mKUC|Sj&;QPCXzG_CcI*o4*iGn3F!I_v=3*e4clVk4B|GUD zvnMdA+~kW-O)kBf%zz^R;)Y+p52O>J`I|YCo$hn-`O;|CQNOFeB<8ICCm5W9mio7{ z(%>$bJ7aK*FdBNP)?_tNf4zbFccgqQvI;$MzXg{WrG#Nf80st`2MO8E66PRbwzC8) z5-iRV9znt;X9=$(;WdW@>VM2+t_Hw|ko1X^L_@sTq_5nS#+C3K9DEQodfX_Ir)1n@ zwnkE1E0AXa`fs*|Q(Rkouw|i~i{d)-lgkuW)PmJT+!R!a1&~DX*C7iit_-W6A=ulc zxTV-^h=IjZz1cu<_d!7t&zXp0#n;$6ihD*JulO-+yo^@16u~;2uZH8UBaPzTQ&z)P ziaT0sp`kypyxKghisEo>0%}PdL?2sI$FcLD%DOl1RhG|;H-9L;t04<&WJj!ht0}%6 za!wovD z=;X5mT5%#%nT9w!1FWhjt@|S~tTqqSW)?c(w~%Vmg8*e(GgdV;a85InRT`eiYJci{ zzcI6-_4Qb0#$db#Y{HNVIu$2K%#DIdzC9Z2#7m)>aftLmv z3^cHS3>6R?CMjw*qDsXmT^J2|3REPefd2BfI>|C^cD1B6L(tN|uxKQqocbc8qx`er1@I3X+mG8gPz_Ck4r+e59n84T6uD z_b8gCK`ghVY4y? zxMCL<))5^Ixwi%5Jj80FDa%yjOhX>TII67mE{xF5nblE^!&Jeo^+P+h0HUGcHk#T- z1(9g2cVYEu4j7`f-h*4*b&w(K+Z9$_>10j4ILJ-zLl0wWQdtVN(9mxnR~4H(I)z&b z&;`u0E{BXH<&OE2eo%(GHqBM$KEU1z5F$9et>#U2xsYdv5Kyww_j*j zr8tuhdjeCY^zsW0tCnU{ZbTfm!9c^-H!>oEyJQ-bEgWb^K1kof7kSWt2w{o%DQ421U-1T z7ZOj&*TxYkmm1!|M8k0@0HucaXjE!A5^+#PncpGss&ye@G#po9h8rjWdl+behR>)r z*Jvr>H)cS<^8qw`F^?FIT@^-hvR&8|7N)ej0ae3rAU_ZxUaF?y+aVRSRvLb<+$s&< zD?->X=Lsq28*)x`8FE&z)^G^_1cX-=2uJwTm}^A{VG;v`BchZD268fu=*c7OAZM*% z*c0rpanOkICK@rY5zHe>L`X)Y*dbByJ7PJ?%xPTaL8;6_dl{g>%0M6Snt?{VY6pG4 z)m*J588G0J!zE|_3YpK?GhgDF*+?k7PKh2Ed{9OM3?{ahV7O|BffDzlzXd#ml_aJRIB?5o;%>@iH8)sup9Yac7QamL*jW#K1q*&d2^pWX>Yu4j`+gd&X-2S65 zBqhx>Q4;pX8X!4|O>-dVq}%Lhs=DDuz_KJ&t#6@LY%R&Px|3+2IgyJ*d64vnBj3aZB8O4$$N**o_TylgwaFi_ZKV)TcenVbJ z#`q^I5`dB+{~6!2yrxEZ*COvcxn54soT~TMu3$8sjP-x=YYI;%zq7);Qj}dAfL%Q$ zALS8@gc*@1Q}Q=T1S4{Rl7HtB$vD}>h}=jiK4OGb9HDwjiBck%pkQ>3{~3Zr{$eb4 zx^5){Ck6NJQ${J^q~xwNuM)ulVgXJ{u@b@HOQw{CJVJ&~pp+UVg288`l#M(>hOeGd zo)#k{`1VrDP9=iF_rU6P8ySougwiyWg69=dPTC>6&|n%$`CEx#m59?2r3QwFFD&y@NJHZ@?KNL>KlVuXsQvx1&SJ;%+y96{pK9+djGNW!VRFSW8p#e z6cxk-dlHAz-bNF9DXo~z02sr9LWf6%W-MH=u%bjix4eAL!ugU=WtbA53!=2^LKG}3 z(9cl)C?7MW{fkLImAn%_xbDxS#;D#VO1q@YehzIFRg}*uDbr79mpJw8J}25!wKb|# zXm4+8ftf}PXriqLOf(9o;w0HCwl!uhRnD4FTDBlTPxHzbmMzeiESX+XQe3H@$J?r+ zB|J-JIh9HMb^OCRB=vsOHKCUR(dbry=rKT)?7H`(djg`13Pfno@l7nV$ua~K1w=`* zltxdpRMY4=u}AL(UlfS`L$))FD2J=;XisUBgz$`=pJ3qgnym%@jR3I zl7t_IMt`AFks2Ytv}_U0EiKkplz_m>^ou0q&t1qy-Q|x8`D2y{T@>Vxv6yL0jC9@tB&H2yXr zT4`&1shP&dHqq9-CK}J>7gy4&CM_Dz3MR;?g>EFP3m*UN~SW-~Nb;|e^jeQ35i#3ljpaq-r*<1lkXl%%k8A6%R_$jkJ9;&4b zoY>@v{GD3LOfgXAU?83{*+Cw0F_Vdv1TW9tsHIFSXft6#&FID|5m3c&X!^KtTn!U- zW;uVImNG4rd3QBW!qc>r*@@L4@0>ckOiP*12;#%El)0+WyR?+K59_wf2iSCF7Cb!6 z5i;f}^Mq)Ja#+A-UIB2wh&x7<`L{Wbzj;CvTH`fZ${fvbpni;&CX9d%Yz?CcJ>{op zX~Mv20Q@?Ch?XX7LJ{^3Eln6-!@G=CpqZUepHSiQ5n7rsL41RjCfv_6(S%j}1zO$_ z*ALK+&Pj{!{~vrYk|t{KV5H0s6aCCI;R$rd4L?lmXQBzeDD)*tg9QCN=wy%qD%O;k zQY8&0&IOYwZ7nv_#KCAw@0#}}K5U|ix7*ul;;q0Izvf62|HfmEGDMSF10ue8?i!*= zeE<y9CSmGpCPXcMPm!`ZsUXVg z1Bm|V9-^#l6J>?jA>uR(eSQA?itntGJ>Jp+xWGh1CZbo5qjS8adF+wMWj5kF?_nvSv;nkai%BZx}NDi_X} zQ98Y}1l**kkE|_?();X}Rmw>ATV~2$>Yj_T&!DX*8@08dg32mqU_xy;AY$*Em{Qff z&l#oCQ*$N(qVL>~M^2@Qa)!8usNsN!5V@V=9mgZ*GoiCG9yw>slyjGRh;m{7&%wy+7JS_cPv;}RyWJzh|0>%KP4b~`m+fmw4?m?Xyq&DN0i^)N_iJpU6db>YOga(A!@)p zpI>31{CQ2PcNXy-Xs+6+pMi$Hk<26o<=@9v2a=|r|1et}h?;&w8$tdXEF;VUY#EXN z4$CMlxbhFNWds&nJmn-?M#%O3z?KnKZc4Df>)ot1ZiuoZh5@{wpR(#MNKjVYR50E^ 
z1?j|3is5B+Sp}=IKPb$oweh3EDpD#~C4?%ZRItHJ1^MoWw_ulv3YIIwt7@xpJB)bM zZT-$(i;*l}1`o9Lw)?gW-Ov_SW*4~3yrT<)?D z`*OEA_J~^)^c9d){s^y)?nyUU8jtP~-@PEJ#)b(1HWX>vdlZB>|PGE_fB1i0vzzI$bNJ?u=pUsdi+OAgcVCPon=H!Ds)AS&@cWu|b8 zG9FVZ%rpf{G`A38|98q%=MXiVjMy|JLga2T;v4|MEGbb_uBvb})e{gs;vS-@v4H4D z8KS>87njbTQ8~F&h<@Jvlf!L*b9^-I(1 zY6r}C;$OdvI(Sb&f~HMs!dDf)ADg>+YuD+W&%80~S&hcyv#TCQyhRV%N#=ELJ|DOu zs?*a?Y(2mKk<7jMt0T1KKh}EYEQ|m2aLP`P;%ytUJ{LK}Dtpx zq!-uWN}M`RuKGg^d#a<3m-dSo_u@s(pC9$vl9QUU?31ZAVe0h8VeOTM_6CRV%Zu7I zE~APGSn-YO%P}aS+A&i3iX8OK_6g;I-Fhi84FjHW8Mhv}&)hIt`i{8!w~r4WxWl$e zoxaVh8=DQdtd#B zpLPlx|N5eWDV?V$-l6gD8{OvT${6di=ZC#X|D|U~ zOu1+O@b&Awf4TeNH=kMZ%g0)bp!SZd+A{N?DV(<3q$aPsa&c+KmubJf_H}E0#*o|A zhnx%c4>Ru6dJer1G0ZhUAyZa;-}!#m>CH;1SLgcH_tZn@j{bI^@#(ML{eJlBd7o~Y zSF28M7@iyVkGC4eHTZU5s{anbb8(XTcjcF_@9N&G-H*pcg?2yk(!y`k2i)KNw8<~1 zJts5-+xtmPg{<1 z?UAYLd_s==b!hgVbw^gsduQciezEH&{dpHq-lgw^-R@DUORrOR@pw{g==fyawIM5h zxHIe8-F3Zh-R0KTvj3uU|5TT66mC#|qjZg?-btJ~DrIcy#U;1C_<~R7sUgLm{kAyT zko3)G_YPgX!jRbyum5q~ z!E-U{+MI;>kWAaYyztVND=|A$x=q+WzxSVudSAV!v7A^jJ8sREJ?l;#t&d$-_t>eF zg);}A+7a_>e}Ui@&PtXVJd$2p<)XA3t4p@E_Bj z?7O+bd}l$xf%_-!z4vzWgVn~J9`C%~|I>IUfT*LA)gO(++P#hA9AdN%UyVQXjiVf5 zP9NQNvGW%5!8TcM?|VOUOoGpS0V|*He7W$5VbtrBmM^+uEID@e$LBstF{f3uE3P~L z!psvh5>ux9wC1p1_q*opsNWpuP+P+fucxjwjB@K(rueKpm;Mg#b-F6tdiJi^nLm89 zcrTyrF6>>hWs=9ab{R_wCtMh|J@B&~x`X@nr(WJ0zpq!-J;A2>dGU2jQ*M8_;C=1*dCvlUz2U9*&p);GuI%%3=~AnNb``Z94?Y^; zyKU#o^`%9duRi3JI(>$|ciX(HI}_IHGJjkD#0qOc%JJ`x_V=2DCjMth$bYVBY2b!I zuNyhM5r0kIUp8n($WKE%y>57QY}}BoOP{XYwr=R=XY!|?>3;E+de4Y|9GMV5`krwV z_tfCsS2cgUlKzUe-QCFx7gxS9@1^-*Y)N67dQ%FM-=-x?H&1OnZ*Fh4T+A!Jwz$h(*?VGp?0$IK z`TC-QS8NvGNQlh==tg8+x%vw{jqz>i}eHR+eiNzT>brs{GDUJ9ov6$pOH&i_w@dy R;*Oy7tJ$9{uK4#`{{>#NYmEQ^ diff --git a/rollup/rollup_sync_service/testdata/commitBatch_codecv1.rlp b/rollup/rollup_sync_service/testdata/commitBatch_codecv1.rlp deleted file mode 100644 index b0feda0443cd8f807869d3adfd8e1deb3bb0e68f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1237 zcmdnVa*6pT%eiL8P3(*<*0WAAfC#>3#)_UP?qNZx!JNNpHd>aN?|ZrY_xd}6ry6#$ zs0gdBS-^lFFyfbHXuvN|NIMID?F5WS}9__V;~ zKzc>sJa(wwLbwP%21qX=iaDTqOYmud%c1H8hj{8A({i{xJ_f2@E|5~7-Zp$%;Bu&X z!RZUA_W@iU9|Kh{K9w-J>({2(&eHgvsk77FVRy=ho~>skSH)budw}WT0!H4tfOF{) zR#GPOV`R>|cl1qMdiUnPPUl%i&EDVl{S)1=fTLz|hmw8igSGD+7f)q+9lG>kb~#7g y!I@u}bPwK`a!h4G`ok5IPIoNW-+qAQMb86^@G_?B|L@d0ygYaJo=p6u*$Cds7 diff --git a/rollup/rollup_sync_service/testdata/commitBatch_codecv2.rlp b/rollup/rollup_sync_service/testdata/commitBatch_codecv2.rlp deleted file mode 100644 index cbb35f1579f1292bb5a08c4498fe90286c0f2a78..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4437 zcmbuCc}Nsd9LINcZC7oVBnm8og$}xeG$FONW}_6NgVwa5$c;Qmlp-lAgr<|IXe;f~ zvdIq9vaHAuOIH6dDk~z$KkV2nh%PYbFzuRI-h0jOH*Y70_7C6tety6A+2@-XcWsRE zgny5jzs-Er8nJz9`CXIp!?7)_v#KmD%afZC*;{fYdEJ!ex7B@@pT^u38wZ^kJo=(! 
z(tm~NmrbI6-R#t_4-)n3rj2w17TlPb>V3X@VQE44$NHw5tKUqUY)K4H{Bmu}hn|eJ zzKpE*=Bl{U$8PrdMhf;(#n-*=-?gT!foU6jt*L2WlGg25kN7*i2#ZbA_6Ez z1Q0-RfPess10)1cw4#io)r$$tRzJ)**_@cbY!hL|$riu_=12i%oFXMmV2*-OlcKyD z1VEG@L`{fxY7hX?B8Zw09nc^E>>z1_-K%A4V)v_)cJ6R$nVL96b<)lq0WAZ>WhT+5 zjCt-Ksi2I4t^1J}L`M&crWvTnjm*DaDa-zJZX1EXFX(mB8uK4ei0q$BE4&f-jqU#+ z@h{+cDFgl#O7%vz|5Bw(-oHn$8~9y%owUaMWeSn~nY6+if#2Bq93uWDJTGOyKa*0u zk?sGm(k1VIsa`kmPuJ_DHReB}5ZS++R(K=u3%m}@nE$9!k^L)hhdTd<{}}P-sC{@2 z{Kl?t@cV$g|9tFJXW$pql~8kbJ{3xb>@Nt_sc_&oc72a4>t%nDGKP8qzYwZR&Ds7> zC{*^ZQCIOC_>DckmBe3+o$3ty#;)&4;$N@!;W_ZTag~?Z`AAAtKA(-eF3N!4*!4X{ z{3X=I8~?*!Mf{t2Udn*q*!iC({;kx-8-d^0{d0!+cksNF0e`>t|H(quo+bWW)WsWt zKc01*BWw3C!kK`zf4_HClePOeOBe!cZ493$YlGh%7)2AXb|@zCiy^Lv5#b^k8}`xip(Le1IvTqOSB-%)jkdJg>kx<8q}j`$_D z56^+$*z9gPnsU1LUcVn(yX^5Zf7naOT$p5U zvAgT)UA@uuxgSz`-q!3CI@{XY3u2>Y=U46$rMT*jo?-VUtXTB9=vPHyYu5O_Ih!0e xEC~pVYS` Date: Thu, 2 Jan 2025 16:04:41 +0800 Subject: [PATCH 17/17] address more review comments --- core/rawdb/accessors_rollup_event.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/rawdb/accessors_rollup_event.go b/core/rawdb/accessors_rollup_event.go index 1b60f6e4f0d8..e5daffcc5965 100644 --- a/core/rawdb/accessors_rollup_event.go +++ b/core/rawdb/accessors_rollup_event.go @@ -18,7 +18,8 @@ type ChunkBlockRange struct { // CommittedBatchMeta holds metadata for committed batches. type CommittedBatchMeta struct { - Version uint8 + Version uint8 + // BlobVersionedHashes are the versioned hashes of the blobs in the batch. Currently unused. Left for compatibility. BlobVersionedHashes []common.Hash ChunkBlockRanges []*ChunkBlockRange }