From 7de748d3f62cec172ddf0dda110beca55648f306 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 1 Aug 2023 20:17:32 +0800 Subject: [PATCH 01/99] all: implement path-based state scheme (#25963) * all: implement path-based state scheme * all: edits from review * core/rawdb, trie/triedb/pathdb: review changes * core, light, trie, eth, tests: reimplement pbss history * core, trie/triedb/pathdb: track block number in state history * trie/triedb/pathdb: add history documentation * core, trie/triedb/pathdb: address comments from Peter's review Important changes to list: - Cache trie nodes by path in clean cache - Remove root->id mappings when history is truncated * trie/triedb/pathdb: fallback to disk if unexpect node in clean cache * core/rawdb: fix tests * trie/triedb/pathdb: rename metrics, change clean cache key * trie/triedb: manage the clean cache inside of disk layer * trie/triedb/pathdb: move journal function * trie/triedb/path: fix tests * trie/triedb/pathdb: fix journal * trie/triedb/pathdb: fix history * trie/triedb/pathdb: try to fix tests on windows * core, trie: address comments * trie/triedb/pathdb: fix test issues --------- Co-authored-by: Felix Lange Co-authored-by: Martin Holst Swende --- core/blockchain.go | 6 +- core/rawdb/accessors_chain_test.go | 4 +- core/rawdb/accessors_indexes_test.go | 4 +- core/rawdb/accessors_state.go | 172 +++++++ core/rawdb/accessors_trie.go | 46 +- core/rawdb/ancient_scheme.go | 30 +- core/rawdb/chain_iterator_test.go | 8 +- core/rawdb/database.go | 8 +- core/rawdb/freezer.go | 27 +- core/rawdb/freezer_resettable.go | 6 +- core/rawdb/freezer_test.go | 4 +- core/rawdb/schema.go | 12 + core/rawdb/table.go | 4 +- core/state/statedb.go | 11 +- core/types/hashes.go | 11 + ethdb/database.go | 4 +- ethdb/remotedb/remotedb.go | 4 +- trie/committer.go | 15 +- trie/database.go | 45 +- trie/database_test.go | 8 +- trie/iterator_test.go | 12 +- trie/sync_test.go | 38 +- trie/testutil/utils.go | 61 +++ trie/tracer.go | 11 +- trie/trie.go | 22 +- trie/trie_reader.go | 11 +- trie/trie_test.go | 44 +- trie/triedb/hashdb/database.go | 59 +-- trie/triedb/pathdb/database.go | 392 ++++++++++++++++ trie/triedb/pathdb/database_test.go | 573 ++++++++++++++++++++++++ trie/triedb/pathdb/difflayer.go | 174 ++++++++ trie/triedb/pathdb/difflayer_test.go | 170 +++++++ trie/triedb/pathdb/disklayer.go | 296 +++++++++++++ trie/triedb/pathdb/errors.go | 51 +++ trie/triedb/pathdb/history.go | 641 +++++++++++++++++++++++++++ trie/triedb/pathdb/history_test.go | 290 ++++++++++++ trie/triedb/pathdb/journal.go | 378 ++++++++++++++++ trie/triedb/pathdb/layertree.go | 214 +++++++++ trie/triedb/pathdb/metrics.go | 50 +++ trie/triedb/pathdb/nodebuffer.go | 275 ++++++++++++ trie/triedb/pathdb/testutils.go | 156 +++++++ trie/trienode/node.go | 62 +-- trie/triestate/state.go | 243 +++++++++- 43 files changed, 4422 insertions(+), 230 deletions(-) create mode 100644 trie/testutil/utils.go create mode 100644 trie/triedb/pathdb/database.go create mode 100644 trie/triedb/pathdb/database_test.go create mode 100644 trie/triedb/pathdb/difflayer.go create mode 100644 trie/triedb/pathdb/difflayer_test.go create mode 100644 trie/triedb/pathdb/disklayer.go create mode 100644 trie/triedb/pathdb/errors.go create mode 100644 trie/triedb/pathdb/history.go create mode 100644 trie/triedb/pathdb/history_test.go create mode 100644 trie/triedb/pathdb/journal.go create mode 100644 trie/triedb/pathdb/layertree.go create mode 100644 trie/triedb/pathdb/metrics.go create mode 100644 
trie/triedb/pathdb/nodebuffer.go create mode 100644 trie/triedb/pathdb/testutils.go diff --git a/core/blockchain.go b/core/blockchain.go index 2f549806c603..3952c31b688f 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -713,7 +713,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha if num+1 <= frozen { // Truncate all relative data(header, total difficulty, body, receipt // and canonical hash) from ancient store. - if err := bc.db.TruncateHead(num); err != nil { + if _, err := bc.db.TruncateHead(num); err != nil { log.Crit("Failed to truncate ancient data", "number", num, "err", err) } // Remove the hash <-> number mapping from the active store. @@ -1136,7 +1136,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ size += int64(batch.ValueSize()) if err = batch.Write(); err != nil { snapBlock := bc.CurrentSnapBlock().Number.Uint64() - if err := bc.db.TruncateHead(snapBlock + 1); err != nil { + if _, err := bc.db.TruncateHead(snapBlock + 1); err != nil { log.Error("Can't truncate ancient store after failed insert", "err", err) } return 0, err @@ -1154,7 +1154,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ if !updateHead(blockChain[len(blockChain)-1]) { // We end up here if the header chain has reorg'ed, and the blocks/receipts // don't match the canonical chain. - if err := bc.db.TruncateHead(previousSnapBlock + 1); err != nil { + if _, err := bc.db.TruncateHead(previousSnapBlock + 1); err != nil { log.Error("Can't truncate ancient store after failed insert", "err", err) } return 0, errSideChainReceipts diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 32e38a81ce4d..beeec9f5a615 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -85,7 +85,7 @@ func TestBodyStorage(t *testing.T) { WriteBody(db, hash, 0, body) if entry := ReadBody(db, hash, 0); entry == nil { t.Fatalf("Stored body not found") - } else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) { + } else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) { t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body) } if entry := ReadBodyRLP(db, hash, 0); entry == nil { @@ -139,7 +139,7 @@ func TestBlockStorage(t *testing.T) { } if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil { t.Fatalf("Stored body not found") - } else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(block.Transactions(), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) { + } else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(block.Transactions(), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) { t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body()) } // Delete the block and verify the execution diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go index 020075525103..124389ba7a13 100644 --- a/core/rawdb/accessors_indexes_test.go +++ b/core/rawdb/accessors_indexes_test.go @@ -29,7 
+29,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) -var newHasher = blocktest.NewHasher +var newTestHasher = blocktest.NewHasher // Tests that positional lookup metadata can be stored and retrieved. func TestLookupStorage(t *testing.T) { @@ -76,7 +76,7 @@ func TestLookupStorage(t *testing.T) { tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33}) txs := []*types.Transaction{tx1, tx2, tx3} - block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newHasher()) + block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newTestHasher()) // Check that no transactions entries are in a pristine database for i, tx := range txs { diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go index 39900df23e94..9ce58e7d27b9 100644 --- a/core/rawdb/accessors_state.go +++ b/core/rawdb/accessors_state.go @@ -17,6 +17,8 @@ package rawdb import ( + "encoding/binary" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -92,3 +94,173 @@ func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) { log.Crit("Failed to delete contract code", "err", err) } } + +// ReadStateID retrieves the state id with the provided state root. +func ReadStateID(db ethdb.KeyValueReader, root common.Hash) *uint64 { + data, err := db.Get(stateIDKey(root)) + if err != nil || len(data) == 0 { + return nil + } + number := binary.BigEndian.Uint64(data) + return &number +} + +// WriteStateID writes the provided state lookup to database. +func WriteStateID(db ethdb.KeyValueWriter, root common.Hash, id uint64) { + var buff [8]byte + binary.BigEndian.PutUint64(buff[:], id) + if err := db.Put(stateIDKey(root), buff[:]); err != nil { + log.Crit("Failed to store state ID", "err", err) + } +} + +// DeleteStateID deletes the specified state lookup from the database. +func DeleteStateID(db ethdb.KeyValueWriter, root common.Hash) { + if err := db.Delete(stateIDKey(root)); err != nil { + log.Crit("Failed to delete state ID", "err", err) + } +} + +// ReadPersistentStateID retrieves the id of the persistent state from the database. +func ReadPersistentStateID(db ethdb.KeyValueReader) uint64 { + data, _ := db.Get(persistentStateIDKey) + if len(data) != 8 { + return 0 + } + return binary.BigEndian.Uint64(data) +} + +// WritePersistentStateID stores the id of the persistent state into database. +func WritePersistentStateID(db ethdb.KeyValueWriter, number uint64) { + if err := db.Put(persistentStateIDKey, encodeBlockNumber(number)); err != nil { + log.Crit("Failed to store the persistent state ID", "err", err) + } +} + +// ReadTrieJournal retrieves the serialized in-memory trie nodes of layers saved at +// the last shutdown. +func ReadTrieJournal(db ethdb.KeyValueReader) []byte { + data, _ := db.Get(trieJournalKey) + return data +} + +// WriteTrieJournal stores the serialized in-memory trie nodes of layers to save at +// shutdown. +func WriteTrieJournal(db ethdb.KeyValueWriter, journal []byte) { + if err := db.Put(trieJournalKey, journal); err != nil { + log.Crit("Failed to store tries journal", "err", err) + } +} + +// DeleteTrieJournal deletes the serialized in-memory trie nodes of layers saved at +// the last shutdown. 
+func DeleteTrieJournal(db ethdb.KeyValueWriter) { + if err := db.Delete(trieJournalKey); err != nil { + log.Crit("Failed to remove tries journal", "err", err) + } +} + +// ReadStateHistoryMeta retrieves the metadata corresponding to the specified +// state history. Compute the position of state history in freezer by minus +// one since the id of first state history starts from one(zero for initial +// state). +func ReadStateHistoryMeta(db ethdb.AncientReaderOp, id uint64) []byte { + blob, err := db.Ancient(stateHistoryMeta, id-1) + if err != nil { + return nil + } + return blob +} + +// ReadStateHistoryMetaList retrieves a batch of meta objects with the specified +// start position and count. Compute the position of state history in freezer by +// minus one since the id of first state history starts from one(zero for initial +// state). +func ReadStateHistoryMetaList(db ethdb.AncientReaderOp, start uint64, count uint64) ([][]byte, error) { + return db.AncientRange(stateHistoryMeta, start-1, count, 0) +} + +// ReadStateAccountIndex retrieves the state root corresponding to the specified +// state history. Compute the position of state history in freezer by minus one +// since the id of first state history starts from one(zero for initial state). +func ReadStateAccountIndex(db ethdb.AncientReaderOp, id uint64) []byte { + blob, err := db.Ancient(stateHistoryAccountIndex, id-1) + if err != nil { + return nil + } + return blob +} + +// ReadStateStorageIndex retrieves the state root corresponding to the specified +// state history. Compute the position of state history in freezer by minus one +// since the id of first state history starts from one(zero for initial state). +func ReadStateStorageIndex(db ethdb.AncientReaderOp, id uint64) []byte { + blob, err := db.Ancient(stateHistoryStorageIndex, id-1) + if err != nil { + return nil + } + return blob +} + +// ReadStateAccountHistory retrieves the state root corresponding to the specified +// state history. Compute the position of state history in freezer by minus one +// since the id of first state history starts from one(zero for initial state). +func ReadStateAccountHistory(db ethdb.AncientReaderOp, id uint64) []byte { + blob, err := db.Ancient(stateHistoryAccountData, id-1) + if err != nil { + return nil + } + return blob +} + +// ReadStateStorageHistory retrieves the state root corresponding to the specified +// state history. Compute the position of state history in freezer by minus one +// since the id of first state history starts from one(zero for initial state). +func ReadStateStorageHistory(db ethdb.AncientReaderOp, id uint64) []byte { + blob, err := db.Ancient(stateHistoryStorageData, id-1) + if err != nil { + return nil + } + return blob +} + +// ReadStateHistory retrieves the state history from database with provided id. +// Compute the position of state history in freezer by minus one since the id +// of first state history starts from one(zero for initial state). 
+func ReadStateHistory(db ethdb.AncientReaderOp, id uint64) ([]byte, []byte, []byte, []byte, []byte, error) { + meta, err := db.Ancient(stateHistoryMeta, id-1) + if err != nil { + return nil, nil, nil, nil, nil, err + } + accountIndex, err := db.Ancient(stateHistoryAccountIndex, id-1) + if err != nil { + return nil, nil, nil, nil, nil, err + } + storageIndex, err := db.Ancient(stateHistoryStorageIndex, id-1) + if err != nil { + return nil, nil, nil, nil, nil, err + } + accountData, err := db.Ancient(stateHistoryAccountData, id-1) + if err != nil { + return nil, nil, nil, nil, nil, err + } + storageData, err := db.Ancient(stateHistoryStorageData, id-1) + if err != nil { + return nil, nil, nil, nil, nil, err + } + return meta, accountIndex, storageIndex, accountData, storageData, nil +} + +// WriteStateHistory writes the provided state history to database. Compute the +// position of state history in freezer by minus one since the id of first state +// history starts from one(zero for initial state). +func WriteStateHistory(db ethdb.AncientWriter, id uint64, meta []byte, accountIndex []byte, storageIndex []byte, accounts []byte, storages []byte) { + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + op.AppendRaw(stateHistoryMeta, id-1, meta) + op.AppendRaw(stateHistoryAccountIndex, id-1, accountIndex) + op.AppendRaw(stateHistoryStorageIndex, id-1, storageIndex) + op.AppendRaw(stateHistoryAccountData, id-1, accounts) + op.AppendRaw(stateHistoryStorageData, id-1, storages) + return nil + }) +} diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go index e24021302584..12f1ecdf833e 100644 --- a/core/rawdb/accessors_trie.go +++ b/core/rawdb/accessors_trie.go @@ -46,21 +46,23 @@ const HashScheme = "hashScheme" // on extra state diffs to survive deep reorg. const PathScheme = "pathScheme" -// nodeHasher used to derive the hash of trie node. -type nodeHasher struct{ sha crypto.KeccakState } +// hasher is used to compute the sha256 hash of the provided data. 
+type hasher struct{ sha crypto.KeccakState } var hasherPool = sync.Pool{ - New: func() interface{} { return &nodeHasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, + New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, } -func newNodeHasher() *nodeHasher { return hasherPool.Get().(*nodeHasher) } -func returnHasherToPool(h *nodeHasher) { hasherPool.Put(h) } +func newHasher() *hasher { + return hasherPool.Get().(*hasher) +} + +func (h *hasher) hash(data []byte) common.Hash { + return crypto.HashData(h.sha, data) +} -func (h *nodeHasher) hashData(data []byte) (n common.Hash) { - h.sha.Reset() - h.sha.Write(data) - h.sha.Read(n[:]) - return n +func (h *hasher) release() { + hasherPool.Put(h) } // ReadAccountTrieNode retrieves the account trie node and the associated node @@ -70,9 +72,9 @@ func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.H if err != nil { return nil, common.Hash{} } - hasher := newNodeHasher() - defer returnHasherToPool(hasher) - return data, hasher.hashData(data) + h := newHasher() + defer h.release() + return data, h.hash(data) } // HasAccountTrieNode checks the account trie node presence with the specified @@ -82,9 +84,9 @@ func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) if err != nil { return false } - hasher := newNodeHasher() - defer returnHasherToPool(hasher) - return hasher.hashData(data) == hash + h := newHasher() + defer h.release() + return h.hash(data) == hash } // WriteAccountTrieNode writes the provided account trie node into database. @@ -108,9 +110,9 @@ func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path if err != nil { return nil, common.Hash{} } - hasher := newNodeHasher() - defer returnHasherToPool(hasher) - return data, hasher.hashData(data) + h := newHasher() + defer h.release() + return data, h.hash(data) } // HasStorageTrieNode checks the storage trie node presence with the provided @@ -120,9 +122,9 @@ func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path [ if err != nil { return false } - hasher := newNodeHasher() - defer returnHasherToPool(hasher) - return hasher.hashData(data) == hash + h := newHasher() + defer h.release() + return h.hash(data) == hash } // WriteStorageTrieNode writes the provided storage trie node into database. diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go index b0428c5f5bd9..c1cd7fda17f2 100644 --- a/core/rawdb/ancient_scheme.go +++ b/core/rawdb/ancient_scheme.go @@ -16,6 +16,8 @@ package rawdb +import "path/filepath" + // The list of table names of chain freezer. const ( // ChainFreezerHeaderTable indicates the name of the freezer header table. @@ -44,10 +46,36 @@ var chainFreezerNoSnappy = map[string]bool{ ChainFreezerDifficultyTable: true, } +const ( + // stateHistoryTableSize defines the maximum size of freezer data files. + stateHistoryTableSize = 2 * 1000 * 1000 * 1000 + + // stateHistoryAccountIndex indicates the name of the freezer state history table. + stateHistoryMeta = "history.meta" + stateHistoryAccountIndex = "account.index" + stateHistoryStorageIndex = "storage.index" + stateHistoryAccountData = "account.data" + stateHistoryStorageData = "storage.data" +) + +var stateHistoryFreezerNoSnappy = map[string]bool{ + stateHistoryMeta: true, + stateHistoryAccountIndex: false, + stateHistoryStorageIndex: false, + stateHistoryAccountData: false, + stateHistoryStorageData: false, +} + // The list of identifiers of ancient stores. 
var ( chainFreezerName = "chain" // the folder name of chain segment ancient store. + stateFreezerName = "state" // the folder name of reverse diff ancient store. ) // freezers the collections of all builtin freezers. -var freezers = []string{chainFreezerName} +var freezers = []string{chainFreezerName, stateFreezerName} + +// NewStateHistoryFreezer initializes the freezer for state history. +func NewStateHistoryFreezer(ancientDir string, readOnly bool) (*ResettableFreezer, error) { + return NewResettableFreezer(filepath.Join(ancientDir, stateFreezerName), "eth/db/state", readOnly, stateHistoryTableSize, stateHistoryFreezerNoSnappy) +} diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go index fd405e9d6997..9e18c8605cf1 100644 --- a/core/rawdb/chain_iterator_test.go +++ b/core/rawdb/chain_iterator_test.go @@ -34,7 +34,7 @@ func TestChainIterator(t *testing.T) { var block *types.Block var txs []*types.Transaction to := common.BytesToAddress([]byte{0x11}) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) // Empty genesis block + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) // Empty genesis block WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) for i := uint64(1); i <= 10; i++ { @@ -60,7 +60,7 @@ func TestChainIterator(t *testing.T) { }) } txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } @@ -113,7 +113,7 @@ func TestIndexTransactions(t *testing.T) { to := common.BytesToAddress([]byte{0x11}) // Write empty genesis block - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) @@ -140,7 +140,7 @@ func TestIndexTransactions(t *testing.T) { }) } txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } diff --git a/core/rawdb/database.go b/core/rawdb/database.go index e864bcb2e88e..7bc91090619f 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -123,13 +123,13 @@ func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, e } // TruncateHead returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) TruncateHead(items uint64) error { - return errNotSupported +func (db *nofreezedb) TruncateHead(items uint64) (uint64, error) { + return 0, errNotSupported } // TruncateTail returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) TruncateTail(items uint64) error { - return errNotSupported +func (db *nofreezedb) TruncateTail(items uint64) (uint64, error) { + return 0, errNotSupported } // Sync returns an error as we don't have a backing chain freezer. 
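Illustrative sketch (not part of the patch): the state-history accessors earlier in this diff identify histories by a one-based id (id 0 is reserved for the initial state) and translate it into the zero-based freezer position id-1. The helper name storeAndLoadHistory and the byte payloads below are invented for the example, and it assumes the state freezer already holds the id-1 earlier entries, since freezer tables only support in-order appends.

package example

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// storeAndLoadHistory appends one state history with the given id and reads
// its metadata back. WriteStateHistory/ReadStateHistoryMeta internally map the
// one-based history id onto the zero-based freezer index id-1.
func storeAndLoadHistory(store ethdb.AncientStore, id uint64) []byte {
	rawdb.WriteStateHistory(store, id,
		[]byte("meta"),          // history metadata blob (placeholder)
		[]byte("account-index"), // account index (placeholder)
		[]byte("storage-index"), // storage index (placeholder)
		[]byte("account-data"),  // account data (placeholder)
		[]byte("storage-data"))  // storage data (placeholder)
	return rawdb.ReadStateHistoryMeta(store, id) // reads freezer item id-1
}

In the patch itself the store is obtained from NewStateHistoryFreezer, which keeps these five tables in the "state" ancient folder and disables Snappy compression only for the metadata table.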
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 2846858e0bdd..a9fe2343215a 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -275,43 +275,46 @@ func (f *Freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize } // TruncateHead discards any recent data above the provided threshold number. -func (f *Freezer) TruncateHead(items uint64) error { +// It returns the previous head number. +func (f *Freezer) TruncateHead(items uint64) (uint64, error) { if f.readonly { - return errReadOnly + return 0, errReadOnly } f.writeLock.Lock() defer f.writeLock.Unlock() - if f.frozen.Load() <= items { - return nil + oitems := f.frozen.Load() + if oitems <= items { + return oitems, nil } for _, table := range f.tables { if err := table.truncateHead(items); err != nil { - return err + return 0, err } } f.frozen.Store(items) - return nil + return oitems, nil } // TruncateTail discards any recent data below the provided threshold number. -func (f *Freezer) TruncateTail(tail uint64) error { +func (f *Freezer) TruncateTail(tail uint64) (uint64, error) { if f.readonly { - return errReadOnly + return 0, errReadOnly } f.writeLock.Lock() defer f.writeLock.Unlock() - if f.tail.Load() >= tail { - return nil + old := f.tail.Load() + if old >= tail { + return old, nil } for _, table := range f.tables { if err := table.truncateTail(tail); err != nil { - return err + return 0, err } } f.tail.Store(tail) - return nil + return old, nil } // Sync flushes all data tables to disk. diff --git a/core/rawdb/freezer_resettable.go b/core/rawdb/freezer_resettable.go index f9a56c6de552..0a3892bcdfa4 100644 --- a/core/rawdb/freezer_resettable.go +++ b/core/rawdb/freezer_resettable.go @@ -170,7 +170,8 @@ func (f *ResettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) } // TruncateHead discards any recent data above the provided threshold number. -func (f *ResettableFreezer) TruncateHead(items uint64) error { +// It returns the previous head number. +func (f *ResettableFreezer) TruncateHead(items uint64) (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -178,7 +179,8 @@ func (f *ResettableFreezer) TruncateHead(items uint64) error { } // TruncateTail discards any recent data below the provided threshold number. -func (f *ResettableFreezer) TruncateTail(tail uint64) error { +// It returns the previous value +func (f *ResettableFreezer) TruncateTail(tail uint64) (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go index 630c9029b0f5..96d24cc9473b 100644 --- a/core/rawdb/freezer_test.go +++ b/core/rawdb/freezer_test.go @@ -192,7 +192,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) { for i := 0; i < 10; i++ { // First reset and write 100 items. - if err := f.TruncateHead(0); err != nil { + if _, err := f.TruncateHead(0); err != nil { t.Fatal("truncate failed:", err) } _, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error { @@ -227,7 +227,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) { wg.Done() }() go func() { - truncateErr = f.TruncateHead(10) + _, truncateErr = f.TruncateHead(10) wg.Done() }() go func() { diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 18722ed5d4cb..940ce01549cd 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -43,6 +43,9 @@ var ( // headFinalizedBlockKey tracks the latest known finalized block hash. 
headFinalizedBlockKey = []byte("LastFinalized") + // persistentStateIDKey tracks the id of latest stored state(for path-based only). + persistentStateIDKey = []byte("LastStateID") + // lastPivotKey tracks the last pivot block used by fast sync (to reenable on sethead). lastPivotKey = []byte("LastPivot") @@ -70,6 +73,9 @@ var ( // skeletonSyncStatusKey tracks the skeleton sync status across restarts. skeletonSyncStatusKey = []byte("SkeletonSyncStatus") + // trieJournalKey tracks the in-memory trie node layers across restarts. + trieJournalKey = []byte("TrieJournal") + // txIndexTailKey tracks the oldest block whose transactions have been indexed. txIndexTailKey = []byte("TransactionIndexTail") @@ -104,6 +110,7 @@ var ( // Path-based storage scheme of merkle patricia trie. trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node + stateIDPrefix = []byte("L") // stateIDPrefix + state root -> state id PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage configPrefix = []byte("ethereum-config-") // config prefix for the db @@ -240,6 +247,11 @@ func genesisStateSpecKey(hash common.Hash) []byte { return append(genesisPrefix, hash.Bytes()...) } +// stateIDKey = stateIDPrefix + root (32 bytes) +func stateIDKey(root common.Hash) []byte { + return append(stateIDPrefix, root.Bytes()...) +} + // accountTrieNodeKey = trieNodeAccountPrefix + nodePath. func accountTrieNodeKey(path []byte) []byte { return append(trieNodeAccountPrefix, path...) diff --git a/core/rawdb/table.go b/core/rawdb/table.go index 6d6fa0555da9..1895f61da200 100644 --- a/core/rawdb/table.go +++ b/core/rawdb/table.go @@ -97,13 +97,13 @@ func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err e // TruncateHead is a noop passthrough that just forwards the request to the underlying // database. -func (t *table) TruncateHead(items uint64) error { +func (t *table) TruncateHead(items uint64) (uint64, error) { return t.db.TruncateHead(items) } // TruncateTail is a noop passthrough that just forwards the request to the underlying // database. 
-func (t *table) TruncateTail(items uint64) error { +func (t *table) TruncateTail(items uint64) (uint64, error) { return t.db.TruncateTail(items) } diff --git a/core/state/statedb.go b/core/state/statedb.go index 8321128dcd4d..fdaeacc6b3d9 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1054,8 +1054,8 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root if it.Hash() == (common.Hash{}) { continue } - nodeSize += common.StorageSize(len(it.Path()) + len(it.NodeBlob())) - set.AddNode(it.Path(), trienode.NewWithPrev(common.Hash{}, nil, it.NodeBlob())) + nodeSize += common.StorageSize(len(it.Path())) + set.AddNode(it.Path(), trienode.NewDeleted()) } if err := it.Error(); err != nil { return false, nil, nil, err @@ -1274,12 +1274,7 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er } if root != origin { start := time.Now() - set := &triestate.Set{ - Accounts: s.accountsOrigin, - Storages: s.storagesOrigin, - Incomplete: incomplete, - } - if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil { + if err := s.db.TrieDB().Update(root, origin, block, nodes, triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)); err != nil { return common.Hash{}, err } s.originalRoot = root diff --git a/core/types/hashes.go b/core/types/hashes.go index 3bad430be571..3a787aa136f8 100644 --- a/core/types/hashes.go +++ b/core/types/hashes.go @@ -19,6 +19,7 @@ package types import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" ) var ( @@ -40,3 +41,13 @@ var ( // EmptyWithdrawalsHash is the known hash of the empty withdrawal set. EmptyWithdrawalsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") ) + +// TrieRootHash returns the hash itself if it's non-empty or the predefined +// emptyHash one instead. +func TrieRootHash(hash common.Hash) common.Hash { + if hash == (common.Hash{}) { + log.Error("Zero trie root hash!") + return EmptyRootHash + } + return hash +} diff --git a/ethdb/database.go b/ethdb/database.go index 3fe6dfa73c0e..4d4817daf2e5 100644 --- a/ethdb/database.go +++ b/ethdb/database.go @@ -114,14 +114,14 @@ type AncientWriter interface { // TruncateHead discards all but the first n ancient data from the ancient store. // After the truncation, the latest item can be accessed it item_n-1(start from 0). - TruncateHead(n uint64) error + TruncateHead(n uint64) (uint64, error) // TruncateTail discards the first n ancient data from the ancient store. The already // deleted items are ignored. After the truncation, the earliest item can be accessed // is item_n(start from 0). The deleted items may not be removed from the ancient store // immediately, but only when the accumulated deleted data reach the threshold then // will be removed all together. - TruncateTail(n uint64) error + TruncateTail(n uint64) (uint64, error) // Sync flushes all in-memory ancient store data to disk. 
Sync() error diff --git a/ethdb/remotedb/remotedb.go b/ethdb/remotedb/remotedb.go index 9ce657d78026..c1c803caf2b9 100644 --- a/ethdb/remotedb/remotedb.go +++ b/ethdb/remotedb/remotedb.go @@ -98,11 +98,11 @@ func (db *Database) ModifyAncients(f func(ethdb.AncientWriteOp) error) (int64, e panic("not supported") } -func (db *Database) TruncateHead(n uint64) error { +func (db *Database) TruncateHead(n uint64) (uint64, error) { panic("not supported") } -func (db *Database) TruncateTail(n uint64) error { +func (db *Database) TruncateTail(n uint64) (uint64, error) { panic("not supported") } diff --git a/trie/committer.go b/trie/committer.go index e825287fd220..92163cdb3b64 100644 --- a/trie/committer.go +++ b/trie/committer.go @@ -131,22 +131,15 @@ func (c *committer) store(path []byte, n node) node { // The node is embedded in its parent, in other words, this node // will not be stored in the database independently, mark it as // deleted only if the node was existent in database before. - prev, ok := c.tracer.accessList[string(path)] + _, ok := c.tracer.accessList[string(path)] if ok { - c.nodes.AddNode(path, trienode.NewWithPrev(common.Hash{}, nil, prev)) + c.nodes.AddNode(path, trienode.NewDeleted()) } return n } // Collect the dirty node to nodeset for return. - var ( - nhash = common.BytesToHash(hash) - node = trienode.NewWithPrev( - nhash, - nodeToBytes(n), - c.tracer.accessList[string(path)], - ) - ) - c.nodes.AddNode(path, node) + nhash := common.BytesToHash(hash) + c.nodes.AddNode(path, trienode.New(nhash, nodeToBytes(n))) // Collect the corresponding leaf node if it's required. We don't check // full node since it's impossible to store value in fullNode. The key diff --git a/trie/database.go b/trie/database.go index 08ef5d07e1ac..49a884fd7f39 100644 --- a/trie/database.go +++ b/trie/database.go @@ -19,18 +19,19 @@ package trie import ( "errors" - "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/triestate" ) // Config defines all necessary options for database. type Config struct { - Cache int // Memory allowance (MB) to use for caching trie nodes in memory - Preimages bool // Flag whether the preimage of trie key is recorded + Cache int // Memory allowance (MB) to use for caching trie nodes in memory + Preimages bool // Flag whether the preimage of trie key is recorded + PathDB *pathdb.Config // Configs for experimental path-based scheme, not used yet. // Testing hooks OnCommit func(states *triestate.Set) // Hook invoked when commit is performed @@ -53,7 +54,10 @@ type backend interface { // Update performs a state transition by committing dirty nodes contained // in the given set in order to update state from the specified parent to // the specified root. - Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error + // + // The passed in maps(nodes, states) will be retained to avoid copying + // everything. Therefore, these maps must not be changed afterwards. + Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error // Commit writes all relevant trie nodes belonging to the specified state // to disk. Report specifies whether logs will be displayed in info level. 
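Illustrative sketch (not part of the patch) of a caller honoring the new Update contract documented above: because the node and state sets are retained by the backend rather than copied, they must be treated as frozen once handed over. The wrapper applyTransition is hypothetical.

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/trienode"
	"github.com/ethereum/go-ethereum/trie/triestate"
)

// applyTransition commits one state transition from parent to root at the
// given block. Ownership of 'set' and 'states' passes to the trie database;
// the caller must not modify them after the call returns.
func applyTransition(db *trie.Database, root, parent common.Hash, block uint64,
	set *trienode.NodeSet, states *triestate.Set) error {
	merged := trienode.NewMergedNodeSet()
	if err := merged.Merge(set); err != nil {
		return err
	}
	return db.Update(root, parent, block, merged, states)
}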
@@ -67,20 +71,15 @@ type backend interface { // types of node backend as an entrypoint. It's responsible for all interactions // relevant with trie nodes and node preimages. type Database struct { - config *Config // Configuration for trie database - diskdb ethdb.Database // Persistent database to store the snapshot - cleans *fastcache.Cache // Megabytes permitted using for read caches - preimages *preimageStore // The store for caching preimages - backend backend // The backend for managing trie nodes + config *Config // Configuration for trie database + diskdb ethdb.Database // Persistent database to store the snapshot + preimages *preimageStore // The store for caching preimages + backend backend // The backend for managing trie nodes } // prepare initializes the database with provided configs, but the // database backend is still left as nil. func prepare(diskdb ethdb.Database, config *Config) *Database { - var cleans *fastcache.Cache - if config != nil && config.Cache > 0 { - cleans = fastcache.New(config.Cache * 1024 * 1024) - } var preimages *preimageStore if config != nil && config.Preimages { preimages = newPreimageStore(diskdb) @@ -88,7 +87,6 @@ func prepare(diskdb ethdb.Database, config *Config) *Database { return &Database{ config: config, diskdb: diskdb, - cleans: cleans, preimages: preimages, } } @@ -103,21 +101,34 @@ func NewDatabase(diskdb ethdb.Database) *Database { // The path-based scheme is not activated yet, always initialized with legacy // hash-based scheme by default. func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database { + var cleans int + if config != nil && config.Cache != 0 { + cleans = config.Cache * 1024 * 1024 + } db := prepare(diskdb, config) - db.backend = hashdb.New(diskdb, db.cleans, mptResolver{}) + db.backend = hashdb.New(diskdb, cleans, mptResolver{}) return db } // Reader returns a reader for accessing all trie nodes with provided state root. // An error will be returned if the requested state is not available. func (db *Database) Reader(blockRoot common.Hash) (Reader, error) { - return db.backend.(*hashdb.Database).Reader(blockRoot) + switch b := db.backend.(type) { + case *hashdb.Database: + return b.Reader(blockRoot) + case *pathdb.Database: + return b.Reader(blockRoot) + } + return nil, errors.New("unknown backend") } // Update performs a state transition by committing dirty nodes contained in the // given set in order to update state from the specified parent to the specified // root. The held pre-images accumulated up to this point will be flushed in case // the size exceeds the threshold. +// +// The passed in maps(nodes, states) will be retained to avoid copying everything. +// Therefore, these maps must not be changed afterwards. 
func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { if db.config != nil && db.config.OnCommit != nil { db.config.OnCommit(states) @@ -125,7 +136,7 @@ func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, n if db.preimages != nil { db.preimages.commit(false) } - return db.backend.Update(root, parent, nodes) + return db.backend.Update(root, parent, block, nodes, states) } // Commit iterates over all the children of a particular node, writes them out diff --git a/trie/database_test.go b/trie/database_test.go index cad462f737a7..ed43a81e5976 100644 --- a/trie/database_test.go +++ b/trie/database_test.go @@ -20,16 +20,16 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) // newTestDatabase initializes the trie database with specified scheme. func newTestDatabase(diskdb ethdb.Database, scheme string) *Database { db := prepare(diskdb, nil) if scheme == rawdb.HashScheme { - db.backend = hashdb.New(diskdb, db.cleans, mptResolver{}) + db.backend = hashdb.New(diskdb, 0, mptResolver{}) + } else { + db.backend = pathdb.New(diskdb, &pathdb.Config{}) // disable clean/dirty cache } - //} else { - // db.backend = snap.New(diskdb, db.cleans, nil) - //} return db } diff --git a/trie/iterator_test.go b/trie/iterator_test.go index abe9cf1b11ae..bf20c00c6436 100644 --- a/trie/iterator_test.go +++ b/trie/iterator_test.go @@ -130,7 +130,7 @@ type iterationElement struct { // Tests that the node iterator indeed walks over the entire database contents. func TestNodeIteratorCoverage(t *testing.T) { testNodeIteratorCoverage(t, rawdb.HashScheme) - //testNodeIteratorCoverage(t, rawdb.PathScheme) + testNodeIteratorCoverage(t, rawdb.PathScheme) } func testNodeIteratorCoverage(t *testing.T, scheme string) { @@ -355,8 +355,8 @@ func TestIteratorNoDups(t *testing.T) { func TestIteratorContinueAfterError(t *testing.T) { testIteratorContinueAfterError(t, false, rawdb.HashScheme) testIteratorContinueAfterError(t, true, rawdb.HashScheme) - // testIteratorContinueAfterError(t, false, rawdb.PathScheme) - // testIteratorContinueAfterError(t, true, rawdb.PathScheme) + testIteratorContinueAfterError(t, false, rawdb.PathScheme) + testIteratorContinueAfterError(t, true, rawdb.PathScheme) } func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) { @@ -461,8 +461,8 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) { func TestIteratorContinueAfterSeekError(t *testing.T) { testIteratorContinueAfterSeekError(t, false, rawdb.HashScheme) testIteratorContinueAfterSeekError(t, true, rawdb.HashScheme) - // testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme) - // testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme) + testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme) + testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme) } func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme string) { @@ -534,7 +534,7 @@ func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) in func TestIteratorNodeBlob(t *testing.T) { testIteratorNodeBlob(t, rawdb.HashScheme) - //testIteratorNodeBlob(t, rawdb.PathScheme) + testIteratorNodeBlob(t, rawdb.PathScheme) } type loggingDb struct { diff --git a/trie/sync_test.go b/trie/sync_test.go index 589e2858fbea..b6fe8d84a6df 
100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -111,16 +111,16 @@ type trieElement struct { func TestEmptySync(t *testing.T) { dbA := NewDatabase(rawdb.NewMemoryDatabase()) dbB := NewDatabase(rawdb.NewMemoryDatabase()) - //dbC := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme) - //dbD := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme) + dbC := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme) + dbD := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme) emptyA := NewEmpty(dbA) emptyB, _ := New(TrieID(types.EmptyRootHash), dbB) - //emptyC := NewEmpty(dbC) - //emptyD, _ := New(TrieID(types.EmptyRootHash), dbD) + emptyC := NewEmpty(dbC) + emptyD, _ := New(TrieID(types.EmptyRootHash), dbD) - for i, trie := range []*Trie{emptyA, emptyB /*emptyC, emptyD*/} { - sync := NewSync(trie.Hash(), memorydb.New(), nil, []*Database{dbA, dbB /*dbC, dbD*/}[i].Scheme()) + for i, trie := range []*Trie{emptyA, emptyB, emptyC, emptyD} { + sync := NewSync(trie.Hash(), memorydb.New(), nil, []*Database{dbA, dbB, dbC, dbD}[i].Scheme()) if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 { t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, paths, nodes, codes) } @@ -134,10 +134,10 @@ func TestIterativeSync(t *testing.T) { testIterativeSync(t, 100, false, rawdb.HashScheme) testIterativeSync(t, 1, true, rawdb.HashScheme) testIterativeSync(t, 100, true, rawdb.HashScheme) - // testIterativeSync(t, 1, false, rawdb.PathScheme) - // testIterativeSync(t, 100, false, rawdb.PathScheme) - // testIterativeSync(t, 1, true, rawdb.PathScheme) - // testIterativeSync(t, 100, true, rawdb.PathScheme) + testIterativeSync(t, 1, false, rawdb.PathScheme) + testIterativeSync(t, 100, false, rawdb.PathScheme) + testIterativeSync(t, 1, true, rawdb.PathScheme) + testIterativeSync(t, 100, true, rawdb.PathScheme) } func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) { @@ -212,7 +212,7 @@ func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) { // partial results are returned, and the others sent only later. func TestIterativeDelayedSync(t *testing.T) { testIterativeDelayedSync(t, rawdb.HashScheme) - //testIterativeDelayedSync(t, rawdb.PathScheme) + testIterativeDelayedSync(t, rawdb.PathScheme) } func testIterativeDelayedSync(t *testing.T, scheme string) { @@ -280,8 +280,8 @@ func testIterativeDelayedSync(t *testing.T, scheme string) { func TestIterativeRandomSyncIndividual(t *testing.T) { testIterativeRandomSync(t, 1, rawdb.HashScheme) testIterativeRandomSync(t, 100, rawdb.HashScheme) - // testIterativeRandomSync(t, 1, rawdb.PathScheme) - // testIterativeRandomSync(t, 100, rawdb.PathScheme) + testIterativeRandomSync(t, 1, rawdb.PathScheme) + testIterativeRandomSync(t, 100, rawdb.PathScheme) } func testIterativeRandomSync(t *testing.T, count int, scheme string) { @@ -348,7 +348,7 @@ func testIterativeRandomSync(t *testing.T, count int, scheme string) { // partial results are returned (Even those randomly), others sent only later. func TestIterativeRandomDelayedSync(t *testing.T) { testIterativeRandomDelayedSync(t, rawdb.HashScheme) - // testIterativeRandomDelayedSync(t, rawdb.PathScheme) + testIterativeRandomDelayedSync(t, rawdb.PathScheme) } func testIterativeRandomDelayedSync(t *testing.T, scheme string) { @@ -420,7 +420,7 @@ func testIterativeRandomDelayedSync(t *testing.T, scheme string) { // have such references. 
func TestDuplicateAvoidanceSync(t *testing.T) { testDuplicateAvoidanceSync(t, rawdb.HashScheme) - // testDuplicateAvoidanceSync(t, rawdb.PathScheme) + testDuplicateAvoidanceSync(t, rawdb.PathScheme) } func testDuplicateAvoidanceSync(t *testing.T, scheme string) { @@ -491,12 +491,10 @@ func testDuplicateAvoidanceSync(t *testing.T, scheme string) { // the database. func TestIncompleteSyncHash(t *testing.T) { testIncompleteSync(t, rawdb.HashScheme) - // testIncompleteSync(t, rawdb.PathScheme) + testIncompleteSync(t, rawdb.PathScheme) } func testIncompleteSync(t *testing.T, scheme string) { - t.Parallel() - // Create a random trie to copy _, srcDb, srcTrie, _ := makeTestTrie(scheme) @@ -582,7 +580,7 @@ func testIncompleteSync(t *testing.T, scheme string) { // depth. func TestSyncOrdering(t *testing.T) { testSyncOrdering(t, rawdb.HashScheme) - // testSyncOrdering(t, rawdb.PathScheme) + testSyncOrdering(t, rawdb.PathScheme) } func testSyncOrdering(t *testing.T, scheme string) { @@ -716,7 +714,7 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database // states synced in the last cycle. func TestSyncMovingTarget(t *testing.T) { testSyncMovingTarget(t, rawdb.HashScheme) - // testSyncMovingTarget(t, rawdb.PathScheme) + testSyncMovingTarget(t, rawdb.PathScheme) } func testSyncMovingTarget(t *testing.T, scheme string) { diff --git a/trie/testutil/utils.go b/trie/testutil/utils.go new file mode 100644 index 000000000000..a75d0431b0f4 --- /dev/null +++ b/trie/testutil/utils.go @@ -0,0 +1,61 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package testutil + +import ( + crand "crypto/rand" + "encoding/binary" + mrand "math/rand" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/trie/trienode" +) + +// Prng is a pseudo random number generator seeded by strong randomness. +// The randomness is printed on startup in order to make failures reproducible. +var prng = initRand() + +func initRand() *mrand.Rand { + var seed [8]byte + crand.Read(seed[:]) + rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:])))) + return rnd +} + +// RandBytes generates a random byte slice with specified length. +func RandBytes(n int) []byte { + r := make([]byte, n) + prng.Read(r) + return r +} + +// RandomHash generates a random blob of data and returns it as a hash. +func RandomHash() common.Hash { + return common.BytesToHash(RandBytes(common.HashLength)) +} + +// RandomAddress generates a random blob of data and returns it as an address. +func RandomAddress() common.Address { + return common.BytesToAddress(RandBytes(common.AddressLength)) +} + +// RandomNode generates a random node. 
+func RandomNode() *trienode.Node { + val := RandBytes(100) + return trienode.New(crypto.Keccak256Hash(val), val) +} diff --git a/trie/tracer.go b/trie/tracer.go index 2b5de8ec4714..5786af4d3ec9 100644 --- a/trie/tracer.go +++ b/trie/tracer.go @@ -18,7 +18,6 @@ package trie import ( "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/trie/trienode" ) // tracer tracks the changes of trie nodes. During the trie operations, @@ -114,16 +113,18 @@ func (t *tracer) copy() *tracer { } } -// markDeletions puts all tracked deletions into the provided nodeset. -func (t *tracer) markDeletions(set *trienode.NodeSet) { +// deletedNodes returns a list of node paths which are deleted from the trie. +func (t *tracer) deletedNodes() []string { + var paths []string for path := range t.deletes { // It's possible a few deleted nodes were embedded // in their parent before, the deletions can be no // effect by deleting nothing, filter them out. - prev, ok := t.accessList[path] + _, ok := t.accessList[path] if !ok { continue } - set.AddNode([]byte(path), trienode.NewWithPrev(common.Hash{}, nil, prev)) + paths = append(paths, path) } + return paths } diff --git a/trie/trie.go b/trie/trie.go index 8fbd4e64a308..07467ac69c96 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -612,14 +612,20 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) defer func() { t.committed = true }() - nodes := trienode.NewNodeSet(t.owner) - t.tracer.markDeletions(nodes) - // Trie is empty and can be classified into two types of situations: - // - The trie was empty and no update happens - // - The trie was non-empty and all nodes are dropped + // (a) The trie was empty and no update happens => return nil + // (b) The trie was non-empty and all nodes are dropped => return + // the node set includes all deleted nodes if t.root == nil { - return types.EmptyRootHash, nodes, nil + paths := t.tracer.deletedNodes() + if len(paths) == 0 { + return types.EmptyRootHash, nil, nil // case (a) + } + nodes := trienode.NewNodeSet(t.owner) + for _, path := range paths { + nodes.AddNode([]byte(path), trienode.NewDeleted()) + } + return types.EmptyRootHash, nodes, nil // case (b) } // Derive the hash for all dirty nodes first. We hold the assumption // in the following procedure that all nodes are hashed. @@ -633,6 +639,10 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) t.root = hashedNode return rootHash, nil, nil } + nodes := trienode.NewNodeSet(t.owner) + for _, path := range t.tracer.deletedNodes() { + nodes.AddNode([]byte(path), trienode.NewDeleted()) + } t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root) return rootHash, nodes, nil } diff --git a/trie/trie_reader.go b/trie/trie_reader.go index d42adad2c2fc..1c63ff4544fd 100644 --- a/trie/trie_reader.go +++ b/trie/trie_reader.go @@ -24,9 +24,14 @@ import ( // Reader wraps the Node method of a backing trie store. type Reader interface { - // Node retrieves the RLP-encoded trie node blob with the provided trie - // identifier, node path and the corresponding node hash. No error will - // be returned if the node is not found. + // Node retrieves the trie node blob with the provided trie identifier, node path and + // the corresponding node hash. No error will be returned if the node is not found. + // + // When looking up nodes in the account trie, 'owner' is the zero hash. For contract + // storage trie nodes, 'owner' is the hash of the account address that containing the + // storage. 
+ // + // TODO(rjl493456442): remove the 'hash' parameter, it's redundant in PBSS. Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) } diff --git a/trie/trie_test.go b/trie/trie_test.go index cc86e7aee256..3cb21c1956b3 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -76,9 +76,9 @@ func TestMissingRoot(t *testing.T) { func TestMissingNode(t *testing.T) { testMissingNode(t, false, rawdb.HashScheme) - //testMissingNode(t, false, rawdb.PathScheme) + testMissingNode(t, false, rawdb.PathScheme) testMissingNode(t, true, rawdb.HashScheme) - //testMissingNode(t, true, rawdb.PathScheme) + testMissingNode(t, true, rawdb.PathScheme) } func testMissingNode(t *testing.T, memonly bool, scheme string) { @@ -422,44 +422,44 @@ func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error { if !ok || n.IsDeleted() { return errors.New("expect new node") } - if len(n.Prev) > 0 { - return errors.New("unexpected origin value") - } + //if len(n.Prev) > 0 { + // return errors.New("unexpected origin value") + //} } // Check deletion set - for path, blob := range deletes { + for path := range deletes { n, ok := set.Nodes[path] if !ok || !n.IsDeleted() { return errors.New("expect deleted node") } - if len(n.Prev) == 0 { - return errors.New("expect origin value") - } - if !bytes.Equal(n.Prev, blob) { - return errors.New("invalid origin value") - } + //if len(n.Prev) == 0 { + // return errors.New("expect origin value") + //} + //if !bytes.Equal(n.Prev, blob) { + // return errors.New("invalid origin value") + //} } // Check update set - for path, blob := range updates { + for path := range updates { n, ok := set.Nodes[path] if !ok || n.IsDeleted() { return errors.New("expect updated node") } - if len(n.Prev) == 0 { - return errors.New("expect origin value") - } - if !bytes.Equal(n.Prev, blob) { - return errors.New("invalid origin value") - } + //if len(n.Prev) == 0 { + // return errors.New("expect origin value") + //} + //if !bytes.Equal(n.Prev, blob) { + // return errors.New("invalid origin value") + //} } return nil } func runRandTest(rt randTest) bool { var scheme = rawdb.HashScheme - //if rand.Intn(2) == 0 { - // scheme = rawdb.PathScheme - //} + if rand.Intn(2) == 0 { + scheme = rawdb.PathScheme + } var ( origin = types.EmptyRootHash triedb = newTestDatabase(rawdb.NewMemoryDatabase(), scheme) diff --git a/trie/triedb/hashdb/database.go b/trie/triedb/hashdb/database.go index ed20ff9cdeb5..4441f2a3827d 100644 --- a/trie/triedb/hashdb/database.go +++ b/trie/triedb/hashdb/database.go @@ -32,30 +32,31 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" ) var ( - memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil) - memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil) - memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil) - memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil) - - memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil) - memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil) - memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil) - memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil) - - memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil) - 
memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil) - memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil) - - memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil) - memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil) - memcacheGCSizeMeter = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil) - - memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil) - memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil) - memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil) + memcacheCleanHitMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/hit", nil) + memcacheCleanMissMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/miss", nil) + memcacheCleanReadMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/read", nil) + memcacheCleanWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/write", nil) + + memcacheDirtyHitMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/hit", nil) + memcacheDirtyMissMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/miss", nil) + memcacheDirtyReadMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/read", nil) + memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/write", nil) + + memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/flush/time", nil) + memcacheFlushNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/nodes", nil) + memcacheFlushBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/bytes", nil) + + memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/gc/time", nil) + memcacheGCNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/nodes", nil) + memcacheGCBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/bytes", nil) + + memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/commit/time", nil) + memcacheCommitNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/nodes", nil) + memcacheCommitBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/bytes", nil) ) // ChildResolver defines the required method to decode the provided @@ -121,7 +122,13 @@ func (n *cachedNode) forChildren(resolver ChildResolver, onChild func(hash commo } // New initializes the hash-based node database. -func New(diskdb ethdb.Database, cleans *fastcache.Cache, resolver ChildResolver) *Database { +func New(diskdb ethdb.Database, size int, resolver ChildResolver) *Database { + // Initialize the clean cache if the specified cache allowance + // is non-zero. Note, the size is in bytes. 
+ var cleans *fastcache.Cache + if size > 0 { + cleans = fastcache.New(size) + } return &Database{ diskdb: diskdb, resolver: resolver, @@ -269,7 +276,7 @@ func (db *Database) Dereference(root common.Hash) { db.gctime += time.Since(start) memcacheGCTimeTimer.Update(time.Since(start)) - memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize)) + memcacheGCBytesMeter.Mark(int64(storage - db.dirtiesSize)) memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties))) log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), @@ -390,7 +397,7 @@ func (db *Database) Cap(limit common.StorageSize) error { db.flushtime += time.Since(start) memcacheFlushTimeTimer.Update(time.Since(start)) - memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize)) + memcacheFlushBytesMeter.Mark(int64(storage - db.dirtiesSize)) memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties))) log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), @@ -436,7 +443,7 @@ func (db *Database) Commit(node common.Hash, report bool) error { // Reset the storage counters and bumped metrics memcacheCommitTimeTimer.Update(time.Since(start)) - memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize)) + memcacheCommitBytesMeter.Mark(int64(storage - db.dirtiesSize)) memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties))) logger := log.Info @@ -549,7 +556,7 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool { // Update inserts the dirty nodes in provided nodeset into database and link the // account trie with multiple storage tries if necessary. -func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error { +func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { // Ensure the parent state is present and signal a warning if not. if parent != types.EmptyRootHash { if blob, _ := db.Node(parent); len(blob) == 0 { diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go new file mode 100644 index 000000000000..29f6b5e103cd --- /dev/null +++ b/trie/triedb/pathdb/database.go @@ -0,0 +1,392 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
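//
// Illustrative note (not part of the patch): a sketch of how a caller might
// open the path-based database defined below. The diskdb handle is assumed to
// come from rawdb; the sizes shown mirror the package defaults.
//
//	db := pathdb.New(diskdb, &pathdb.Config{
//		StateLimit: params.FullImmutabilityThreshold, // recent blocks to keep state history for
//		CleanSize:  16 * 1024 * 1024,                 // clean node cache allowance, in bytes
//		DirtySize:  128 * 1024 * 1024,                // dirty node buffer allowance, in bytes
//	})
//	defer db.Close()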
+ +package pathdb + +import ( + "errors" + "fmt" + "io" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" +) + +// maxDiffLayers is the maximum diff layers allowed in the layer tree. +const maxDiffLayers = 128 + +// layer is the interface implemented by all state layers which includes some +// public methods and some additional methods for internal usage. +type layer interface { + // Node retrieves the trie node with the node info. An error will be returned + // if the read operation exits abnormally. For example, if the layer is already + // stale, or the associated state is regarded as corrupted. Notably, no error + // will be returned if the requested node is not found in database. + Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) + + // rootHash returns the root hash for which this layer was made. + rootHash() common.Hash + + // stateID returns the associated state id of layer. + stateID() uint64 + + // parentLayer returns the subsequent layer of it, or nil if the disk was reached. + parentLayer() layer + + // update creates a new layer on top of the existing layer diff tree with + // the provided dirty trie nodes along with the state change set. + // + // Note, the maps are retained by the method to avoid copying everything. + update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer + + // journal commits an entire diff hierarchy to disk into a single journal entry. + // This is meant to be used during shutdown to persist the layer without + // flattening everything down (bad for reorgs). + journal(w io.Writer) error +} + +// Config contains the settings for database. +type Config struct { + StateLimit uint64 // Number of recent blocks to maintain state history for + CleanSize int // Maximum memory allowance (in bytes) for caching clean nodes + DirtySize int // Maximum memory allowance (in bytes) for caching dirty nodes + ReadOnly bool // Flag whether the database is opened in read only mode. +} + +var ( + // defaultCleanSize is the default memory allowance of clean cache. + defaultCleanSize = 16 * 1024 * 1024 + + // defaultBufferSize is the default memory allowance of node buffer + // that aggregates the writes from above until it's flushed into the + // disk. Do not increase the buffer size arbitrarily, otherwise the + // system pause time will increase when the database writes happen. + defaultBufferSize = 128 * 1024 * 1024 +) + +// Defaults contains default settings for Ethereum mainnet. +var Defaults = &Config{ + StateLimit: params.FullImmutabilityThreshold, + CleanSize: defaultCleanSize, + DirtySize: defaultBufferSize, +} + +// Database is a multiple-layered structure for maintaining in-memory trie nodes. +// It consists of one persistent base layer backed by a key-value store, on top +// of which arbitrarily many in-memory diff layers are stacked. The memory diffs +// can form a tree with branching, but the disk layer is singleton and common to +// all. If a reorg goes deeper than the disk layer, a batch of reverse diffs can +// be applied to rollback. The deepest reorg that can be handled depends on the +// amount of state histories tracked in the disk. 
+// +// At most one readable and writable database can be opened at the same time in +// the whole system which ensures that only one database writer can operate disk +// state. Unexpected open operations can cause the system to panic. +type Database struct { + // readOnly is the flag whether the mutation is allowed to be applied. + // It will be set automatically when the database is journaled during + // the shutdown to reject all following unexpected mutations. + readOnly bool // Indicator if database is opened in read only mode + bufferSize int // Memory allowance (in bytes) for caching dirty nodes + config *Config // Configuration for database + diskdb ethdb.Database // Persistent storage for matured trie nodes + tree *layerTree // The group for all known layers + freezer *rawdb.ResettableFreezer // Freezer for storing trie histories, nil possible in tests + lock sync.RWMutex // Lock to prevent mutations from happening at the same time +} + +// New attempts to load an already existing layer from a persistent key-value +// store (with a number of memory layers from a journal). If the journal is not +// matched with the base persistent layer, all the recorded diff layers are discarded. +func New(diskdb ethdb.Database, config *Config) *Database { + if config == nil { + config = Defaults + } + db := &Database{ + readOnly: config.ReadOnly, + bufferSize: config.DirtySize, + config: config, + diskdb: diskdb, + } + // Construct the layer tree by resolving the in-disk singleton state + // and in-memory layer journal. + db.tree = newLayerTree(db.loadLayers()) + + // Open the freezer for state history if the passed database contains an + // ancient store. Otherwise, all the relevant functionalities are disabled. + // + // Because the freezer can only be opened once at the same time, this + // mechanism also ensures that at most one **non-readOnly** database + // is opened at the same time to prevent accidental mutation. + if ancient, err := diskdb.AncientDatadir(); err == nil && ancient != "" && !db.readOnly { + freezer, err := rawdb.NewStateHistoryFreezer(ancient, false) + if err != nil { + log.Crit("Failed to open state history freezer", "err", err) + } + db.freezer = freezer + + // Truncate the extra state histories above in freezer in case + // it's not aligned with the disk layer. + pruned, err := truncateFromHead(db.diskdb, freezer, db.tree.bottom().stateID()) + if err != nil { + log.Crit("Failed to truncate extra state histories", "err", err) + } + if pruned != 0 { + log.Warn("Truncated extra state histories", "number", pruned) + } + } + log.Warn("Path-based state scheme is an experimental feature") + return db +} + +// Reader retrieves a layer belonging to the given state root. +func (db *Database) Reader(root common.Hash) (layer, error) { + l := db.tree.get(root) + if l == nil { + return nil, fmt.Errorf("state %#x is not available", root) + } + return l, nil +} + +// Update adds a new layer into the tree, if that can be linked to an existing +// old parent. It is disallowed to insert a disk layer (the origin of all). Apart +// from that this function will flatten the extra diff layers at bottom into disk +// to only keep 128 diff layers in memory by default. +// +// The passed in maps(nodes, states) will be retained to avoid copying everything. +// Therefore, these maps must not be changed afterwards. 
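//
// An illustrative call sequence (a sketch only; the variable names below are
// assumptions, not identifiers introduced by this change):
//
//	nodes := trienode.NewMergedNodeSet()                        // dirty nodes from trie commits
//	states := triestate.New(accountOrigin, storageOrigin, nil)  // pre-block values of mutated state
//	if err := db.Update(newRoot, parentRoot, blockNumber, nodes, states); err != nil {
//		return err
//	}
//	// Diff layers beyond the 128-layer window are flattened into the disk
//	// layer by the capping step performed inside Update.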
+func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { + // Hold the lock to prevent concurrent mutations. + db.lock.Lock() + defer db.lock.Unlock() + + // Short circuit if the database is in read only mode. + if db.readOnly { + return errSnapshotReadOnly + } + if err := db.tree.add(root, parentRoot, block, nodes, states); err != nil { + return err + } + // Keep 128 diff layers in the memory, persistent layer is 129th. + // - head layer is paired with HEAD state + // - head-1 layer is paired with HEAD-1 state + // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state + // - head-128 layer(disk layer) is paired with HEAD-128 state + return db.tree.cap(root, maxDiffLayers) +} + +// Commit traverses downwards the layer tree from a specified layer with the +// provided state root and all the layers below are flattened downwards. It +// can be used alone and mostly for test purposes. +func (db *Database) Commit(root common.Hash, report bool) error { + // Hold the lock to prevent concurrent mutations. + db.lock.Lock() + defer db.lock.Unlock() + + // Short circuit if the database is in read only mode. + if db.readOnly { + return errSnapshotReadOnly + } + return db.tree.cap(root, 0) +} + +// Reset rebuilds the database with the specified state as the base. +// +// - if target state is empty, clear the stored state and all layers on top +// - if target state is non-empty, ensure the stored state matches with it +// and clear all other layers on top. +func (db *Database) Reset(root common.Hash) error { + db.lock.Lock() + defer db.lock.Unlock() + + // Short circuit if the database is in read only mode. + if db.readOnly { + return errSnapshotReadOnly + } + batch := db.diskdb.NewBatch() + root = types.TrieRootHash(root) + if root == types.EmptyRootHash { + // Empty state is requested as the target, nuke out + // the root node and leave all others as dangling. + rawdb.DeleteAccountTrieNode(batch, nil) + } else { + // Ensure the requested state is existent before any + // action is applied. + _, hash := rawdb.ReadAccountTrieNode(db.diskdb, nil) + if hash != root { + return fmt.Errorf("state is mismatched, local: %x, target: %x", hash, root) + } + } + // Mark the disk layer as stale before applying any mutation. + db.tree.bottom().markStale() + + // Drop the stale state journal in persistent database and + // reset the persistent state id back to zero. + rawdb.DeleteTrieJournal(batch) + rawdb.WritePersistentStateID(batch, 0) + if err := batch.Write(); err != nil { + return err + } + // Clean up all state histories in freezer. Theoretically + // all root->id mappings should be removed as well. Since + // mappings can be huge and might take a while to clear + // them, just leave them in disk and wait for overwriting. + if db.freezer != nil { + if err := db.freezer.Reset(); err != nil { + return err + } + } + // Re-construct a new disk layer backed by persistent state + // with **empty clean cache and node buffer**. + dl := newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0)) + db.tree.reset(dl) + log.Info("Rebuilt trie database", "root", root) + return nil +} + +// Recover rollbacks the database to a specified historical point. +// The state is supported as the rollback destination only if it's +// canonical state and the corresponding trie histories are existent. 
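//
// A minimal rollback sketch (illustrative only; the loader is assumed to be a
// triestate.TrieLoader implementation supplied by the caller):
//
//	if !db.Recoverable(targetRoot) {
//		return errStateUnrecoverable
//	}
//	if err := db.Recover(targetRoot, loader); err != nil {
//		return err
//	}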
+func (db *Database) Recover(root common.Hash, loader triestate.TrieLoader) error { + db.lock.Lock() + defer db.lock.Unlock() + + // Short circuit if rollback operation is not supported. + if db.readOnly || db.freezer == nil { + return errors.New("state rollback is non-supported") + } + // Short circuit if the target state is not recoverable. + root = types.TrieRootHash(root) + if !db.Recoverable(root) { + return errStateUnrecoverable + } + // Apply the state histories upon the disk layer in order. + var ( + start = time.Now() + dl = db.tree.bottom() + ) + for dl.rootHash() != root { + h, err := readHistory(db.freezer, dl.stateID()) + if err != nil { + return err + } + dl, err = dl.revert(h, loader) + if err != nil { + return err + } + // reset layer with newly created disk layer. It must be + // done after each revert operation, otherwise the new + // disk layer won't be accessible from outside. + db.tree.reset(dl) + } + rawdb.DeleteTrieJournal(db.diskdb) + _, err := truncateFromHead(db.diskdb, db.freezer, dl.stateID()) + if err != nil { + return err + } + log.Debug("Recovered state", "root", root, "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + +// Recoverable returns the indicator if the specified state is recoverable. +func (db *Database) Recoverable(root common.Hash) bool { + // Ensure the requested state is a known state. + root = types.TrieRootHash(root) + id := rawdb.ReadStateID(db.diskdb, root) + if id == nil { + return false + } + // Recoverable state must below the disk layer. The recoverable + // state only refers the state that is currently not available, + // but can be restored by applying state history. + dl := db.tree.bottom() + if *id >= dl.stateID() { + return false + } + // Ensure the requested state is a canonical state and all state + // histories in range [id+1, disklayer.ID] are present and complete. + parent := root + return checkHistories(db.freezer, *id+1, dl.stateID()-*id, func(m *meta) error { + if m.parent != parent { + return errors.New("unexpected state history") + } + if len(m.incomplete) > 0 { + return errors.New("incomplete state history") + } + parent = m.root + return nil + }) == nil +} + +// Close closes the trie database and the held freezer. +func (db *Database) Close() error { + db.lock.Lock() + defer db.lock.Unlock() + + db.readOnly = true + if db.freezer == nil { + return nil + } + return db.freezer.Close() +} + +// Size returns the current storage size of the memory cache in front of the +// persistent database layer. +func (db *Database) Size() (size common.StorageSize) { + db.tree.forEach(func(layer layer) { + if diff, ok := layer.(*diffLayer); ok { + size += common.StorageSize(diff.memory) + } + if disk, ok := layer.(*diskLayer); ok { + size += disk.size() + } + }) + return size +} + +// Initialized returns an indicator if the state data is already +// initialized in path-based scheme. +func (db *Database) Initialized(genesisRoot common.Hash) bool { + var inited bool + db.tree.forEach(func(layer layer) { + if layer.rootHash() != types.EmptyRootHash { + inited = true + } + }) + return inited +} + +// SetBufferSize sets the node buffer size to the provided value(in bytes). +func (db *Database) SetBufferSize(size int) error { + db.lock.Lock() + defer db.lock.Unlock() + + db.bufferSize = size + return db.tree.bottom().setBufferSize(db.bufferSize) +} + +// Scheme returns the node scheme used in the database. 
+func (db *Database) Scheme() string { + return rawdb.PathScheme +} diff --git a/trie/triedb/pathdb/database_test.go b/trie/triedb/pathdb/database_test.go new file mode 100644 index 000000000000..bcc37e59c853 --- /dev/null +++ b/trie/triedb/pathdb/database_test.go @@ -0,0 +1,573 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pathdb + +import ( + "bytes" + "errors" + "fmt" + "math/big" + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie/testutil" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" +) + +func updateTrie(addrHash common.Hash, root common.Hash, dirties, cleans map[common.Hash][]byte) (common.Hash, *trienode.NodeSet) { + h, err := newTestHasher(addrHash, root, cleans) + if err != nil { + panic(fmt.Errorf("failed to create hasher, err: %w", err)) + } + for key, val := range dirties { + if len(val) == 0 { + h.Delete(key.Bytes()) + } else { + h.Update(key.Bytes(), val) + } + } + return h.Commit(false) +} + +func generateAccount(storageRoot common.Hash) types.StateAccount { + return types.StateAccount{ + Nonce: uint64(rand.Intn(100)), + Balance: big.NewInt(rand.Int63()), + CodeHash: testutil.RandBytes(32), + Root: storageRoot, + } +} + +const ( + createAccountOp int = iota + modifyAccountOp + deleteAccountOp + opLen +) + +type genctx struct { + accounts map[common.Hash][]byte + storages map[common.Hash]map[common.Hash][]byte + accountOrigin map[common.Address][]byte + storageOrigin map[common.Address]map[common.Hash][]byte + nodes *trienode.MergedNodeSet +} + +func newCtx() *genctx { + return &genctx{ + accounts: make(map[common.Hash][]byte), + storages: make(map[common.Hash]map[common.Hash][]byte), + accountOrigin: make(map[common.Address][]byte), + storageOrigin: make(map[common.Address]map[common.Hash][]byte), + nodes: trienode.NewMergedNodeSet(), + } +} + +type tester struct { + db *Database + roots []common.Hash + preimages map[common.Hash]common.Address + accounts map[common.Hash][]byte + storages map[common.Hash]map[common.Hash][]byte + + // state snapshots + snapAccounts map[common.Hash]map[common.Hash][]byte + snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte +} + +func newTester(t *testing.T) *tester { + var ( + disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + db = New(disk, &Config{CleanSize: 256 * 1024, DirtySize: 256 * 1024}) + obj = &tester{ + db: db, + preimages: make(map[common.Hash]common.Address), + accounts: make(map[common.Hash][]byte), + storages: make(map[common.Hash]map[common.Hash][]byte), 
+ snapAccounts: make(map[common.Hash]map[common.Hash][]byte), + snapStorages: make(map[common.Hash]map[common.Hash]map[common.Hash][]byte), + } + ) + for i := 0; i < 2*128; i++ { + var parent = types.EmptyRootHash + if len(obj.roots) != 0 { + parent = obj.roots[len(obj.roots)-1] + } + root, nodes, states := obj.generate(parent) + if err := db.Update(root, parent, uint64(i), nodes, states); err != nil { + panic(fmt.Errorf("failed to update state changes, err: %w", err)) + } + obj.roots = append(obj.roots, root) + } + return obj +} + +func (t *tester) release() { + t.db.Close() + t.db.diskdb.Close() +} + +func (t *tester) randAccount() (common.Address, []byte) { + for addrHash, account := range t.accounts { + return t.preimages[addrHash], account + } + return common.Address{}, nil +} + +func (t *tester) generateStorage(ctx *genctx, addr common.Address) common.Hash { + var ( + addrHash = crypto.Keccak256Hash(addr.Bytes()) + storage = make(map[common.Hash][]byte) + origin = make(map[common.Hash][]byte) + ) + for i := 0; i < 10; i++ { + v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32))) + hash := testutil.RandomHash() + + storage[hash] = v + origin[hash] = nil + } + root, set := updateTrie(addrHash, types.EmptyRootHash, storage, nil) + + ctx.storages[addrHash] = storage + ctx.storageOrigin[addr] = origin + ctx.nodes.Merge(set) + return root +} + +func (t *tester) mutateStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash { + var ( + addrHash = crypto.Keccak256Hash(addr.Bytes()) + storage = make(map[common.Hash][]byte) + origin = make(map[common.Hash][]byte) + ) + for hash, val := range t.storages[addrHash] { + origin[hash] = val + storage[hash] = nil + + if len(origin) == 3 { + break + } + } + for i := 0; i < 3; i++ { + v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32))) + hash := testutil.RandomHash() + + storage[hash] = v + origin[hash] = nil + } + root, set := updateTrie(crypto.Keccak256Hash(addr.Bytes()), root, storage, t.storages[addrHash]) + + ctx.storages[addrHash] = storage + ctx.storageOrigin[addr] = origin + ctx.nodes.Merge(set) + return root +} + +func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash { + var ( + addrHash = crypto.Keccak256Hash(addr.Bytes()) + storage = make(map[common.Hash][]byte) + origin = make(map[common.Hash][]byte) + ) + for hash, val := range t.storages[addrHash] { + origin[hash] = val + storage[hash] = nil + } + root, set := updateTrie(addrHash, root, storage, t.storages[addrHash]) + if root != types.EmptyRootHash { + panic("failed to clear storage trie") + } + ctx.storages[addrHash] = storage + ctx.storageOrigin[addr] = origin + ctx.nodes.Merge(set) + return root +} + +func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNodeSet, *triestate.Set) { + var ( + ctx = newCtx() + dirties = make(map[common.Hash]struct{}) + ) + for i := 0; i < 20; i++ { + switch rand.Intn(opLen) { + case createAccountOp: + // account creation + addr := testutil.RandomAddress() + addrHash := crypto.Keccak256Hash(addr.Bytes()) + if _, ok := t.accounts[addrHash]; ok { + continue + } + if _, ok := dirties[addrHash]; ok { + continue + } + dirties[addrHash] = struct{}{} + + root := t.generateStorage(ctx, addr) + ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root)) + ctx.accountOrigin[addr] = nil + t.preimages[addrHash] = addr + + case modifyAccountOp: + // account mutation + addr, account := t.randAccount() + if addr == (common.Address{}) { + continue 
+ } + addrHash := crypto.Keccak256Hash(addr.Bytes()) + if _, ok := dirties[addrHash]; ok { + continue + } + dirties[addrHash] = struct{}{} + + acct, _ := types.FullAccount(account) + stRoot := t.mutateStorage(ctx, addr, acct.Root) + newAccount := types.SlimAccountRLP(generateAccount(stRoot)) + + ctx.accounts[addrHash] = newAccount + ctx.accountOrigin[addr] = account + + case deleteAccountOp: + // account deletion + addr, account := t.randAccount() + if addr == (common.Address{}) { + continue + } + addrHash := crypto.Keccak256Hash(addr.Bytes()) + if _, ok := dirties[addrHash]; ok { + continue + } + dirties[addrHash] = struct{}{} + + acct, _ := types.FullAccount(account) + if acct.Root != types.EmptyRootHash { + t.clearStorage(ctx, addr, acct.Root) + } + ctx.accounts[addrHash] = nil + ctx.accountOrigin[addr] = account + } + } + root, set := updateTrie(common.Hash{}, parent, ctx.accounts, t.accounts) + ctx.nodes.Merge(set) + + // Save state snapshot before commit + t.snapAccounts[parent] = copyAccounts(t.accounts) + t.snapStorages[parent] = copyStorages(t.storages) + + // Commit all changes to live state set + for addrHash, account := range ctx.accounts { + if len(account) == 0 { + delete(t.accounts, addrHash) + } else { + t.accounts[addrHash] = account + } + } + for addrHash, slots := range ctx.storages { + if _, ok := t.storages[addrHash]; !ok { + t.storages[addrHash] = make(map[common.Hash][]byte) + } + for sHash, slot := range slots { + if len(slot) == 0 { + delete(t.storages[addrHash], sHash) + } else { + t.storages[addrHash][sHash] = slot + } + } + } + return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin, nil) +} + +// lastRoot returns the latest root hash, or empty if nothing is cached. +func (t *tester) lastHash() common.Hash { + if len(t.roots) == 0 { + return common.Hash{} + } + return t.roots[len(t.roots)-1] +} + +func (t *tester) verifyState(root common.Hash) error { + reader, err := t.db.Reader(root) + if err != nil { + return err + } + _, err = reader.Node(common.Hash{}, nil, root) + if err != nil { + return errors.New("root node is not available") + } + for addrHash, account := range t.snapAccounts[root] { + blob, err := reader.Node(common.Hash{}, addrHash.Bytes(), crypto.Keccak256Hash(account)) + if err != nil || !bytes.Equal(blob, account) { + return fmt.Errorf("account is mismatched: %w", err) + } + } + for addrHash, slots := range t.snapStorages[root] { + for hash, slot := range slots { + blob, err := reader.Node(addrHash, hash.Bytes(), crypto.Keccak256Hash(slot)) + if err != nil || !bytes.Equal(blob, slot) { + return fmt.Errorf("slot is mismatched: %w", err) + } + } + } + return nil +} + +func (t *tester) verifyHistory() error { + bottom := t.bottomIndex() + for i, root := range t.roots { + // The state history related to the state above disk layer should not exist. + if i > bottom { + _, err := readHistory(t.db.freezer, uint64(i+1)) + if err == nil { + return errors.New("unexpected state history") + } + continue + } + // The state history related to the state below or equal to the disk layer + // should exist. 
+ obj, err := readHistory(t.db.freezer, uint64(i+1)) + if err != nil { + return err + } + parent := types.EmptyRootHash + if i != 0 { + parent = t.roots[i-1] + } + if obj.meta.parent != parent { + return fmt.Errorf("unexpected parent, want: %x, got: %x", parent, obj.meta.parent) + } + if obj.meta.root != root { + return fmt.Errorf("unexpected root, want: %x, got: %x", root, obj.meta.root) + } + } + return nil +} + +// bottomIndex returns the index of current disk layer. +func (t *tester) bottomIndex() int { + bottom := t.db.tree.bottom() + for i := 0; i < len(t.roots); i++ { + if t.roots[i] == bottom.rootHash() { + return i + } + } + return -1 +} + +func TestDatabaseRollback(t *testing.T) { + // Verify state histories + tester := newTester(t) + defer tester.release() + + if err := tester.verifyHistory(); err != nil { + t.Fatalf("Invalid state history, err: %v", err) + } + // Revert database from top to bottom + for i := tester.bottomIndex(); i >= 0; i-- { + root := tester.roots[i] + parent := types.EmptyRootHash + if i > 0 { + parent = tester.roots[i-1] + } + loader := newHashLoader(tester.snapAccounts[root], tester.snapStorages[root]) + if err := tester.db.Recover(parent, loader); err != nil { + t.Fatalf("Failed to revert db, err: %v", err) + } + tester.verifyState(parent) + } + if tester.db.tree.len() != 1 { + t.Fatal("Only disk layer is expected") + } +} + +func TestDatabaseRecoverable(t *testing.T) { + var ( + tester = newTester(t) + index = tester.bottomIndex() + ) + defer tester.release() + + var cases = []struct { + root common.Hash + expect bool + }{ + // Unknown state should be unrecoverable + {common.Hash{0x1}, false}, + + // Initial state should be recoverable + {types.EmptyRootHash, true}, + + // Initial state should be recoverable + {common.Hash{}, true}, + + // Layers below current disk layer are recoverable + {tester.roots[index-1], true}, + + // Disklayer itself is not recoverable, since it's + // available for accessing. + {tester.roots[index], false}, + + // Layers above current disk layer are not recoverable + // since they are available for accessing. 
+ {tester.roots[index+1], false}, + } + for i, c := range cases { + result := tester.db.Recoverable(c.root) + if result != c.expect { + t.Fatalf("case: %d, unexpected result, want %t, got %t", i, c.expect, result) + } + } +} + +func TestReset(t *testing.T) { + var ( + tester = newTester(t) + index = tester.bottomIndex() + ) + defer tester.release() + + // Reset database to unknown target, should reject it + if err := tester.db.Reset(testutil.RandomHash()); err == nil { + t.Fatal("Failed to reject invalid reset") + } + // Reset database to state persisted in the disk + if err := tester.db.Reset(types.EmptyRootHash); err != nil { + t.Fatalf("Failed to reset database %v", err) + } + // Ensure journal is deleted from disk + if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 { + t.Fatal("Failed to clean journal") + } + // Ensure all trie histories are removed + for i := 0; i <= index; i++ { + _, err := readHistory(tester.db.freezer, uint64(i+1)) + if err == nil { + t.Fatalf("Failed to clean state history, index %d", i+1) + } + } + // Verify layer tree structure, single disk layer is expected + if tester.db.tree.len() != 1 { + t.Fatalf("Extra layer kept %d", tester.db.tree.len()) + } + if tester.db.tree.bottom().rootHash() != types.EmptyRootHash { + t.Fatalf("Root hash is not matched exp %x got %x", types.EmptyRootHash, tester.db.tree.bottom().rootHash()) + } +} + +func TestCommit(t *testing.T) { + tester := newTester(t) + defer tester.release() + + if err := tester.db.Commit(tester.lastHash(), false); err != nil { + t.Fatalf("Failed to cap database, err: %v", err) + } + // Verify layer tree structure, single disk layer is expected + if tester.db.tree.len() != 1 { + t.Fatal("Layer tree structure is invalid") + } + if tester.db.tree.bottom().rootHash() != tester.lastHash() { + t.Fatal("Layer tree structure is invalid") + } + // Verify states + if err := tester.verifyState(tester.lastHash()); err != nil { + t.Fatalf("State is invalid, err: %v", err) + } + // Verify state histories + if err := tester.verifyHistory(); err != nil { + t.Fatalf("State history is invalid, err: %v", err) + } +} + +func TestJournal(t *testing.T) { + tester := newTester(t) + defer tester.release() + + if err := tester.db.Journal(tester.lastHash()); err != nil { + t.Errorf("Failed to journal, err: %v", err) + } + tester.db.Close() + tester.db = New(tester.db.diskdb, nil) + + // Verify states including disk layer and all diff on top. 
+ for i := 0; i < len(tester.roots); i++ { + if i >= tester.bottomIndex() { + if err := tester.verifyState(tester.roots[i]); err != nil { + t.Fatalf("Invalid state, err: %v", err) + } + continue + } + if err := tester.verifyState(tester.roots[i]); err == nil { + t.Fatal("Unexpected state") + } + } +} + +func TestCorruptedJournal(t *testing.T) { + tester := newTester(t) + defer tester.release() + + if err := tester.db.Journal(tester.lastHash()); err != nil { + t.Errorf("Failed to journal, err: %v", err) + } + tester.db.Close() + _, root := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil) + + // Mutate the journal in disk, it should be regarded as invalid + blob := rawdb.ReadTrieJournal(tester.db.diskdb) + blob[0] = 1 + rawdb.WriteTrieJournal(tester.db.diskdb, blob) + + // Verify states, all not-yet-written states should be discarded + tester.db = New(tester.db.diskdb, nil) + for i := 0; i < len(tester.roots); i++ { + if tester.roots[i] == root { + if err := tester.verifyState(root); err != nil { + t.Fatalf("Disk state is corrupted, err: %v", err) + } + continue + } + if err := tester.verifyState(tester.roots[i]); err == nil { + t.Fatal("Unexpected state") + } + } +} + +// copyAccounts returns a deep-copied account set of the provided one. +func copyAccounts(set map[common.Hash][]byte) map[common.Hash][]byte { + copied := make(map[common.Hash][]byte, len(set)) + for key, val := range set { + copied[key] = common.CopyBytes(val) + } + return copied +} + +// copyStorages returns a deep-copied storage set of the provided one. +func copyStorages(set map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte { + copied := make(map[common.Hash]map[common.Hash][]byte, len(set)) + for addrHash, subset := range set { + copied[addrHash] = make(map[common.Hash][]byte, len(subset)) + for key, val := range subset { + copied[addrHash][key] = common.CopyBytes(val) + } + } + return copied +} diff --git a/trie/triedb/pathdb/difflayer.go b/trie/triedb/pathdb/difflayer.go new file mode 100644 index 000000000000..d25ac1c601d7 --- /dev/null +++ b/trie/triedb/pathdb/difflayer.go @@ -0,0 +1,174 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pathdb + +import ( + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" +) + +// diffLayer represents a collection of modifications made to the in-memory tries +// along with associated state changes after running a block on top. +// +// The goal of a diff layer is to act as a journal, tracking recent modifications +// made to the state, that have not yet graduated into a semi-immutable state. 
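//
// Layers conceptually stack as shown below (sketch only); a new diff layer is
// created on top of the current head through the layer interface, e.g.
// parent.update(newRoot, parent.stateID()+1, block, nodes, states):
//
//	disk layer (HEAD-128) <- diff (HEAD-127) <- ... <- diff (HEAD-1) <- diff (HEAD)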
+type diffLayer struct { + // Immutables + root common.Hash // Root hash to which this layer diff belongs to + id uint64 // Corresponding state id + block uint64 // Associated block number + nodes map[common.Hash]map[string]*trienode.Node // Cached trie nodes indexed by owner and path + states *triestate.Set // Associated state change set for building history + memory uint64 // Approximate guess as to how much memory we use + + parent layer // Parent layer modified by this one, never nil, **can be changed** + lock sync.RWMutex // Lock used to protect parent +} + +// newDiffLayer creates a new diff layer on top of an existing layer. +func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer { + var ( + size int64 + count int + ) + dl := &diffLayer{ + root: root, + id: id, + block: block, + nodes: nodes, + states: states, + parent: parent, + } + for _, subset := range nodes { + for path, n := range subset { + dl.memory += uint64(n.Size() + len(path)) + size += int64(len(n.Blob) + len(path)) + } + count += len(subset) + } + if states != nil { + dl.memory += uint64(states.Size()) + } + dirtyWriteMeter.Mark(size) + diffLayerNodesMeter.Mark(int64(count)) + diffLayerBytesMeter.Mark(int64(dl.memory)) + log.Debug("Created new diff layer", "id", id, "block", block, "nodes", count, "size", common.StorageSize(dl.memory)) + return dl +} + +// rootHash implements the layer interface, returning the root hash of +// corresponding state. +func (dl *diffLayer) rootHash() common.Hash { + return dl.root +} + +// stateID implements the layer interface, returning the state id of the layer. +func (dl *diffLayer) stateID() uint64 { + return dl.id +} + +// parentLayer implements the layer interface, returning the subsequent +// layer of the diff layer. +func (dl *diffLayer) parentLayer() layer { + dl.lock.RLock() + defer dl.lock.RUnlock() + + return dl.parent +} + +// node retrieves the node with provided node information. It's the internal +// version of Node function with additional accessed layer tracked. No error +// will be returned if node is not found. +func (dl *diffLayer) node(owner common.Hash, path []byte, hash common.Hash, depth int) ([]byte, error) { + // Hold the lock, ensure the parent won't be changed during the + // state accessing. + dl.lock.RLock() + defer dl.lock.RUnlock() + + // If the trie node is known locally, return it + subset, ok := dl.nodes[owner] + if ok { + n, ok := subset[string(path)] + if ok { + // If the trie node is not hash matched, or marked as removed, + // bubble up an error here. It shouldn't happen at all. + if n.Hash != hash { + dirtyFalseMeter.Mark(1) + log.Error("Unexpected trie node in diff layer", "owner", owner, "path", path, "expect", hash, "got", n.Hash) + return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path) + } + dirtyHitMeter.Mark(1) + dirtyNodeHitDepthHist.Update(int64(depth)) + dirtyReadMeter.Mark(int64(len(n.Blob))) + return n.Blob, nil + } + } + // Trie node unknown to this layer, resolve from parent + if diff, ok := dl.parent.(*diffLayer); ok { + return diff.node(owner, path, hash, depth+1) + } + // Failed to resolve through diff layers, fallback to disk layer + return dl.parent.Node(owner, path, hash) +} + +// Node implements the layer interface, retrieving the trie node blob with the +// provided node information. No error will be returned if the node is not found. 
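//
// Lookup sketch (illustrative): the owner is the zero hash for the account
// trie and the hashed account address for a storage trie, mirroring the usage
// elsewhere in this patch:
//
//	blob, err := layer.Node(common.Hash{}, path, nodeHash) // account trie node
//	blob, err = layer.Node(addrHash, slotPath, nodeHash)   // storage trie node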
+func (dl *diffLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) { + return dl.node(owner, path, hash, 0) +} + +// update implements the layer interface, creating a new layer on top of the +// existing layer tree with the specified data items. +func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer { + return newDiffLayer(dl, root, id, block, nodes, states) +} + +// persist flushes the diff layer and all its parent layers to disk layer. +func (dl *diffLayer) persist(force bool) (layer, error) { + if parent, ok := dl.parentLayer().(*diffLayer); ok { + // Hold the lock to prevent any read operation until the new + // parent is linked correctly. + dl.lock.Lock() + + // The merging of diff layers starts at the bottom-most layer, + // therefore we recurse down here, flattening on the way up + // (diffToDisk). + result, err := parent.persist(force) + if err != nil { + dl.lock.Unlock() + return nil, err + } + dl.parent = result + dl.lock.Unlock() + } + return diffToDisk(dl, force) +} + +// diffToDisk merges a bottom-most diff into the persistent disk layer underneath +// it. The method will panic if called onto a non-bottom-most diff layer. +func diffToDisk(layer *diffLayer, force bool) (layer, error) { + disk, ok := layer.parentLayer().(*diskLayer) + if !ok { + panic(fmt.Sprintf("unknown layer type: %T", layer.parentLayer())) + } + return disk.commit(layer, force) +} diff --git a/trie/triedb/pathdb/difflayer_test.go b/trie/triedb/pathdb/difflayer_test.go new file mode 100644 index 000000000000..77c4cd5722da --- /dev/null +++ b/trie/triedb/pathdb/difflayer_test.go @@ -0,0 +1,170 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package pathdb + +import ( + "bytes" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/trie/testutil" + "github.com/ethereum/go-ethereum/trie/trienode" +) + +func emptyLayer() *diskLayer { + return &diskLayer{ + db: New(rawdb.NewMemoryDatabase(), nil), + buffer: newNodeBuffer(defaultBufferSize, nil, 0), + } +} + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/trie +// BenchmarkSearch128Layers +// BenchmarkSearch128Layers-8 243826 4755 ns/op +func BenchmarkSearch128Layers(b *testing.B) { benchmarkSearch(b, 0, 128) } + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/trie +// BenchmarkSearch512Layers +// BenchmarkSearch512Layers-8 49686 24256 ns/op +func BenchmarkSearch512Layers(b *testing.B) { benchmarkSearch(b, 0, 512) } + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/trie +// BenchmarkSearch1Layer +// BenchmarkSearch1Layer-8 14062725 88.40 ns/op +func BenchmarkSearch1Layer(b *testing.B) { benchmarkSearch(b, 127, 128) } + +func benchmarkSearch(b *testing.B, depth int, total int) { + var ( + npath []byte + nhash common.Hash + nblob []byte + ) + // First, we set up 128 diff layers, with 3K items each + fill := func(parent layer, index int) *diffLayer { + nodes := make(map[common.Hash]map[string]*trienode.Node) + nodes[common.Hash{}] = make(map[string]*trienode.Node) + for i := 0; i < 3000; i++ { + var ( + path = testutil.RandBytes(32) + node = testutil.RandomNode() + ) + nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + if npath == nil && depth == index { + npath = common.CopyBytes(path) + nblob = common.CopyBytes(node.Blob) + nhash = node.Hash + } + } + return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) + } + var layer layer + layer = emptyLayer() + for i := 0; i < total; i++ { + layer = fill(layer, i) + } + b.ResetTimer() + + var ( + have []byte + err error + ) + for i := 0; i < b.N; i++ { + have, err = layer.Node(common.Hash{}, npath, nhash) + if err != nil { + b.Fatal(err) + } + } + if !bytes.Equal(have, nblob) { + b.Fatalf("have %x want %x", have, nblob) + } +} + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/trie +// BenchmarkPersist +// BenchmarkPersist-8 10 111252975 ns/op +func BenchmarkPersist(b *testing.B) { + // First, we set up 128 diff layers, with 3K items each + fill := func(parent layer) *diffLayer { + nodes := make(map[common.Hash]map[string]*trienode.Node) + nodes[common.Hash{}] = make(map[string]*trienode.Node) + for i := 0; i < 3000; i++ { + var ( + path = testutil.RandBytes(32) + node = testutil.RandomNode() + ) + nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + } + return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) + } + for i := 0; i < b.N; i++ { + b.StopTimer() + var layer layer + layer = emptyLayer() + for i := 1; i < 128; i++ { + layer = fill(layer) + } + b.StartTimer() + + dl, ok := layer.(*diffLayer) + if !ok { + break + } + dl.persist(false) + } +} + +// BenchmarkJournal benchmarks the performance for journaling the layers. 
+// +// BenchmarkJournal +// BenchmarkJournal-8 10 110969279 ns/op +func BenchmarkJournal(b *testing.B) { + b.SkipNow() + + // First, we set up 128 diff layers, with 3K items each + fill := func(parent layer) *diffLayer { + nodes := make(map[common.Hash]map[string]*trienode.Node) + nodes[common.Hash{}] = make(map[string]*trienode.Node) + for i := 0; i < 3000; i++ { + var ( + path = testutil.RandBytes(32) + node = testutil.RandomNode() + ) + nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + } + // TODO(rjl493456442) a non-nil state set is expected. + return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) + } + var layer layer + layer = emptyLayer() + for i := 0; i < 128; i++ { + layer = fill(layer) + } + b.ResetTimer() + + for i := 0; i < b.N; i++ { + layer.journal(new(bytes.Buffer)) + } +} diff --git a/trie/triedb/pathdb/disklayer.go b/trie/triedb/pathdb/disklayer.go new file mode 100644 index 000000000000..b526b3b7dd9d --- /dev/null +++ b/trie/triedb/pathdb/disklayer.go @@ -0,0 +1,296 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pathdb + +import ( + "errors" + "fmt" + "sync" + + "github.com/VictoriaMetrics/fastcache" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" + "golang.org/x/crypto/sha3" +) + +// diskLayer is a low level persistent layer built on top of a key-value store. +type diskLayer struct { + root common.Hash // Immutable, root hash to which this layer was made for + id uint64 // Immutable, corresponding state id + db *Database // Path-based trie database + cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs + buffer *nodebuffer // Node buffer to aggregate writes + stale bool // Signals that the layer became stale (state progressed) + lock sync.RWMutex // Lock used to protect stale flag +} + +// newDiskLayer creates a new disk layer based on the passing arguments. +func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.Cache, buffer *nodebuffer) *diskLayer { + // Initialize a clean cache if the memory allowance is not zero + // or reuse the provided cache if it is not nil (inherited from + // the original disk layer). + if cleans == nil && db.config.CleanSize != 0 { + cleans = fastcache.New(db.config.CleanSize) + } + return &diskLayer{ + root: root, + id: id, + db: db, + cleans: cleans, + buffer: buffer, + } +} + +// root implements the layer interface, returning root hash of corresponding state. +func (dl *diskLayer) rootHash() common.Hash { + return dl.root +} + +// stateID implements the layer interface, returning the state id of disk layer. 
+func (dl *diskLayer) stateID() uint64 { + return dl.id +} + +// parent implements the layer interface, returning nil as there's no layer +// below the disk. +func (dl *diskLayer) parentLayer() layer { + return nil +} + +// isStale return whether this layer has become stale (was flattened across) or if +// it's still live. +func (dl *diskLayer) isStale() bool { + dl.lock.RLock() + defer dl.lock.RUnlock() + + return dl.stale +} + +// markStale sets the stale flag as true. +func (dl *diskLayer) markStale() { + dl.lock.Lock() + defer dl.lock.Unlock() + + if dl.stale { + panic("triedb disk layer is stale") // we've committed into the same base from two children, boom + } + dl.stale = true +} + +// Node implements the layer interface, retrieving the trie node with the +// provided node info. No error will be returned if the node is not found. +func (dl *diskLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) { + dl.lock.RLock() + defer dl.lock.RUnlock() + + if dl.stale { + return nil, errSnapshotStale + } + // Try to retrieve the trie node from the not-yet-written + // node buffer first. Note the buffer is lock free since + // it's impossible to mutate the buffer before tagging the + // layer as stale. + n, err := dl.buffer.node(owner, path, hash) + if err != nil { + return nil, err + } + if n != nil { + dirtyHitMeter.Mark(1) + dirtyReadMeter.Mark(int64(len(n.Blob))) + return n.Blob, nil + } + dirtyMissMeter.Mark(1) + + // Try to retrieve the trie node from the clean memory cache + key := cacheKey(owner, path) + if dl.cleans != nil { + if blob := dl.cleans.Get(nil, key); len(blob) > 0 { + h := newHasher() + defer h.release() + + got := h.hash(blob) + if got == hash { + cleanHitMeter.Mark(1) + cleanReadMeter.Mark(int64(len(blob))) + return blob, nil + } + cleanFalseMeter.Mark(1) + log.Error("Unexpected trie node in clean cache", "owner", owner, "path", path, "expect", hash, "got", got) + } + cleanMissMeter.Mark(1) + } + // Try to retrieve the trie node from the disk. + var ( + nBlob []byte + nHash common.Hash + ) + if owner == (common.Hash{}) { + nBlob, nHash = rawdb.ReadAccountTrieNode(dl.db.diskdb, path) + } else { + nBlob, nHash = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path) + } + if nHash != hash { + diskFalseMeter.Mark(1) + log.Error("Unexpected trie node in disk", "owner", owner, "path", path, "expect", hash, "got", nHash) + return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path) + } + if dl.cleans != nil && len(nBlob) > 0 { + dl.cleans.Set(key, nBlob) + cleanWriteMeter.Mark(int64(len(nBlob))) + } + return nBlob, nil +} + +// update implements the layer interface, returning a new diff layer on top +// with the given state set. +func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer { + return newDiffLayer(dl, root, id, block, nodes, states) +} + +// commit merges the given bottom-most diff layer into the node buffer +// and returns a newly constructed disk layer. Note the current disk +// layer must be tagged as stale first to prevent re-access. +func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) { + dl.lock.Lock() + defer dl.lock.Unlock() + + // Construct and store the state history first. If crash happens + // after storing the state history but without flushing the + // corresponding states(journal), the stored state history will + // be truncated in the next restart. 
+ if dl.db.freezer != nil { + err := writeHistory(dl.db.diskdb, dl.db.freezer, bottom, dl.db.config.StateLimit) + if err != nil { + return nil, err + } + } + // Mark the diskLayer as stale before applying any mutations on top. + dl.stale = true + + // Store the root->id lookup afterwards. All stored lookups are + // identified by the **unique** state root. It's impossible that + // in the same chain blocks are not adjacent but have the same + // root. + if dl.id == 0 { + rawdb.WriteStateID(dl.db.diskdb, dl.root, 0) + } + rawdb.WriteStateID(dl.db.diskdb, bottom.rootHash(), bottom.stateID()) + + // Construct a new disk layer by merging the nodes from the provided + // diff layer, and flush the content in disk layer if there are too + // many nodes cached. The clean cache is inherited from the original + // disk layer for reusing. + ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.cleans, dl.buffer.commit(bottom.nodes)) + err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force) + if err != nil { + return nil, err + } + return ndl, nil +} + +// revert applies the given state history and return a reverted disk layer. +func (dl *diskLayer) revert(h *history, loader triestate.TrieLoader) (*diskLayer, error) { + if h.meta.root != dl.rootHash() { + return nil, errUnexpectedHistory + } + // Reject if the provided state history is incomplete. It's due to + // a large construct SELF-DESTRUCT which can't be handled because + // of memory limitation. + if len(h.meta.incomplete) > 0 { + return nil, errors.New("incomplete state history") + } + if dl.id == 0 { + return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable) + } + // Apply the reverse state changes upon the current state. This must + // be done before holding the lock in order to access state in "this" + // layer. + nodes, err := triestate.Apply(h.meta.parent, h.meta.root, h.accounts, h.storages, loader) + if err != nil { + return nil, err + } + // Mark the diskLayer as stale before applying any mutations on top. + dl.lock.Lock() + defer dl.lock.Unlock() + + dl.stale = true + + // State change may be applied to node buffer, or the persistent + // state, depends on if node buffer is empty or not. If the node + // buffer is not empty, it means that the state transition that + // needs to be reverted is not yet flushed and cached in node + // buffer, otherwise, manipulate persistent state directly. + if !dl.buffer.empty() { + err := dl.buffer.revert(dl.db.diskdb, nodes) + if err != nil { + return nil, err + } + } else { + batch := dl.db.diskdb.NewBatch() + writeNodes(batch, nodes, dl.cleans) + rawdb.WritePersistentStateID(batch, dl.id-1) + if err := batch.Write(); err != nil { + log.Crit("Failed to write states", "err", err) + } + } + return newDiskLayer(h.meta.parent, dl.id-1, dl.db, dl.cleans, dl.buffer), nil +} + +// setBufferSize sets the node buffer size to the provided value. +func (dl *diskLayer) setBufferSize(size int) error { + dl.lock.RLock() + defer dl.lock.RUnlock() + + if dl.stale { + return errSnapshotStale + } + return dl.buffer.setSize(size, dl.db.diskdb, dl.cleans, dl.id) +} + +// size returns the approximate size of cached nodes in the disk layer. +func (dl *diskLayer) size() common.StorageSize { + dl.lock.RLock() + defer dl.lock.RUnlock() + + if dl.stale { + return 0 + } + return common.StorageSize(dl.buffer.size) +} + +// hasher is used to compute the sha256 hash of the provided data. 
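//
// Editorial note: despite the wording above, the pooled hasher below wraps
// Keccak-256 (crypto.HashData over sha3.NewLegacyKeccak256), matching how the
// clean-cache entries are verified in Node:
//
//	h := newHasher()
//	defer h.release()
//	got := h.hash(blob) // compare against the expected node hash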
+type hasher struct{ sha crypto.KeccakState } + +var hasherPool = sync.Pool{ + New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, +} + +func newHasher() *hasher { + return hasherPool.Get().(*hasher) +} + +func (h *hasher) hash(data []byte) common.Hash { + return crypto.HashData(h.sha, data) +} + +func (h *hasher) release() { + hasherPool.Put(h) +} diff --git a/trie/triedb/pathdb/errors.go b/trie/triedb/pathdb/errors.go new file mode 100644 index 000000000000..f503a9c49d2e --- /dev/null +++ b/trie/triedb/pathdb/errors.go @@ -0,0 +1,51 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see + +package pathdb + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" +) + +var ( + // errSnapshotReadOnly is returned if the database is opened in read only mode + // and mutation is requested. + errSnapshotReadOnly = errors.New("read only") + + // errSnapshotStale is returned from data accessors if the underlying layer + // layer had been invalidated due to the chain progressing forward far enough + // to not maintain the layer's original state. + errSnapshotStale = errors.New("layer stale") + + // errUnexpectedHistory is returned if an unmatched state history is applied + // to the database for state rollback. + errUnexpectedHistory = errors.New("unexpected state history") + + // errStateUnrecoverable is returned if state is required to be reverted to + // a destination without associated state history available. + errStateUnrecoverable = errors.New("state is unrecoverable") + + // errUnexpectedNode is returned if the requested node with specified path is + // not hash matched with expectation. + errUnexpectedNode = errors.New("unexpected node") +) + +func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte) error { + return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x", errUnexpectedNode, loc, owner, path, expHash, gotHash) +} diff --git a/trie/triedb/pathdb/history.go b/trie/triedb/pathdb/history.go new file mode 100644 index 000000000000..c12fc5518472 --- /dev/null +++ b/trie/triedb/pathdb/history.go @@ -0,0 +1,641 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see + +package pathdb + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie/triestate" + "golang.org/x/exp/slices" +) + +// State history records the state changes involved in executing a block. The +// state can be reverted to the previous version by applying the associated +// history object (state reverse diff). State history objects are kept to +// guarantee that the system can perform state rollbacks in case of deep reorgs. +// +// Each state transition will generate a state history object. Note that not +// every block has a corresponding state history object. If a block performs +// no state changes whatsoever, no state history is created for it. Each state history +// will have a sequentially increasing number acting as its unique identifier. +// +// The state history is written to disk (ancient store) when the corresponding +// diff layer is merged into the disk layer. At the same time, the system can prune +// the oldest histories according to the configured retention limit. +// +// Disk State +// ^ +// | +// +------------+ +---------+ +---------+ +---------+ +// | Init State |---->| State 1 |---->| ... |---->| State n | +// +------------+ +---------+ +---------+ +---------+ +// +// +-----------+ +------+ +-----------+ +// | History 1 |----> | ... |---->| History n | +// +-----------+ +------+ +-----------+ +// +// # Rollback +// +// If the system wants to roll back to a previous state n, it needs to ensure +// all history objects from n+1 up to the current disk layer are present. The +// history objects are applied to the state in reverse order, starting from the +// current disk layer. + +const ( + accountIndexSize = common.AddressLength + 13 // The length of encoded account index + slotIndexSize = common.HashLength + 5 // The length of encoded slot index + historyMetaSize = 9 + 2*common.HashLength // The length of fixed size part of meta object + + stateHistoryVersion = uint8(0) // initial version of state history structure. +) + +// Each state history entry consists of five elements: +// +// # metadata +// This object contains a few meta fields, such as the associated state root, +// block number, version tag and so on. This object may contain an extra +// accountHash list which means the storage changes belonging to these accounts +// are incomplete due to large contract destruction. Such an incomplete history +// cannot be used for rollback or for serving archive state requests. +// +// # account index +// This object contains some index information of the account. For example, offset +// and length indicate the location of the data belonging to the account. Besides, +// storageOffset and storageSlots indicate the storage modification location +// belonging to the account. +// +// The size of each account index is *fixed*, and all indexes are sorted +// lexicographically. Thus binary search can be performed to quickly locate a +// specific account. +// +// # account data +// Account data is a concatenated byte stream composed of all account data. +// The data of an individual account can be resolved using the offset and length +// indicated by the corresponding account index.
+// +// fixed size +// ^ ^ +// / \ +// +-----------------+-----------------+----------------+-----------------+ +// | Account index 1 | Account index 2 | ... | Account index N | +// +-----------------+-----------------+----------------+-----------------+ +// | +// | length +// offset |----------------+ +// v v +// +----------------+----------------+----------------+----------------+ +// | Account data 1 | Account data 2 | ... | Account data N | +// +----------------+----------------+----------------+----------------+ +// +// # storage index +// This object is similar with account index. It's also fixed size and contains +// the location info of storage slot data. +// +// # storage data +// Storage data is a concatenated byte stream composed of all storage slot data. +// The storage slot data can be solved by the location info indicated by +// corresponding account index and storage slot index. +// +// fixed size +// ^ ^ +// / \ +// +-----------------+-----------------+----------------+-----------------+ +// | Account index 1 | Account index 2 | ... | Account index N | +// +-----------------+-----------------+----------------+-----------------+ +// | +// | storage slots +// storage offset |-----------------------------------------------------+ +// v v +// +-----------------+-----------------+-----------------+ +// | storage index 1 | storage index 2 | storage index 3 | +// +-----------------+-----------------+-----------------+ +// | length +// offset |-------------+ +// v v +// +-------------+ +// | slot data 1 | +// +-------------+ + +// accountIndex describes the metadata belonging to an account. +type accountIndex struct { + address common.Address // The address of account + length uint8 // The length of account data, size limited by 255 + offset uint32 // The offset of item in account data table + storageOffset uint32 // The offset of storage index in storage index table + storageSlots uint32 // The number of mutated storage slots belonging to the account +} + +// encode packs account index into byte stream. +func (i *accountIndex) encode() []byte { + var buf [accountIndexSize]byte + copy(buf[:], i.address.Bytes()) + buf[common.AddressLength] = i.length + binary.BigEndian.PutUint32(buf[common.AddressLength+1:], i.offset) + binary.BigEndian.PutUint32(buf[common.AddressLength+5:], i.storageOffset) + binary.BigEndian.PutUint32(buf[common.AddressLength+9:], i.storageSlots) + return buf[:] +} + +// decode unpacks account index from byte stream. +func (i *accountIndex) decode(blob []byte) { + i.address = common.BytesToAddress(blob[:common.AddressLength]) + i.length = blob[common.AddressLength] + i.offset = binary.BigEndian.Uint32(blob[common.AddressLength+1:]) + i.storageOffset = binary.BigEndian.Uint32(blob[common.AddressLength+5:]) + i.storageSlots = binary.BigEndian.Uint32(blob[common.AddressLength+9:]) +} + +// slotIndex describes the metadata belonging to a storage slot. +type slotIndex struct { + hash common.Hash // The hash of slot key + length uint8 // The length of storage slot, up to 32 bytes defined in protocol + offset uint32 // The offset of item in storage slot data table +} + +// encode packs slot index into byte stream. +func (i *slotIndex) encode() []byte { + var buf [slotIndexSize]byte + copy(buf[:common.HashLength], i.hash.Bytes()) + buf[common.HashLength] = i.length + binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset) + return buf[:] +} + +// decode unpack slot index from the byte stream. 
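// Illustrative sketch (not part of this patch): a round trip of the fixed-size
// account index described above, and how an account's payload is sliced out of
// the concatenated account data stream using the decoded offset and length. It
// assumes it lives alongside history.go in the pathdb package; the function
// name and the sample values are ours, not the patch's.
func exampleAccountIndex() {
	accountData := []byte{0xc0, 0x01, 0x02} // concatenated account data stream
	idx := accountIndex{
		address: common.HexToAddress("0x000000000000000000000000000000000000dead"),
		length:  3, // this account's payload occupies 3 bytes
		offset:  0, // starting at the beginning of the data stream
	}
	blob := idx.encode() // always accountIndexSize (20 + 13) bytes

	var dec accountIndex
	dec.decode(blob)
	payload := accountData[dec.offset : dec.offset+uint32(dec.length)]
	fmt.Printf("account %x -> data %x\n", dec.address, payload)
}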
+func (i *slotIndex) decode(blob []byte) { + i.hash = common.BytesToHash(blob[:common.HashLength]) + i.length = blob[common.HashLength] + i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:]) +} + +// meta describes the meta data of state history object. +type meta struct { + version uint8 // version tag of history object + parent common.Hash // prev-state root before the state transition + root common.Hash // post-state root after the state transition + block uint64 // associated block number + incomplete []common.Address // list of address whose storage set is incomplete +} + +// encode packs the meta object into byte stream. +func (m *meta) encode() []byte { + buf := make([]byte, historyMetaSize+len(m.incomplete)*common.AddressLength) + buf[0] = m.version + copy(buf[1:1+common.HashLength], m.parent.Bytes()) + copy(buf[1+common.HashLength:1+2*common.HashLength], m.root.Bytes()) + binary.BigEndian.PutUint64(buf[1+2*common.HashLength:historyMetaSize], m.block) + for i, h := range m.incomplete { + copy(buf[i*common.AddressLength+historyMetaSize:], h.Bytes()) + } + return buf[:] +} + +// decode unpacks the meta object from byte stream. +func (m *meta) decode(blob []byte) error { + if len(blob) < 1 { + return fmt.Errorf("no version tag") + } + switch blob[0] { + case stateHistoryVersion: + if len(blob) < historyMetaSize { + return fmt.Errorf("invalid state history meta, len: %d", len(blob)) + } + if (len(blob)-historyMetaSize)%common.AddressLength != 0 { + return fmt.Errorf("corrupted state history meta, len: %d", len(blob)) + } + m.version = blob[0] + m.parent = common.BytesToHash(blob[1 : 1+common.HashLength]) + m.root = common.BytesToHash(blob[1+common.HashLength : 1+2*common.HashLength]) + m.block = binary.BigEndian.Uint64(blob[1+2*common.HashLength : historyMetaSize]) + for pos := historyMetaSize; pos < len(blob); { + m.incomplete = append(m.incomplete, common.BytesToAddress(blob[pos:pos+common.AddressLength])) + pos += common.AddressLength + } + return nil + default: + return fmt.Errorf("unknown version %d", blob[0]) + } +} + +// history represents a set of state changes belong to a block along with +// the metadata including the state roots involved in the state transition. +// State history objects in disk are linked with each other by a unique id +// (8-bytes integer), the oldest state history object can be pruned on demand +// in order to control the storage size. +type history struct { + meta *meta // Meta data of history + accounts map[common.Address][]byte // Account data keyed by its address hash + accountList []common.Address // Sorted account hash list + storages map[common.Address]map[common.Hash][]byte // Storage data keyed by its address hash and slot hash + storageList map[common.Address][]common.Hash // Sorted slot hash list +} + +// newHistory constructs the state history object with provided state change set. 
+func newHistory(root common.Hash, parent common.Hash, block uint64, states *triestate.Set) *history { + var ( + accountList []common.Address + storageList = make(map[common.Address][]common.Hash) + incomplete []common.Address + ) + for addr := range states.Accounts { + accountList = append(accountList, addr) + } + slices.SortFunc(accountList, func(a, b common.Address) bool { return a.Less(b) }) + + for addr, slots := range states.Storages { + slist := make([]common.Hash, 0, len(slots)) + for slotHash := range slots { + slist = append(slist, slotHash) + } + slices.SortFunc(slist, func(a, b common.Hash) bool { return a.Less(b) }) + storageList[addr] = slist + } + for addr := range states.Incomplete { + incomplete = append(incomplete, addr) + } + slices.SortFunc(incomplete, func(a, b common.Address) bool { return a.Less(b) }) + + return &history{ + meta: &meta{ + version: stateHistoryVersion, + parent: parent, + root: root, + block: block, + incomplete: incomplete, + }, + accounts: states.Accounts, + accountList: accountList, + storages: states.Storages, + storageList: storageList, + } +} + +// encode serializes the state history and returns four byte streams represent +// concatenated account/storage data, account/storage indexes respectively. +func (h *history) encode() ([]byte, []byte, []byte, []byte) { + var ( + slotNumber uint32 // the number of processed slots + accountData []byte // the buffer for concatenated account data + storageData []byte // the buffer for concatenated storage data + accountIndexes []byte // the buffer for concatenated account index + storageIndexes []byte // the buffer for concatenated storage index + ) + for _, addr := range h.accountList { + accIndex := accountIndex{ + address: addr, + length: uint8(len(h.accounts[addr])), + offset: uint32(len(accountData)), + } + slots, exist := h.storages[addr] + if exist { + // Encode storage slots in order + for _, slotHash := range h.storageList[addr] { + sIndex := slotIndex{ + hash: slotHash, + length: uint8(len(slots[slotHash])), + offset: uint32(len(storageData)), + } + storageData = append(storageData, slots[slotHash]...) + storageIndexes = append(storageIndexes, sIndex.encode()...) + } + // Fill up the storage meta in account index + accIndex.storageOffset = slotNumber + accIndex.storageSlots = uint32(len(slots)) + slotNumber += uint32(len(slots)) + } + accountData = append(accountData, h.accounts[addr]...) + accountIndexes = append(accountIndexes, accIndex.encode()...) + } + return accountData, storageData, accountIndexes, storageIndexes +} + +// decoder wraps the byte streams for decoding with extra meta fields. +type decoder struct { + accountData []byte // the buffer for concatenated account data + storageData []byte // the buffer for concatenated storage data + accountIndexes []byte // the buffer for concatenated account index + storageIndexes []byte // the buffer for concatenated storage index + + lastAccount *common.Address // the address of last resolved account + lastAccountRead uint32 // the read-cursor position of account data + lastSlotIndexRead uint32 // the read-cursor position of storage slot index + lastSlotDataRead uint32 // the read-cursor position of storage slot data +} + +// verify validates the provided byte streams for decoding state history. A few +// checks will be performed to quickly detect data corruption. 
The byte stream +// is regarded as corrupted if: +// +// - account index buffer is empty (an empty state set is invalid) +// - account index/storage index buffer is not aligned +// +// Note, these situations are allowed: +// +// - empty account data: all touched accounts were previously non-existent +// - empty storage set: no slots are modified +func (r *decoder) verify() error { + if len(r.accountIndexes)%accountIndexSize != 0 || len(r.accountIndexes) == 0 { + return fmt.Errorf("invalid account index, len: %d", len(r.accountIndexes)) + } + if len(r.storageIndexes)%slotIndexSize != 0 { + return fmt.Errorf("invalid storage index, len: %d", len(r.storageIndexes)) + } + return nil +} + +// readAccount parses the account from the byte stream at the specified position. +func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) { + // Decode account index from the index byte stream. + var index accountIndex + if (pos+1)*accountIndexSize > len(r.accountIndexes) { + return accountIndex{}, nil, errors.New("account data buffer is corrupted") + } + index.decode(r.accountIndexes[pos*accountIndexSize : (pos+1)*accountIndexSize]) + + // Perform validation before parsing account data, ensure + // - accounts are sorted in order in the byte stream + // - account data is strictly encoded with no gap inside + // - account data is not out-of-slice + if r.lastAccount != nil { // zero address is possible + if bytes.Compare(r.lastAccount.Bytes(), index.address.Bytes()) >= 0 { + return accountIndex{}, nil, errors.New("account is not in order") + } + } + if index.offset != r.lastAccountRead { + return accountIndex{}, nil, errors.New("account data buffer is gapped") + } + last := index.offset + uint32(index.length) + if uint32(len(r.accountData)) < last { + return accountIndex{}, nil, errors.New("account data buffer is corrupted") + } + data := r.accountData[index.offset:last] + + r.lastAccount = &index.address + r.lastAccountRead = last + + return index, data, nil +} + +// readStorage parses the storage slots from the byte stream for the specified account.
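// Illustrative sketch (not part of this patch): because every account index is
// exactly accountIndexSize bytes and the indexes are sorted by address, a single
// account can be located with a binary search over the index stream, as the
// design notes above suggest. The helper name is ours; it additionally needs
// the standard "sort" package, which history.go does not import.
func lookupAccount(accountIndexes, accountData []byte, addr common.Address) ([]byte, bool) {
	count := len(accountIndexes) / accountIndexSize
	// Find the first index entry whose address is >= the requested one.
	pos := sort.Search(count, func(i int) bool {
		item := accountIndexes[i*accountIndexSize : i*accountIndexSize+common.AddressLength]
		return bytes.Compare(item, addr.Bytes()) >= 0
	})
	if pos == count {
		return nil, false
	}
	var index accountIndex
	index.decode(accountIndexes[pos*accountIndexSize : (pos+1)*accountIndexSize])
	if index.address != addr {
		return nil, false
	}
	// Slice the account payload out of the concatenated data stream.
	return accountData[index.offset : index.offset+uint32(index.length)], true
}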
+func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) { + var ( + last common.Hash + list []common.Hash + storage = make(map[common.Hash][]byte) + ) + for j := 0; j < int(accIndex.storageSlots); j++ { + var ( + index slotIndex + start = (accIndex.storageOffset + uint32(j)) * uint32(slotIndexSize) + end = (accIndex.storageOffset + uint32(j+1)) * uint32(slotIndexSize) + ) + // Perform validation before parsing storage slot data, ensure + // - slot index is not out-of-slice + // - slot data is not out-of-slice + // - slot is sorted in order in byte stream + // - slot indexes is strictly encoded with no gap inside + // - slot data is strictly encoded with no gap inside + if start != r.lastSlotIndexRead { + return nil, nil, errors.New("storage index buffer is gapped") + } + if uint32(len(r.storageIndexes)) < end { + return nil, nil, errors.New("storage index buffer is corrupted") + } + index.decode(r.storageIndexes[start:end]) + + if bytes.Compare(last.Bytes(), index.hash.Bytes()) >= 0 { + return nil, nil, errors.New("storage slot is not in order") + } + if index.offset != r.lastSlotDataRead { + return nil, nil, errors.New("storage data buffer is gapped") + } + sEnd := index.offset + uint32(index.length) + if uint32(len(r.storageData)) < sEnd { + return nil, nil, errors.New("storage data buffer is corrupted") + } + storage[index.hash] = r.storageData[r.lastSlotDataRead:sEnd] + list = append(list, index.hash) + + last = index.hash + r.lastSlotIndexRead = end + r.lastSlotDataRead = sEnd + } + return list, storage, nil +} + +// decode deserializes the account and storage data from the provided byte stream. +func (h *history) decode(accountData, storageData, accountIndexes, storageIndexes []byte) error { + var ( + accounts = make(map[common.Address][]byte) + storages = make(map[common.Address]map[common.Hash][]byte) + accountList []common.Address + storageList = make(map[common.Address][]common.Hash) + + r = &decoder{ + accountData: accountData, + storageData: storageData, + accountIndexes: accountIndexes, + storageIndexes: storageIndexes, + } + ) + if err := r.verify(); err != nil { + return err + } + for i := 0; i < len(accountIndexes)/accountIndexSize; i++ { + // Resolve account first + accIndex, accData, err := r.readAccount(i) + if err != nil { + return err + } + accounts[accIndex.address] = accData + accountList = append(accountList, accIndex.address) + + // Resolve storage slots + slotList, slotData, err := r.readStorage(accIndex) + if err != nil { + return err + } + if len(slotList) > 0 { + storageList[accIndex.address] = slotList + storages[accIndex.address] = slotData + } + } + h.accounts = accounts + h.accountList = accountList + h.storages = storages + h.storageList = storageList + return nil +} + +// readHistory reads and decodes the state history object by the given id. 
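// Illustrative sketch (not part of this patch): inspecting which accounts a
// stored history entry would roll back, using the readHistory helper defined
// just below. The function name is ours.
func accountsTouchedBy(freezer *rawdb.ResettableFreezer, id uint64) ([]common.Address, error) {
	h, err := readHistory(freezer, id)
	if err != nil {
		return nil, err
	}
	// accountList is the lexicographically sorted set of mutated accounts.
	return h.accountList, nil
}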
+func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) { + blob := rawdb.ReadStateHistoryMeta(freezer, id) + if len(blob) == 0 { + return nil, fmt.Errorf("state history not found %d", id) + } + var m meta + if err := m.decode(blob); err != nil { + return nil, err + } + var ( + dec = history{meta: &m} + accountData = rawdb.ReadStateAccountHistory(freezer, id) + storageData = rawdb.ReadStateStorageHistory(freezer, id) + accountIndexes = rawdb.ReadStateAccountIndex(freezer, id) + storageIndexes = rawdb.ReadStateStorageIndex(freezer, id) + ) + if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil { + return nil, err + } + return &dec, nil +} + +// writeHistory writes the state history with provided state set. After +// storing the corresponding state history, it will also prune the stale +// histories from the disk with the given threshold. +func writeHistory(db ethdb.KeyValueStore, freezer *rawdb.ResettableFreezer, dl *diffLayer, limit uint64) error { + // Short circuit if state set is not available. + if dl.states == nil { + return errors.New("state change set is not available") + } + var ( + err error + n int + start = time.Now() + h = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states) + ) + accountData, storageData, accountIndex, storageIndex := h.encode() + dataSize := common.StorageSize(len(accountData) + len(storageData)) + indexSize := common.StorageSize(len(accountIndex) + len(storageIndex)) + + // Write history data into five freezer table respectively. + rawdb.WriteStateHistory(freezer, dl.stateID(), h.meta.encode(), accountIndex, storageIndex, accountData, storageData) + + // Prune stale state histories based on the config. + if limit != 0 && dl.stateID() > limit { + n, err = truncateFromTail(db, freezer, dl.stateID()-limit) + if err != nil { + return err + } + } + historyDataBytesMeter.Mark(int64(dataSize)) + historyIndexBytesMeter.Mark(int64(indexSize)) + historyBuildTimeMeter.UpdateSince(start) + log.Debug("Stored state history", "id", dl.stateID(), "block", dl.block, "data", dataSize, "index", indexSize, "pruned", n, "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + +// checkHistories retrieves a batch of meta objects with the specified range +// and performs the callback on each item. +func checkHistories(freezer *rawdb.ResettableFreezer, start, count uint64, check func(*meta) error) error { + for count > 0 { + number := count + if number > 10000 { + number = 10000 // split the big read into small chunks + } + blobs, err := rawdb.ReadStateHistoryMetaList(freezer, start, number) + if err != nil { + return err + } + for _, blob := range blobs { + var dec meta + if err := dec.decode(blob); err != nil { + return err + } + if err := check(&dec); err != nil { + return err + } + } + count -= uint64(len(blobs)) + start += uint64(len(blobs)) + } + return nil +} + +// truncateFromHead removes the extra state histories from the head with the given +// parameters. It returns the number of items removed from the head. 
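// Illustrative sketch (not part of this patch): using checkHistories to verify
// that a range of stored histories forms a continuous chain, i.e. each entry's
// parent root equals the previous entry's post-state root. The helper name is
// ours.
func verifyHistoryChain(freezer *rawdb.ResettableFreezer, start, count uint64) error {
	var last common.Hash
	return checkHistories(freezer, start, count, func(m *meta) error {
		if last != (common.Hash{}) && m.parent != last {
			return fmt.Errorf("history chain broken at block %d", m.block)
		}
		last = m.root
		return nil
	})
}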
+func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead uint64) (int, error) { + ohead, err := freezer.Ancients() + if err != nil { + return 0, err + } + if ohead <= nhead { + return 0, nil + } + // Load the meta objects in range [nhead+1, ohead] + blobs, err := rawdb.ReadStateHistoryMetaList(freezer, nhead+1, ohead-nhead) + if err != nil { + return 0, err + } + batch := db.NewBatch() + for _, blob := range blobs { + var m meta + if err := m.decode(blob); err != nil { + return 0, err + } + rawdb.DeleteStateID(batch, m.root) + } + if err := batch.Write(); err != nil { + return 0, err + } + ohead, err = freezer.TruncateHead(nhead) + if err != nil { + return 0, err + } + return int(ohead - nhead), nil +} + +// truncateFromTail removes the extra state histories from the tail with the given +// parameters. It returns the number of items removed from the tail. +func truncateFromTail(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, ntail uint64) (int, error) { + otail, err := freezer.Tail() + if err != nil { + return 0, err + } + if otail >= ntail { + return 0, nil + } + // Load the meta objects in range [otail+1, ntail] + blobs, err := rawdb.ReadStateHistoryMetaList(freezer, otail+1, ntail-otail) + if err != nil { + return 0, err + } + batch := db.NewBatch() + for _, blob := range blobs { + var m meta + if err := m.decode(blob); err != nil { + return 0, err + } + rawdb.DeleteStateID(batch, m.root) + } + if err := batch.Write(); err != nil { + return 0, err + } + otail, err = freezer.TruncateTail(ntail) + if err != nil { + return 0, err + } + return int(ntail - otail), nil +} diff --git a/trie/triedb/pathdb/history_test.go b/trie/triedb/pathdb/history_test.go new file mode 100644 index 000000000000..6c250c2591cc --- /dev/null +++ b/trie/triedb/pathdb/history_test.go @@ -0,0 +1,290 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see + +package pathdb + +import ( + "bytes" + "fmt" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie/testutil" + "github.com/ethereum/go-ethereum/trie/triestate" +) + +// randomStateSet generates a random state change set. 
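// Illustrative sketch (not part of this patch): how a retention limit maps onto
// the truncateFromTail helper above, mirroring the pruning step performed by
// writeHistory. A limit of zero means keep all histories; otherwise everything
// with id <= head-limit is dropped. The helper name is ours.
func pruneToLimit(db ethdb.KeyValueStore, freezer *rawdb.ResettableFreezer, head, limit uint64) (int, error) {
	if limit == 0 || head <= limit {
		return 0, nil // keep everything, or nothing old enough to prune yet
	}
	return truncateFromTail(db, freezer, head-limit)
}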
+func randomStateSet(n int) *triestate.Set { + var ( + accounts = make(map[common.Address][]byte) + storages = make(map[common.Address]map[common.Hash][]byte) + ) + for i := 0; i < n; i++ { + addr := testutil.RandomAddress() + storages[addr] = make(map[common.Hash][]byte) + for j := 0; j < 3; j++ { + v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32))) + storages[addr][testutil.RandomHash()] = v + } + account := generateAccount(types.EmptyRootHash) + accounts[addr] = types.SlimAccountRLP(account) + } + return triestate.New(accounts, storages, nil) +} + +func makeHistory() *history { + return newHistory(testutil.RandomHash(), types.EmptyRootHash, 0, randomStateSet(3)) +} + +func makeHistories(n int) []*history { + var ( + parent = types.EmptyRootHash + result []*history + ) + for i := 0; i < n; i++ { + root := testutil.RandomHash() + h := newHistory(root, parent, uint64(i), randomStateSet(3)) + parent = root + result = append(result, h) + } + return result +} + +func TestEncodeDecodeHistory(t *testing.T) { + var ( + m meta + dec history + obj = makeHistory() + ) + // check if meta data can be correctly encode/decode + blob := obj.meta.encode() + if err := m.decode(blob); err != nil { + t.Fatalf("Failed to decode %v", err) + } + if !reflect.DeepEqual(&m, obj.meta) { + t.Fatal("meta is mismatched") + } + + // check if account/storage data can be correctly encode/decode + accountData, storageData, accountIndexes, storageIndexes := obj.encode() + if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil { + t.Fatalf("Failed to decode, err: %v", err) + } + if !compareSet(dec.accounts, obj.accounts) { + t.Fatal("account data is mismatched") + } + if !compareStorages(dec.storages, obj.storages) { + t.Fatal("storage data is mismatched") + } + if !compareList(dec.accountList, obj.accountList) { + t.Fatal("account list is mismatched") + } + if !compareStorageList(dec.storageList, obj.storageList) { + t.Fatal("storage list is mismatched") + } +} + +func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.ResettableFreezer, id uint64, root common.Hash, exist bool) { + blob := rawdb.ReadStateHistoryMeta(freezer, id) + if exist && len(blob) == 0 { + t.Fatalf("Failed to load trie history, %d", id) + } + if !exist && len(blob) != 0 { + t.Fatalf("Unexpected trie history, %d", id) + } + if exist && rawdb.ReadStateID(db, root) == nil { + t.Fatalf("Root->ID mapping is not found, %d", id) + } + if !exist && rawdb.ReadStateID(db, root) != nil { + t.Fatalf("Unexpected root->ID mapping, %d", id) + } +} + +func checkHistoriesInRange(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.ResettableFreezer, from, to uint64, roots []common.Hash, exist bool) { + for i, j := from, 0; i <= to; i, j = i+1, j+1 { + checkHistory(t, db, freezer, i, roots[j], exist) + } +} + +func TestTruncateHeadHistory(t *testing.T) { + var ( + roots []common.Hash + hs = makeHistories(10) + db = rawdb.NewMemoryDatabase() + freezer, _ = openFreezer(t.TempDir(), false) + ) + defer freezer.Close() + + for i := 0; i < len(hs); i++ { + accountData, storageData, accountIndex, storageIndex := hs[i].encode() + rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData) + rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1)) + roots = append(roots, hs[i].meta.root) + } + for size := len(hs); size > 0; size-- { + pruned, err := truncateFromHead(db, freezer, uint64(size-1)) + if err != nil { + t.Fatalf("Failed to truncate from 
head %v", err) + } + if pruned != 1 { + t.Error("Unexpected pruned items", "want", 1, "got", pruned) + } + checkHistoriesInRange(t, db, freezer, uint64(size), uint64(10), roots[size-1:], false) + checkHistoriesInRange(t, db, freezer, uint64(1), uint64(size-1), roots[:size-1], true) + } +} + +func TestTruncateTailHistory(t *testing.T) { + var ( + roots []common.Hash + hs = makeHistories(10) + db = rawdb.NewMemoryDatabase() + freezer, _ = openFreezer(t.TempDir(), false) + ) + defer freezer.Close() + + for i := 0; i < len(hs); i++ { + accountData, storageData, accountIndex, storageIndex := hs[i].encode() + rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData) + rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1)) + roots = append(roots, hs[i].meta.root) + } + for newTail := 1; newTail < len(hs); newTail++ { + pruned, _ := truncateFromTail(db, freezer, uint64(newTail)) + if pruned != 1 { + t.Error("Unexpected pruned items", "want", 1, "got", pruned) + } + checkHistoriesInRange(t, db, freezer, uint64(1), uint64(newTail), roots[:newTail], false) + checkHistoriesInRange(t, db, freezer, uint64(newTail+1), uint64(10), roots[newTail:], true) + } +} + +func TestTruncateTailHistories(t *testing.T) { + var cases = []struct { + limit uint64 + expPruned int + maxPruned uint64 + minUnpruned uint64 + empty bool + }{ + { + 1, 9, 9, 10, false, + }, + { + 0, 10, 10, 0 /* no meaning */, true, + }, + { + 10, 0, 0, 1, false, + }, + } + for i, c := range cases { + var ( + roots []common.Hash + hs = makeHistories(10) + db = rawdb.NewMemoryDatabase() + freezer, _ = openFreezer(t.TempDir()+fmt.Sprintf("%d", i), false) + ) + defer freezer.Close() + + for i := 0; i < len(hs); i++ { + accountData, storageData, accountIndex, storageIndex := hs[i].encode() + rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData) + rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1)) + roots = append(roots, hs[i].meta.root) + } + pruned, _ := truncateFromTail(db, freezer, uint64(10)-c.limit) + if pruned != c.expPruned { + t.Error("Unexpected pruned items", "want", c.expPruned, "got", pruned) + } + if c.empty { + checkHistoriesInRange(t, db, freezer, uint64(1), uint64(10), roots, false) + } else { + tail := 10 - int(c.limit) + checkHistoriesInRange(t, db, freezer, uint64(1), c.maxPruned, roots[:tail], false) + checkHistoriesInRange(t, db, freezer, c.minUnpruned, uint64(10), roots[tail:], true) + } + } +} + +// openFreezer initializes the freezer instance for storing state histories. 
+func openFreezer(datadir string, readOnly bool) (*rawdb.ResettableFreezer, error) { + return rawdb.NewStateHistoryFreezer(datadir, readOnly) +} + +func compareSet[k comparable](a, b map[k][]byte) bool { + if len(a) != len(b) { + return false + } + for key, valA := range a { + valB, ok := b[key] + if !ok { + return false + } + if !bytes.Equal(valA, valB) { + return false + } + } + return true +} + +func compareList[k comparable](a, b []k) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +func compareStorages(a, b map[common.Address]map[common.Hash][]byte) bool { + if len(a) != len(b) { + return false + } + for h, subA := range a { + subB, ok := b[h] + if !ok { + return false + } + if !compareSet(subA, subB) { + return false + } + } + return true +} + +func compareStorageList(a, b map[common.Address][]common.Hash) bool { + if len(a) != len(b) { + return false + } + for h, la := range a { + lb, ok := b[h] + if !ok { + return false + } + if !compareList(la, lb) { + return false + } + } + return true +} diff --git a/trie/triedb/pathdb/journal.go b/trie/triedb/pathdb/journal.go new file mode 100644 index 000000000000..d8c7d39fb9bd --- /dev/null +++ b/trie/triedb/pathdb/journal.go @@ -0,0 +1,378 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pathdb + +import ( + "bytes" + "errors" + "fmt" + "io" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" +) + +var ( + errMissJournal = errors.New("journal not found") + errMissVersion = errors.New("version not found") + errUnexpectedVersion = errors.New("unexpected journal version") + errMissDiskRoot = errors.New("disk layer root not found") + errUnmatchedJournal = errors.New("unmatched journal") +) + +const journalVersion uint64 = 0 + +// journalNode represents a trie node persisted in the journal. +type journalNode struct { + Path []byte // Path of the node in the trie + Blob []byte // RLP-encoded trie node blob, nil means the node is deleted +} + +// journalNodes represents a list trie nodes belong to a single account +// or the main account trie. +type journalNodes struct { + Owner common.Hash + Nodes []journalNode +} + +// journalAccounts represents a list accounts belong to the layer. +type journalAccounts struct { + Addresses []common.Address + Accounts [][]byte +} + +// journalStorage represents a list of storage slots belong to an account. 
+type journalStorage struct { + Incomplete bool + Account common.Address + Hashes []common.Hash + Slots [][]byte +} + +// loadJournal tries to parse the layer journal from the disk. +func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) { + journal := rawdb.ReadTrieJournal(db.diskdb) + if len(journal) == 0 { + return nil, errMissJournal + } + r := rlp.NewStream(bytes.NewReader(journal), 0) + + // Firstly, resolve the first element as the journal version + version, err := r.Uint64() + if err != nil { + return nil, errMissVersion + } + if version != journalVersion { + return nil, fmt.Errorf("%w want %d got %d", errUnexpectedVersion, journalVersion, version) + } + // Secondly, resolve the disk layer root, ensure it's continuous + // with disk layer. Note now we can ensure it's the layer journal + // correct version, so we expect everything can be resolved properly. + var root common.Hash + if err := r.Decode(&root); err != nil { + return nil, errMissDiskRoot + } + // The journal is not matched with persistent state, discard them. + // It can happen that geth crashes without persisting the journal. + if !bytes.Equal(root.Bytes(), diskRoot.Bytes()) { + return nil, fmt.Errorf("%w want %x got %x", errUnmatchedJournal, root, diskRoot) + } + // Load the disk layer from the journal + base, err := db.loadDiskLayer(r) + if err != nil { + return nil, err + } + // Load all the diff layers from the journal + head, err := db.loadDiffLayer(base, r) + if err != nil { + return nil, err + } + log.Debug("Loaded layer journal", "diskroot", diskRoot, "diffhead", head.rootHash()) + return head, nil +} + +// loadLayers loads a pre-existing state layer backed by a key-value store. +func (db *Database) loadLayers() layer { + // Retrieve the root node of persistent state. + _, root := rawdb.ReadAccountTrieNode(db.diskdb, nil) + root = types.TrieRootHash(root) + + // Load the layers by resolving the journal + head, err := db.loadJournal(root) + if err == nil { + return head + } + // journal is not matched(or missing) with the persistent state, discard + // it. Display log for discarding journal, but try to avoid showing + // useless information when the db is created from scratch. + if !(root == types.EmptyRootHash && errors.Is(err, errMissJournal)) { + log.Info("Failed to load journal, discard it", "err", err) + } + // Return single layer with persistent state. + return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, newNodeBuffer(db.bufferSize, nil, 0)) +} + +// loadDiskLayer reads the binary blob from the layer journal, reconstructing +// a new disk layer on it. +func (db *Database) loadDiskLayer(r *rlp.Stream) (layer, error) { + // Resolve disk layer root + var root common.Hash + if err := r.Decode(&root); err != nil { + return nil, fmt.Errorf("load disk root: %v", err) + } + // Resolve the state id of disk layer, it can be different + // with the persistent id tracked in disk, the id distance + // is the number of transitions aggregated in disk layer. 
+ var id uint64 + if err := r.Decode(&id); err != nil { + return nil, fmt.Errorf("load state id: %v", err) + } + stored := rawdb.ReadPersistentStateID(db.diskdb) + if stored > id { + return nil, fmt.Errorf("invalid state id: stored %d resolved %d", stored, id) + } + // Resolve nodes cached in node buffer + var encoded []journalNodes + if err := r.Decode(&encoded); err != nil { + return nil, fmt.Errorf("load disk nodes: %v", err) + } + nodes := make(map[common.Hash]map[string]*trienode.Node) + for _, entry := range encoded { + subset := make(map[string]*trienode.Node) + for _, n := range entry.Nodes { + if len(n.Blob) > 0 { + subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob) + } else { + subset[string(n.Path)] = trienode.NewDeleted() + } + } + nodes[entry.Owner] = subset + } + // Calculate the internal state transitions by id difference. + base := newDiskLayer(root, id, db, nil, newNodeBuffer(db.bufferSize, nodes, id-stored)) + return base, nil +} + +// loadDiffLayer reads the next sections of a layer journal, reconstructing a new +// diff and verifying that it can be linked to the requested parent. +func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) { + // Read the next diff journal entry + var root common.Hash + if err := r.Decode(&root); err != nil { + // The first read may fail with EOF, marking the end of the journal + if err == io.EOF { + return parent, nil + } + return nil, fmt.Errorf("load diff root: %v", err) + } + var block uint64 + if err := r.Decode(&block); err != nil { + return nil, fmt.Errorf("load block number: %v", err) + } + // Read in-memory trie nodes from journal + var encoded []journalNodes + if err := r.Decode(&encoded); err != nil { + return nil, fmt.Errorf("load diff nodes: %v", err) + } + nodes := make(map[common.Hash]map[string]*trienode.Node) + for _, entry := range encoded { + subset := make(map[string]*trienode.Node) + for _, n := range entry.Nodes { + if len(n.Blob) > 0 { + subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob) + } else { + subset[string(n.Path)] = trienode.NewDeleted() + } + } + nodes[entry.Owner] = subset + } + // Read state changes from journal + var ( + jaccounts journalAccounts + jstorages []journalStorage + accounts = make(map[common.Address][]byte) + storages = make(map[common.Address]map[common.Hash][]byte) + incomplete = make(map[common.Address]struct{}) + ) + if err := r.Decode(&jaccounts); err != nil { + return nil, fmt.Errorf("load diff accounts: %v", err) + } + for i, addr := range jaccounts.Addresses { + accounts[addr] = jaccounts.Accounts[i] + } + if err := r.Decode(&jstorages); err != nil { + return nil, fmt.Errorf("load diff storages: %v", err) + } + for _, entry := range jstorages { + set := make(map[common.Hash][]byte) + for i, h := range entry.Hashes { + if len(entry.Slots[i]) > 0 { + set[h] = entry.Slots[i] + } else { + set[h] = nil + } + } + if entry.Incomplete { + incomplete[entry.Account] = struct{}{} + } + storages[entry.Account] = set + } + return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, nodes, triestate.New(accounts, storages, incomplete)), r) +} + +// journal implements the layer interface, marshaling the un-flushed trie nodes +// along with layer meta data into provided byte buffer. +func (dl *diskLayer) journal(w io.Writer) error { + dl.lock.RLock() + defer dl.lock.RUnlock() + + // Ensure the layer didn't get stale + if dl.stale { + return errSnapshotStale + } + // Step one, write the disk root into the journal. 
+ if err := rlp.Encode(w, dl.root); err != nil { + return err + } + // Step two, write the corresponding state id into the journal + if err := rlp.Encode(w, dl.id); err != nil { + return err + } + // Step three, write all unwritten nodes into the journal + nodes := make([]journalNodes, 0, len(dl.buffer.nodes)) + for owner, subset := range dl.buffer.nodes { + entry := journalNodes{Owner: owner} + for path, node := range subset { + entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob}) + } + nodes = append(nodes, entry) + } + if err := rlp.Encode(w, nodes); err != nil { + return err + } + log.Debug("Journaled pathdb disk layer", "root", dl.root, "nodes", len(dl.buffer.nodes)) + return nil +} + +// journal implements the layer interface, writing the memory layer contents +// into a buffer to be stored in the database as the layer journal. +func (dl *diffLayer) journal(w io.Writer) error { + dl.lock.RLock() + defer dl.lock.RUnlock() + + // journal the parent first + if err := dl.parent.journal(w); err != nil { + return err + } + // Everything below was journaled, persist this layer too + if err := rlp.Encode(w, dl.root); err != nil { + return err + } + if err := rlp.Encode(w, dl.block); err != nil { + return err + } + // Write the accumulated trie nodes into buffer + nodes := make([]journalNodes, 0, len(dl.nodes)) + for owner, subset := range dl.nodes { + entry := journalNodes{Owner: owner} + for path, node := range subset { + entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob}) + } + nodes = append(nodes, entry) + } + if err := rlp.Encode(w, nodes); err != nil { + return err + } + // Write the accumulated state changes into buffer + var jacct journalAccounts + for addr, account := range dl.states.Accounts { + jacct.Addresses = append(jacct.Addresses, addr) + jacct.Accounts = append(jacct.Accounts, account) + } + if err := rlp.Encode(w, jacct); err != nil { + return err + } + storage := make([]journalStorage, 0, len(dl.states.Storages)) + for addr, slots := range dl.states.Storages { + entry := journalStorage{Account: addr} + if _, ok := dl.states.Incomplete[addr]; ok { + entry.Incomplete = true + } + for slotHash, slot := range slots { + entry.Hashes = append(entry.Hashes, slotHash) + entry.Slots = append(entry.Slots, slot) + } + storage = append(storage, entry) + } + if err := rlp.Encode(w, storage); err != nil { + return err + } + log.Debug("Journaled pathdb diff layer", "root", dl.root, "parent", dl.parent.rootHash(), "id", dl.stateID(), "block", dl.block, "nodes", len(dl.nodes)) + return nil +} + +// Journal commits an entire diff hierarchy to disk into a single journal entry. +// This is meant to be used during shutdown to persist the layer without +// flattening everything down (bad for reorgs). And this function will mark the +// database as read-only to prevent all following mutation to disk. +func (db *Database) Journal(root common.Hash) error { + // Retrieve the head layer to journal from. + l := db.tree.get(root) + if l == nil { + return fmt.Errorf("triedb layer [%#x] missing", root) + } + // Run the journaling + db.lock.Lock() + defer db.lock.Unlock() + + // Short circuit if the database is in read only mode. + if db.readOnly { + return errSnapshotReadOnly + } + // Firstly write out the metadata of journal + journal := new(bytes.Buffer) + if err := rlp.Encode(journal, journalVersion); err != nil { + return err + } + // The stored state in disk might be empty, convert the + // root to emptyRoot in this case. 
+ _, diskroot := rawdb.ReadAccountTrieNode(db.diskdb, nil) + diskroot = types.TrieRootHash(diskroot) + + // Secondly write out the state root in disk, ensure all layers + // on top are continuous with disk. + if err := rlp.Encode(journal, diskroot); err != nil { + return err + } + // Finally write out the journal of each layer in reverse order. + if err := l.journal(journal); err != nil { + return err + } + // Store the journal into the database and return + rawdb.WriteTrieJournal(db.diskdb, journal.Bytes()) + + // Set the db in read only mode to reject all following mutations + db.readOnly = true + log.Info("Stored journal in triedb", "disk", diskroot, "size", common.StorageSize(journal.Len())) + return nil +} diff --git a/trie/triedb/pathdb/layertree.go b/trie/triedb/pathdb/layertree.go new file mode 100644 index 000000000000..d314779910e9 --- /dev/null +++ b/trie/triedb/pathdb/layertree.go @@ -0,0 +1,214 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see + +package pathdb + +import ( + "errors" + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" +) + +// layerTree is a group of state layers identified by the state root. +// This structure defines a few basic operations for manipulating +// state layers linked with each other in a tree structure. It's +// thread-safe to use. However, callers need to ensure the thread-safety +// of the referenced layer by themselves. +type layerTree struct { + lock sync.RWMutex + layers map[common.Hash]layer +} + +// newLayerTree constructs the layerTree with the given head layer. +func newLayerTree(head layer) *layerTree { + tree := new(layerTree) + tree.reset(head) + return tree +} + +// reset initializes the layerTree by the given head layer. +// All the ancestors will be iterated out and linked in the tree. +func (tree *layerTree) reset(head layer) { + tree.lock.Lock() + defer tree.lock.Unlock() + + var layers = make(map[common.Hash]layer) + for head != nil { + layers[head.rootHash()] = head + head = head.parentLayer() + } + tree.layers = layers +} + +// get retrieves a layer belonging to the given state root. +func (tree *layerTree) get(root common.Hash) layer { + tree.lock.RLock() + defer tree.lock.RUnlock() + + return tree.layers[types.TrieRootHash(root)] +} + +// forEach iterates the stored layers inside and applies the +// given callback on them. +func (tree *layerTree) forEach(onLayer func(layer)) { + tree.lock.RLock() + defer tree.lock.RUnlock() + + for _, layer := range tree.layers { + onLayer(layer) + } +} + +// len returns the number of layers cached. 
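// Illustrative sketch (not part of this patch): the per-block usage pattern of
// the layer tree, mirroring how the database update/commit paths drive the add
// and cap methods defined below. The helper name and the 128-layer retention
// value are assumptions, not values taken from this change.
func advanceOneBlock(tree *layerTree, root, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
	// Link the new diff layer on top of its parent...
	if err := tree.add(root, parent, block, nodes, states); err != nil {
		return err
	}
	// ...then cap the tree so at most 128 diff layers stay in memory; anything
	// deeper is flattened into the disk layer (emitting state history for it).
	return tree.cap(root, 128)
}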
+func (tree *layerTree) len() int { + tree.lock.RLock() + defer tree.lock.RUnlock() + + return len(tree.layers) +} + +// add inserts a new layer into the tree if it can be linked to an existing old parent. +func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { + // Reject noop updates to avoid self-loops. This is a special case that can + // happen for clique networks and proof-of-stake networks where empty blocks + // don't modify the state (0 block subsidy). + // + // Although we could silently ignore this internally, it should be the caller's + // responsibility to avoid even attempting to insert such a layer. + root, parentRoot = types.TrieRootHash(root), types.TrieRootHash(parentRoot) + if root == parentRoot { + return errors.New("layer cycle") + } + parent := tree.get(parentRoot) + if parent == nil { + return fmt.Errorf("triedb parent [%#x] layer missing", parentRoot) + } + l := parent.update(root, parent.stateID()+1, block, nodes.Flatten(), states) + + tree.lock.Lock() + tree.layers[l.rootHash()] = l + tree.lock.Unlock() + return nil +} + +// cap traverses downwards the diff tree until the number of allowed diff layers +// are crossed. All diffs beyond the permitted number are flattened downwards. +func (tree *layerTree) cap(root common.Hash, layers int) error { + // Retrieve the head layer to cap from + root = types.TrieRootHash(root) + l := tree.get(root) + if l == nil { + return fmt.Errorf("triedb layer [%#x] missing", root) + } + diff, ok := l.(*diffLayer) + if !ok { + return fmt.Errorf("triedb layer [%#x] is disk layer", root) + } + tree.lock.Lock() + defer tree.lock.Unlock() + + // If full commit was requested, flatten the diffs and merge onto disk + if layers == 0 { + base, err := diff.persist(true) + if err != nil { + return err + } + // Replace the entire layer tree with the flat base + tree.layers = map[common.Hash]layer{base.rootHash(): base} + return nil + } + // Dive until we run out of layers or reach the persistent database + for i := 0; i < layers-1; i++ { + // If we still have diff layers below, continue down + if parent, ok := diff.parentLayer().(*diffLayer); ok { + diff = parent + } else { + // Diff stack too shallow, return without modifications + return nil + } + } + // We're out of layers, flatten anything below, stopping if it's the disk or if + // the memory limit is not yet exceeded. + switch parent := diff.parentLayer().(type) { + case *diskLayer: + return nil + + case *diffLayer: + // Hold the lock to prevent any read operations until the new + // parent is linked correctly. 
+ diff.lock.Lock() + + base, err := parent.persist(false) + if err != nil { + diff.lock.Unlock() + return err + } + tree.layers[base.rootHash()] = base + diff.parent = base + + diff.lock.Unlock() + + default: + panic(fmt.Sprintf("unknown data layer in triedb: %T", parent)) + } + // Remove any layer that is stale or links into a stale layer + children := make(map[common.Hash][]common.Hash) + for root, layer := range tree.layers { + if dl, ok := layer.(*diffLayer); ok { + parent := dl.parentLayer().rootHash() + children[parent] = append(children[parent], root) + } + } + var remove func(root common.Hash) + remove = func(root common.Hash) { + delete(tree.layers, root) + for _, child := range children[root] { + remove(child) + } + delete(children, root) + } + for root, layer := range tree.layers { + if dl, ok := layer.(*diskLayer); ok && dl.isStale() { + remove(root) + } + } + return nil +} + +// bottom returns the bottom-most disk layer in this tree. +func (tree *layerTree) bottom() *diskLayer { + tree.lock.RLock() + defer tree.lock.RUnlock() + + if len(tree.layers) == 0 { + return nil // Shouldn't happen, empty tree + } + // pick a random one as the entry point + var current layer + for _, layer := range tree.layers { + current = layer + break + } + for current.parentLayer() != nil { + current = current.parentLayer() + } + return current.(*diskLayer) +} diff --git a/trie/triedb/pathdb/metrics.go b/trie/triedb/pathdb/metrics.go new file mode 100644 index 000000000000..9e2b1dcbf55e --- /dev/null +++ b/trie/triedb/pathdb/metrics.go @@ -0,0 +1,50 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. 
If not, see + +package pathdb + +import "github.com/ethereum/go-ethereum/metrics" + +var ( + cleanHitMeter = metrics.NewRegisteredMeter("pathdb/clean/hit", nil) + cleanMissMeter = metrics.NewRegisteredMeter("pathdb/clean/miss", nil) + cleanReadMeter = metrics.NewRegisteredMeter("pathdb/clean/read", nil) + cleanWriteMeter = metrics.NewRegisteredMeter("pathdb/clean/write", nil) + + dirtyHitMeter = metrics.NewRegisteredMeter("pathdb/dirty/hit", nil) + dirtyMissMeter = metrics.NewRegisteredMeter("pathdb/dirty/miss", nil) + dirtyReadMeter = metrics.NewRegisteredMeter("pathdb/dirty/read", nil) + dirtyWriteMeter = metrics.NewRegisteredMeter("pathdb/dirty/write", nil) + dirtyNodeHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/depth", nil, metrics.NewExpDecaySample(1028, 0.015)) + + cleanFalseMeter = metrics.NewRegisteredMeter("pathdb/clean/false", nil) + dirtyFalseMeter = metrics.NewRegisteredMeter("pathdb/dirty/false", nil) + diskFalseMeter = metrics.NewRegisteredMeter("pathdb/disk/false", nil) + + commitTimeTimer = metrics.NewRegisteredTimer("pathdb/commit/time", nil) + commitNodesMeter = metrics.NewRegisteredMeter("pathdb/commit/nodes", nil) + commitBytesMeter = metrics.NewRegisteredMeter("pathdb/commit/bytes", nil) + + gcNodesMeter = metrics.NewRegisteredMeter("pathdb/gc/nodes", nil) + gcBytesMeter = metrics.NewRegisteredMeter("pathdb/gc/bytes", nil) + + diffLayerBytesMeter = metrics.NewRegisteredMeter("pathdb/diff/bytes", nil) + diffLayerNodesMeter = metrics.NewRegisteredMeter("pathdb/diff/nodes", nil) + + historyBuildTimeMeter = metrics.NewRegisteredTimer("pathdb/history/time", nil) + historyDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/data", nil) + historyIndexBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/index", nil) +) diff --git a/trie/triedb/pathdb/nodebuffer.go b/trie/triedb/pathdb/nodebuffer.go new file mode 100644 index 000000000000..67de225b0495 --- /dev/null +++ b/trie/triedb/pathdb/nodebuffer.go @@ -0,0 +1,275 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pathdb + +import ( + "fmt" + "time" + + "github.com/VictoriaMetrics/fastcache" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie/trienode" +) + +// nodebuffer is a collection of modified trie nodes to aggregate the disk +// write. The content of the nodebuffer must be checked before diving into +// disk (since it basically is not-yet-written data). 
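// Illustrative sketch (not part of this patch): the intended lookup contract of
// the buffer defined just below. A (nil, nil) return from node means "not
// buffered, fall through to the clean cache and then to disk"; a hash mismatch
// surfaces as an error rather than stale data. The helper name is ours.
func lookupBuffered(b *nodebuffer, owner common.Hash, path []byte, hash common.Hash) ([]byte, bool, error) {
	n, err := b.node(owner, path, hash)
	if err != nil {
		return nil, false, err
	}
	if n == nil {
		return nil, false, nil // miss: caller continues with clean cache / disk
	}
	return n.Blob, true, nil
}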
+type nodebuffer struct { + layers uint64 // The number of diff layers aggregated inside + size uint64 // The size of aggregated writes + limit uint64 // The maximum memory allowance in bytes + nodes map[common.Hash]map[string]*trienode.Node // The dirty node set, mapped by owner and path +} + +// newNodeBuffer initializes the node buffer with the provided nodes. +func newNodeBuffer(limit int, nodes map[common.Hash]map[string]*trienode.Node, layers uint64) *nodebuffer { + if nodes == nil { + nodes = make(map[common.Hash]map[string]*trienode.Node) + } + var size uint64 + for _, subset := range nodes { + for path, n := range subset { + size += uint64(len(n.Blob) + len(path)) + } + } + return &nodebuffer{ + layers: layers, + nodes: nodes, + size: size, + limit: uint64(limit), + } +} + +// node retrieves the trie node with given node info. +func (b *nodebuffer) node(owner common.Hash, path []byte, hash common.Hash) (*trienode.Node, error) { + subset, ok := b.nodes[owner] + if !ok { + return nil, nil + } + n, ok := subset[string(path)] + if !ok { + return nil, nil + } + if n.Hash != hash { + dirtyFalseMeter.Mark(1) + log.Error("Unexpected trie node in node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash) + return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path) + } + return n, nil +} + +// commit merges the dirty nodes into the nodebuffer. This operation won't take +// the ownership of the nodes map which belongs to the bottom-most diff layer. +// It will just hold the node references from the given map which are safe to +// copy. +func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) *nodebuffer { + var ( + delta int64 + overwrite int64 + overwriteSize int64 + ) + for owner, subset := range nodes { + current, exist := b.nodes[owner] + if !exist { + // Allocate a new map for the subset instead of claiming it directly + // from the passed map to avoid potential concurrent map read/write. + // The nodes belong to original diff layer are still accessible even + // after merging, thus the ownership of nodes map should still belong + // to original layer and any mutation on it should be prevented. + current = make(map[string]*trienode.Node) + for path, n := range subset { + current[path] = n + delta += int64(len(n.Blob) + len(path)) + } + b.nodes[owner] = current + continue + } + for path, n := range subset { + if orig, exist := current[path]; !exist { + delta += int64(len(n.Blob) + len(path)) + } else { + delta += int64(len(n.Blob) - len(orig.Blob)) + overwrite++ + overwriteSize += int64(len(orig.Blob) + len(path)) + } + current[path] = n + } + b.nodes[owner] = current + } + b.updateSize(delta) + b.layers++ + gcNodesMeter.Mark(overwrite) + gcBytesMeter.Mark(overwriteSize) + return b +} + +// revert is the reverse operation of commit. It also merges the provided nodes +// into the nodebuffer, the difference is that the provided node set should +// revert the changes made by the last state transition. +func (b *nodebuffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error { + // Short circuit if no embedded state transition to revert. + if b.layers == 0 { + return errStateUnrecoverable + } + b.layers-- + + // Reset the entire buffer if only a single transition left. 
+ if b.layers == 0 { + b.reset() + return nil + } + var delta int64 + for owner, subset := range nodes { + current, ok := b.nodes[owner] + if !ok { + panic(fmt.Sprintf("non-existent subset (%x)", owner)) + } + for path, n := range subset { + orig, ok := current[path] + if !ok { + // There is a special case in MPT that one child is removed from + // a fullNode which only has two children, and then a new child + // with different position is immediately inserted into the fullNode. + // In this case, the clean child of the fullNode will also be + // marked as dirty because of node collapse and expansion. + // + // In case of database rollback, don't panic if this "clean" + // node occurs which is not present in buffer. + var nhash common.Hash + if owner == (common.Hash{}) { + _, nhash = rawdb.ReadAccountTrieNode(db, []byte(path)) + } else { + _, nhash = rawdb.ReadStorageTrieNode(db, owner, []byte(path)) + } + // Ignore the clean node in the case described above. + if nhash == n.Hash { + continue + } + panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex())) + } + current[path] = n + delta += int64(len(n.Blob)) - int64(len(orig.Blob)) + } + } + b.updateSize(delta) + return nil +} + +// updateSize updates the total cache size by the given delta. +func (b *nodebuffer) updateSize(delta int64) { + size := int64(b.size) + delta + if size >= 0 { + b.size = uint64(size) + return + } + s := b.size + b.size = 0 + log.Error("Invalid pathdb buffer size", "prev", common.StorageSize(s), "delta", common.StorageSize(delta)) +} + +// reset cleans up the disk cache. +func (b *nodebuffer) reset() { + b.layers = 0 + b.size = 0 + b.nodes = make(map[common.Hash]map[string]*trienode.Node) +} + +// empty returns an indicator if nodebuffer contains any state transition inside. +func (b *nodebuffer) empty() bool { + return b.layers == 0 +} + +// setSize sets the buffer size to the provided number, and invokes a flush +// operation if the current memory usage exceeds the new limit. +func (b *nodebuffer) setSize(size int, db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64) error { + b.limit = uint64(size) + return b.flush(db, clean, id, false) +} + +// flush persists the in-memory dirty trie node into the disk if the configured +// memory threshold is reached. Note, all data must be written atomically. +func (b *nodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64, force bool) error { + if b.size <= b.limit && !force { + return nil + } + // Ensure the target state id is aligned with the internal counter. + head := rawdb.ReadPersistentStateID(db) + if head+b.layers != id { + return fmt.Errorf("buffer layers (%d) cannot be applied on top of persisted state id (%d) to reach requested state id (%d)", b.layers, head, id) + } + var ( + start = time.Now() + batch = db.NewBatchWithSize(int(b.size)) + ) + nodes := writeNodes(batch, b.nodes, clean) + rawdb.WritePersistentStateID(batch, id) + + // Flush all mutations in a single batch + size := batch.ValueSize() + if err := batch.Write(); err != nil { + return err + } + commitBytesMeter.Mark(int64(size)) + commitNodesMeter.Mark(int64(nodes)) + commitTimeTimer.UpdateSince(start) + log.Debug("Persisted pathdb nodes", "nodes", len(b.nodes), "bytes", common.StorageSize(size), "elapsed", common.PrettyDuration(time.Since(start))) + b.reset() + return nil +} + +// writeNodes writes the trie nodes into the provided database batch. 
+// Note this function will also inject all the newly written nodes +// into clean cache. +func writeNodes(batch ethdb.Batch, nodes map[common.Hash]map[string]*trienode.Node, clean *fastcache.Cache) (total int) { + for owner, subset := range nodes { + for path, n := range subset { + if n.IsDeleted() { + if owner == (common.Hash{}) { + rawdb.DeleteAccountTrieNode(batch, []byte(path)) + } else { + rawdb.DeleteStorageTrieNode(batch, owner, []byte(path)) + } + if clean != nil { + clean.Del(cacheKey(owner, []byte(path))) + } + } else { + if owner == (common.Hash{}) { + rawdb.WriteAccountTrieNode(batch, []byte(path), n.Blob) + } else { + rawdb.WriteStorageTrieNode(batch, owner, []byte(path), n.Blob) + } + if clean != nil { + clean.Set(cacheKey(owner, []byte(path)), n.Blob) + } + } + } + total += len(subset) + } + return total +} + +// cacheKey constructs the unique key of clean cache. +func cacheKey(owner common.Hash, path []byte) []byte { + if owner == (common.Hash{}) { + return path + } + return append(owner.Bytes(), path...) +} diff --git a/trie/triedb/pathdb/testutils.go b/trie/triedb/pathdb/testutils.go new file mode 100644 index 000000000000..cb3a240cc4f3 --- /dev/null +++ b/trie/triedb/pathdb/testutils.go @@ -0,0 +1,156 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pathdb + +import ( + "bytes" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" + "golang.org/x/exp/slices" +) + +// testHasher is a test utility for computing root hash of a batch of state +// elements. The hash algorithm is to sort all the elements in lexicographical +// order, concat the key and value in turn, and perform hash calculation on +// the concatenated bytes. Except the root hash, a nodeset will be returned +// once Commit is called, which contains all the changes made to hasher. +type testHasher struct { + owner common.Hash // owner identifier + root common.Hash // original root + dirties map[common.Hash][]byte // dirty states + cleans map[common.Hash][]byte // clean states +} + +// newTestHasher constructs a hasher object with provided states. +func newTestHasher(owner common.Hash, root common.Hash, cleans map[common.Hash][]byte) (*testHasher, error) { + if cleans == nil { + cleans = make(map[common.Hash][]byte) + } + if got, _ := hash(cleans); got != root { + return nil, fmt.Errorf("state root mismatched, want: %x, got: %x", root, got) + } + return &testHasher{ + owner: owner, + root: root, + dirties: make(map[common.Hash][]byte), + cleans: cleans, + }, nil +} + +// Get returns the value for key stored in the trie. 
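// For concreteness (illustrative values): with two non-empty states k1 < k2
// the root computed by this test hasher is
//
//	root = keccak256(k1 || v1 || k2 || v2)
//
// zero-length (deleted) values are skipped from the concatenation, and if
// nothing remains the root collapses to types.EmptyRootHash.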
+func (h *testHasher) Get(key []byte) ([]byte, error) { + hash := common.BytesToHash(key) + val, ok := h.dirties[hash] + if ok { + return val, nil + } + return h.cleans[hash], nil +} + +// Update associates key with value in the trie. +func (h *testHasher) Update(key, value []byte) error { + h.dirties[common.BytesToHash(key)] = common.CopyBytes(value) + return nil +} + +// Delete removes any existing value for key from the trie. +func (h *testHasher) Delete(key []byte) error { + h.dirties[common.BytesToHash(key)] = nil + return nil +} + +// Commit computes the new hash of the states and returns the set with all +// state changes. +func (h *testHasher) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) { + var ( + nodes = make(map[common.Hash][]byte) + set = trienode.NewNodeSet(h.owner) + ) + for hash, val := range h.cleans { + nodes[hash] = val + } + for hash, val := range h.dirties { + nodes[hash] = val + if bytes.Equal(val, h.cleans[hash]) { + continue + } + if len(val) == 0 { + set.AddNode(hash.Bytes(), trienode.NewDeleted()) + } else { + set.AddNode(hash.Bytes(), trienode.New(crypto.Keccak256Hash(val), val)) + } + } + root, blob := hash(nodes) + + // Include the dirty root node as well. + if root != types.EmptyRootHash && root != h.root { + set.AddNode(nil, trienode.New(root, blob)) + } + if root == types.EmptyRootHash && h.root != types.EmptyRootHash { + set.AddNode(nil, trienode.NewDeleted()) + } + return root, set +} + +// hash performs the hash computation upon the provided states. +func hash(states map[common.Hash][]byte) (common.Hash, []byte) { + var hs []common.Hash + for hash := range states { + hs = append(hs, hash) + } + slices.SortFunc(hs, func(a, b common.Hash) bool { return a.Less(b) }) + + var input []byte + for _, hash := range hs { + if len(states[hash]) == 0 { + continue + } + input = append(input, hash.Bytes()...) + input = append(input, states[hash]...) + } + if len(input) == 0 { + return types.EmptyRootHash, nil + } + return crypto.Keccak256Hash(input), input +} + +type hashLoader struct { + accounts map[common.Hash][]byte + storages map[common.Hash]map[common.Hash][]byte +} + +func newHashLoader(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *hashLoader { + return &hashLoader{ + accounts: accounts, + storages: storages, + } +} + +// OpenTrie opens the main account trie. +func (l *hashLoader) OpenTrie(root common.Hash) (triestate.Trie, error) { + return newTestHasher(common.Hash{}, root, l.accounts) +} + +// OpenStorageTrie opens the storage trie of an account. +func (l *hashLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) { + return newTestHasher(addrHash, root, l.storages[addrHash]) +} diff --git a/trie/trienode/node.go b/trie/trienode/node.go index d99f04bd5e5c..8998bcba060a 100644 --- a/trie/trienode/node.go +++ b/trie/trienode/node.go @@ -25,8 +25,8 @@ import ( ) // Node is a wrapper which contains the encoded blob of the trie node and its -// unique hash identifier. It is general enough that can be used to represent -// trie nodes corresponding to different trie implementations. +// node hash. It is general enough that can be used to represent trie node +// corresponding to different trie implementations. 
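// Illustrative caller-side construction of the two node flavours
// (constructors defined below in this file):
//
//	live := trienode.New(crypto.Keccak256Hash(blob), blob) // regular node
//	gone := trienode.NewDeleted()                          // deletion marker, IsDeleted() == true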
type Node struct { Hash common.Hash // Node hash, empty for deleted node Blob []byte // Encoded node blob, nil for the deleted node @@ -42,35 +42,13 @@ func (n *Node) IsDeleted() bool { return n.Hash == (common.Hash{}) } -// WithPrev wraps the Node with the previous node value attached. -type WithPrev struct { - *Node - Prev []byte // Encoded original value, nil means it's non-existent -} - -// Unwrap returns the internal Node object. -func (n *WithPrev) Unwrap() *Node { - return n.Node -} - -// Size returns the total memory size used by this node. It overloads -// the function in Node by counting the size of previous value as well. -func (n *WithPrev) Size() int { - return n.Node.Size() + len(n.Prev) -} - // New constructs a node with provided node information. func New(hash common.Hash, blob []byte) *Node { return &Node{Hash: hash, Blob: blob} } -// NewWithPrev constructs a node with provided node information. -func NewWithPrev(hash common.Hash, blob []byte, prev []byte) *WithPrev { - return &WithPrev{ - Node: New(hash, blob), - Prev: prev, - } -} +// NewDeleted constructs a node which is deleted. +func NewDeleted() *Node { return New(common.Hash{}, nil) } // leaf represents a trie leaf node type leaf struct { @@ -83,7 +61,7 @@ type leaf struct { type NodeSet struct { Owner common.Hash Leaves []*leaf - Nodes map[string]*WithPrev + Nodes map[string]*Node updates int // the count of updated and inserted nodes deletes int // the count of deleted nodes } @@ -93,7 +71,7 @@ type NodeSet struct { func NewNodeSet(owner common.Hash) *NodeSet { return &NodeSet{ Owner: owner, - Nodes: make(map[string]*WithPrev), + Nodes: make(map[string]*Node), } } @@ -104,17 +82,17 @@ func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) { for path := range set.Nodes { paths = append(paths, path) } - // Bottom-up, longest path first + // Bottom-up, the longest path first slices.SortFunc(paths, func(a, b string) bool { return a > b // Sort in reverse order }) for _, path := range paths { - callback(path, set.Nodes[path].Unwrap()) + callback(path, set.Nodes[path]) } } // AddNode adds the provided node into set. -func (set *NodeSet) AddNode(path []byte, n *WithPrev) { +func (set *NodeSet) AddNode(path []byte, n *Node) { if n.IsDeleted() { set.deletes += 1 } else { @@ -124,7 +102,7 @@ func (set *NodeSet) AddNode(path []byte, n *WithPrev) { } // Merge adds a set of nodes into the set. -func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*WithPrev) error { +func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error { if set.Owner != owner { return fmt.Errorf("nodesets belong to different owner are not mergeable %x-%x", set.Owner, owner) } @@ -172,16 +150,11 @@ func (set *NodeSet) Summary() string { for path, n := range set.Nodes { // Deletion if n.IsDeleted() { - fmt.Fprintf(out, " [-]: %x prev: %x\n", path, n.Prev) - continue - } - // Insertion - if len(n.Prev) == 0 { - fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.Hash) + fmt.Fprintf(out, " [-]: %x\n", path) continue } - // Update - fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", path, n.Hash, n.Prev) + // Insertion or update + fmt.Fprintf(out, " [+/*]: %x -> %v \n", path, n.Hash) } } for _, n := range set.Leaves { @@ -217,3 +190,12 @@ func (set *MergedNodeSet) Merge(other *NodeSet) error { set.Sets[other.Owner] = other return nil } + +// Flatten returns a two-dimensional map for internal nodes. 
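// A minimal consumer-side sketch (illustrative only): the flattened result
// maps trie owner (zero hash for the account trie) to node path to node,
// so a consumer typically iterates it as
//
//	for owner, subset := range merged.Flatten() {
//		for path, n := range subset {
//			_, _, _ = owner, path, n // n.IsDeleted() marks a removal
//		}
//	}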
+func (set *MergedNodeSet) Flatten() map[common.Hash]map[string]*Node { + nodes := make(map[common.Hash]map[string]*Node) + for owner, set := range set.Sets { + nodes[owner] = set.Nodes + } + return nodes +} diff --git a/trie/triestate/state.go b/trie/triestate/state.go index 68fee26d571b..cb3611baf9cd 100644 --- a/trie/triestate/state.go +++ b/trie/triestate/state.go @@ -16,7 +16,44 @@ package triestate -import "github.com/ethereum/go-ethereum/common" +import ( + "errors" + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie/trienode" + "golang.org/x/crypto/sha3" +) + +// Trie is an Ethereum state trie, can be implemented by Ethereum Merkle Patricia +// tree or Verkle tree. +type Trie interface { + // Get returns the value for key stored in the trie. + Get(key []byte) ([]byte, error) + + // Update associates key with value in the trie. + Update(key, value []byte) error + + // Delete removes any existing value for key from the trie. + Delete(key []byte) error + + // Commit the trie and returns a set of dirty nodes generated along with + // the new root hash. + Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) +} + +// TrieLoader wraps functions to load tries. +type TrieLoader interface { + // OpenTrie opens the main account trie. + OpenTrie(root common.Hash) (Trie, error) + + // OpenStorageTrie opens the storage trie of an account. + OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error) +} // Set represents a collection of mutated states during a state transition. // The value refers to the original content of state before the transition @@ -24,5 +61,207 @@ import "github.com/ethereum/go-ethereum/common" type Set struct { Accounts map[common.Address][]byte // Mutated account set, nil means the account was not present Storages map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present - Incomplete map[common.Address]struct{} // Indicator whether the storage slot is incomplete due to large deletion + Incomplete map[common.Address]struct{} // Indicator whether the storage is incomplete due to large deletion + size common.StorageSize // Approximate size of set +} + +// New constructs the state set with provided data. +func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, incomplete map[common.Address]struct{}) *Set { + return &Set{ + Accounts: accounts, + Storages: storages, + Incomplete: incomplete, + } +} + +// Size returns the approximate memory size occupied by the set. +func (s *Set) Size() common.StorageSize { + if s.size != 0 { + return s.size + } + for _, account := range s.Accounts { + s.size += common.StorageSize(common.AddressLength + len(account)) + } + for _, slots := range s.Storages { + for _, val := range slots { + s.size += common.StorageSize(common.HashLength + len(val)) + } + s.size += common.StorageSize(common.AddressLength) + } + s.size += common.StorageSize(common.AddressLength * len(s.Incomplete)) + return s.size +} + +// context wraps all fields for executing state diffs. 
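// A minimal caller-side sketch (illustrative only) of the Apply entry point
// defined below: given the original values recorded for one state
// transition, it rolls the post-state root back to the previous root and
// returns the trie nodes dirtied by that rollback:
//
//	nodes, err := Apply(prevRoot, postRoot, set.Accounts, set.Storages, loader)
//	if err != nil {
//		return err
//	}
//	// nodes: owner -> path -> *trienode.Node, ready to be written out
//	// by whoever is undoing the transition.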
+type context struct { + prevRoot common.Hash + postRoot common.Hash + accounts map[common.Address][]byte + storages map[common.Address]map[common.Hash][]byte + accountTrie Trie + nodes *trienode.MergedNodeSet +} + +// Apply traverses the provided state diffs, apply them in the associated +// post-state and return the generated dirty trie nodes. The state can be +// loaded via the provided trie loader. +func Apply(prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, loader TrieLoader) (map[common.Hash]map[string]*trienode.Node, error) { + tr, err := loader.OpenTrie(postRoot) + if err != nil { + return nil, err + } + ctx := &context{ + prevRoot: prevRoot, + postRoot: postRoot, + accounts: accounts, + storages: storages, + accountTrie: tr, + nodes: trienode.NewMergedNodeSet(), + } + for addr, account := range accounts { + var err error + if len(account) == 0 { + err = deleteAccount(ctx, loader, addr) + } else { + err = updateAccount(ctx, loader, addr) + } + if err != nil { + return nil, fmt.Errorf("failed to revert state, err: %w", err) + } + } + root, result := tr.Commit(false) + if root != prevRoot { + return nil, fmt.Errorf("failed to revert state, want %#x, got %#x", prevRoot, root) + } + if err := ctx.nodes.Merge(result); err != nil { + return nil, err + } + return ctx.nodes.Flatten(), nil +} + +// updateAccount the account was present in prev-state, and may or may not +// existent in post-state. Apply the reverse diff and verify if the storage +// root matches the one in prev-state account. +func updateAccount(ctx *context, loader TrieLoader, addr common.Address) error { + // The account was present in prev-state, decode it from the + // 'slim-rlp' format bytes. + h := newHasher() + defer h.release() + + addrHash := h.hash(addr.Bytes()) + prev, err := types.FullAccount(ctx.accounts[addr]) + if err != nil { + return err + } + // The account may or may not existent in post-state, try to + // load it and decode if it's found. + blob, err := ctx.accountTrie.Get(addrHash.Bytes()) + if err != nil { + return err + } + post := types.NewEmptyStateAccount() + if len(blob) != 0 { + if err := rlp.DecodeBytes(blob, &post); err != nil { + return err + } + } + // Apply all storage changes into the post-state storage trie. + st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root) + if err != nil { + return err + } + for key, val := range ctx.storages[addr] { + var err error + if len(val) == 0 { + err = st.Delete(key.Bytes()) + } else { + err = st.Update(key.Bytes(), val) + } + if err != nil { + return err + } + } + root, result := st.Commit(false) + if root != prev.Root { + return errors.New("failed to reset storage trie") + } + // The returned set can be nil if storage trie is not changed + // at all. + if result != nil { + if err := ctx.nodes.Merge(result); err != nil { + return err + } + } + // Write the prev-state account into the main trie + full, err := rlp.EncodeToBytes(prev) + if err != nil { + return err + } + return ctx.accountTrie.Update(addrHash.Bytes(), full) +} + +// deleteAccount the account was not present in prev-state, and is expected +// to be existent in post-state. Apply the reverse diff and verify if the +// account and storage is wiped out correctly. +func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error { + // The account must be existent in post-state, load the account. 
+ h := newHasher() + defer h.release() + + addrHash := h.hash(addr.Bytes()) + blob, err := ctx.accountTrie.Get(addrHash.Bytes()) + if err != nil { + return err + } + if len(blob) == 0 { + return fmt.Errorf("account is non-existent %#x", addrHash) + } + var post types.StateAccount + if err := rlp.DecodeBytes(blob, &post); err != nil { + return err + } + st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root) + if err != nil { + return err + } + for key, val := range ctx.storages[addr] { + if len(val) != 0 { + return errors.New("expect storage deletion") + } + if err := st.Delete(key.Bytes()); err != nil { + return err + } + } + root, result := st.Commit(false) + if root != types.EmptyRootHash { + return errors.New("failed to clear storage trie") + } + // The returned set can be nil if storage trie is not changed + // at all. + if result != nil { + if err := ctx.nodes.Merge(result); err != nil { + return err + } + } + // Delete the post-state account from the main trie. + return ctx.accountTrie.Delete(addrHash.Bytes()) +} + +// hasher is used to compute the sha256 hash of the provided data. +type hasher struct{ sha crypto.KeccakState } + +var hasherPool = sync.Pool{ + New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, +} + +func newHasher() *hasher { + return hasherPool.Get().(*hasher) +} + +func (h *hasher) hash(data []byte) common.Hash { + return crypto.HashData(h.sha, data) +} + +func (h *hasher) release() { + hasherPool.Put(h) } From ac152143e1018be96b96fb3f0eaa21ab613064bb Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Thu, 6 May 2021 18:58:39 +0200 Subject: [PATCH 02/99] Verkle tree-based state with overlay transition Squash the main verkle PR ahead of rebase don't call Bytes() in GetTreeKey (#137) trie: avoid endianness conversion in GetTreeKey (#140) * trie/utils: add concrete expected value in trie key generation test Signed-off-by: Ignacio Hagopian * mod: update to latest go-verkle Signed-off-by: Ignacio Hagopian * trie/utils: avoid endianness conversions Signed-off-by: Ignacio Hagopian * apply review changes & update to official go-verkle version Signed-off-by: Ignacio Hagopian Signed-off-by: Ignacio Hagopian upgrade go-verkle to CoW version and get TestProcessVerkle to build (#138) updating ci to use self-hosted machine (#143) fix: storage offset in non-header group + reuse of value buffer (#145) dedup call to ChunkifyCode, same as replay branch (#156) * dedup call to ChunkifyCode, same as replay branch * fix some linter issues fix code offset in tree update (#157) fix REVERT in state processor test execution (#158) * fix code offset in tree update * fix REVERT in test execution save on key hashing: lump code size update with first code chunk group (#159) fix code chunk key calculation and storage key calculation (#161) * fix codeKey calculation * Remove * fix storageOffset * fix the fix to the fix to the offset fix * Remove copy/pasted, unused code in test * fix linter --------- Co-authored-by: Guillaume Ballet <3272758+gballet@users.noreply.github.com> fix: infinite loop when calling extcodecopy on empty code (#151) upgrade to latest go-verkle fix: only update code in the tree if it's dirty (#174) fix: read-touch the code size and Keccak of the origin (#175) List of changes for converting a sepolia database (#182) * naive conversion rebased on top of beverly hills * changes for the sepolia shadow fork conversion * fixes to please the linter * fixes to please the linter 
Unified point cache (#180) * Unified point cache * Use cache for Try*Account * alter Trie interface to use caching for slots (#181) * alter Trie interface to use caching for slots * fix: use a lock to protect the point cache (#185) * use fastest non-master go-verkle version & pull trie/Verkle.go changes to use new api (#184) * mod: update to fastest go-verkle version today Signed-off-by: Ignacio Hagopian * trie/verkle: use new batch serialization api Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian * fix: TryDelete signature in unit tests --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian trie/utils: fix potential overflow (#191) * trie/utils: fix potential overflow Signed-off-by: Ignacio Hagopian * trie/utils: receive storage key as a byte slice Signed-off-by: Ignacio Hagopian * revert formatter changes Signed-off-by: Ignacio Hagopian * trie/utils: fix mod 256 Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian trie/utils: fix incorrect bigint assignment (#193) Signed-off-by: Ignacio Hagopian upgrade precomp link to fix CI fix: add missing code size&keccak leaves in empty accounts (#192) fixes to use the latest go-verkle@master (#197) * fixes to use the latest go-verkle@master * linter fixes * linter fixes for tests * fix: use jsign's go-verkle fix refactor: remove unused (*StateDB).GetXLittleEndian methods (#204) fix gas accounting issue in state_processor_test.go (#207) update go-verkle not to use StatelessNode anymore (#206) * update go-verkle not to use StatelessNode anymore * update go-verkle to latest refactor: move verkle gas accounting to its own block in TransitionDB (#208) fix a panic in deserializeVerkleProof if GetProofItems returns a nil ProofElements use the cachingDB instead of a custom VerkleDB (#209) * use the cachingDB instead of a custom VerkleDB * fix stack trace in LES remove holiman from CODEOWNERS as he gets too many emails read from tree in state object if the snapshot is nil (#205) add missing error checks for the root node type (#214) implement OpenStorageTrie for verkle trees (#210) * implement OpenStorageTrie for verkle trees * add a few comments for future maintenance * fix linter issue fix: copy balance leaf to new buffer in TryGetAccount (#217) implement some heretofore unimplemented iterator methods (#219) params: move verkle params to their own file (#228) fix: proper number of chunk evals (#215) overlay transition (#244) * overlay transition Fix some bugs identified in the code review Co-authored-by: Ignacio Hagopian Include base -> overlay key-values migration logic (#199) * mod: add go-verkle version with key-value migration new apis Signed-off-by: Ignacio Hagopian * core/stateprocessor: use constant for max number of migrated key-values Signed-off-by: Ignacio Hagopian * core: add base->overlay key-values migration logic Signed-off-by: Ignacio Hagopian * core: fix some compiler errors Signed-off-by: Ignacio Hagopian * trie: consider removing transition trie api in the future Signed-off-by: Ignacio Hagopian * mod: use latest go-verkle Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian fix some unit tests errors get convresion block from file fix compilation issues fix initialization issue in migrator fix: changes needed to run the first 28 blocks important sutff: fix the banner fix: use nonce instead of balance in nonce leaf (#202) fixes for performing the overlay transition (#203) * 
fixes for performing the overlay transition * fixes for the full replay * fix: deletion-and-recreation of EoA * fixes to replay 2M+ blocks * upgrade to go-verkle@master * fix: proper number of chunk evals * rewrite conversion loop to fix known issues changes to make replay work with the overlay method (#216) * fixes for performing the overlay transition fixes for the full replay fix: deletion-and-recreation of EoA fixes to replay 2M+ blocks upgrade to go-verkle@master fix: proper number of chunk evals rewrite conversion loop to fix known issues changes to make replay work with the overlay method fixes to replay 2M+ blocks update to latest go-verkle@master * use a PBSS-like scheme for internal nodes (#221) * use a PBSS-like scheme for internal nodes * a couple of fixes coming from debugging replay * fix: use an error to notify the transition tree that a deleted account was found in the overlay tree (#222) * fixes for pbss replay (#227) * fixes for pbss replay * trie/verkle: use capped batch size (#229) * trie/verkle: use capped batch size Signed-off-by: Ignacio Hagopian * trie/verkle: avoid path variable allocation per db.Put Signed-off-by: Ignacio Hagopian * don't keep more than 32 state root conversions in RAM (#230) --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Guillaume Ballet <3272758+gballet@users.noreply.github.com> * cleanup some code * mod: update go-verkle Signed-off-by: Ignacio Hagopian * re-enable snapshot (#231) * re-enable cancun block / snapshot (#226) * clear storage conversion key upon translating account (#234) * clear storage conversion key upon translating account * mod: use latest go-verkle Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian * fix: self-deadlock with translated root map mutex (#236) * return compressed commitment as root commitment (#237) --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian fix first panic in *TransitionTrie.Copy() upgrade go-verkle to latest master mod: update go-verkle (#239) Signed-off-by: Ignacio Hagopian core: print state root every 100 blocks (#240) Signed-off-by: Ignacio Hagopian fix: only Commit the account trie (#242) fixes to get TestProcessVerkle to work with the overlay branch (#238) * fixes to get TestProcessVerkle to work with the overlay branch * fix all panics in verkle state processor test * fix proof verification move transition management to cachingDB * fix: mark the verkle transition as started if it's ended without being started * fix the verkle state processing test * fix linter errors * Add a function to clear verkle params for replay * fix: handle TransitionTrie in OpenStorageTrie * fix linter issue * fix the deleted account error (#247) * code cleanup (#248) * fix: don't error on a missing conversion.txt (#249) * Overlay Tree preimages exporting and usage (#246) * export overlay preimages tool Signed-off-by: Ignacio Hagopian * use preimages flat file in overlay tree migration logic Signed-off-by: Ignacio Hagopian * cmd/geth: add --roothash to overlay tree preimage exporting command Signed-off-by: Ignacio Hagopian * cleanup Signed-off-by: Ignacio Hagopian * review feedback Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian * fix: reduce the PR footprint (#250) * fix: don't fail when preimages.bin is missing (#251) * fix: don't fail when preimages.bin is 
missing * fix: don't open the preimages file when outside of transition --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian review changes remove replay-specific code --- .github/CODEOWNERS | 2 +- .github/workflows/go.yml | 48 ++++ cmd/geth/chaincmd.go | 38 +++ cmd/geth/main.go | 1 + cmd/geth/verkle.go | 255 ++++++++++++++++- cmd/utils/cmd.go | 81 ++++++ cmd/utils/flags.go | 5 + consensus/ethash/consensus.go | 16 ++ core/block_validator.go | 1 + core/blockchain.go | 58 ++++ core/chain_makers.go | 117 ++++++++ core/error.go | 5 + core/genesis.go | 31 +- core/genesis_test.go | 2 +- core/state/access_witness.go | 410 +++++++++++++++++++++++++++ core/state/database.go | 245 +++++++++++++++- core/state/iterator.go | 14 +- core/state/snapshot/snapshot.go | 14 + core/state/state_object.go | 4 +- core/state/statedb.go | 112 +++++++- core/state/statedb_test.go | 31 +- core/state/sync_test.go | 5 +- core/state/trie_prefetcher.go | 2 +- core/state_processor.go | 331 ++++++++++++++++++++++ core/state_processor_test.go | 121 ++++++++ core/state_transition.go | 40 +++ core/types/gen_account_rlp.go | 7 +- core/types/gen_header_rlp.go | 113 ++++++++ core/types/gen_log_rlp.go | 7 +- core/types/state_account.go | 5 + core/vm/common.go | 12 + core/vm/contract.go | 22 +- core/vm/evm.go | 45 ++- core/vm/gas_table.go | 82 +++++- core/vm/instructions.go | 135 ++++++++- core/vm/interface.go | 4 + core/vm/interpreter.go | 29 ++ core/vm/jump_table.go | 2 + core/vm/operations_acl.go | 19 +- eth/tracers/js/tracer_test.go | 3 + go.mod | 6 +- go.sum | 6 + internal/ethapi/api.go | 2 +- les/server_requests.go | 2 +- light/odr_test.go | 2 +- light/trie.go | 63 ++++- miner/worker.go | 6 + miner/worker_test.go | 19 ++ params/verkle_params.go | 36 +++ trie/database.go | 39 +++ trie/secure_trie.go | 4 + trie/transition.go | 201 +++++++++++++ trie/utils/verkle.go | 290 +++++++++++++++++++ trie/utils/verkle_test.go | 95 +++++++ trie/verkle.go | 482 ++++++++++++++++++++++++++++++++ trie/verkle_iterator.go | 218 +++++++++++++++ trie/verkle_iterator_test.go | 68 +++++ trie/verkle_test.go | 381 +++++++++++++++++++++++++ 58 files changed, 4338 insertions(+), 56 deletions(-) create mode 100644 .github/workflows/go.yml create mode 100644 core/state/access_witness.go create mode 100644 params/verkle_params.go create mode 100644 trie/transition.go create mode 100644 trie/utils/verkle.go create mode 100644 trie/utils/verkle_test.go create mode 100644 trie/verkle.go create mode 100644 trie/verkle_iterator.go create mode 100644 trie/verkle_iterator_test.go create mode 100644 trie/verkle_test.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index faf922df0161..f38d522b7edb 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -6,7 +6,7 @@ accounts/scwallet @gballet accounts/abi @gballet @MariusVanDerWijden cmd/clef @holiman consensus @karalabe -core/ @karalabe @holiman @rjl493456442 +core/ @karalabe @rjl493456442 eth/ @karalabe @holiman @rjl493456442 eth/catalyst/ @gballet eth/tracers/ @s1na diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 000000000000..5ae526f1eedc --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,48 @@ +name: Go lint and test + +on: + push: + branches: [ master ] + pull_request: + branches: [ master, verkle-trie-proof-in-block-rebased, verkle-trie-post-merge, beverly-hills-head, 'verkle/replay-change-with-tree-group-tryupdate' ] + workflow_dispatch: + +jobs: + build: + runs-on: self-hosted + steps: + - uses: actions/checkout@v2 + - name: 
Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.18 + - name: Build + run: go build -v ./... + + lint: + runs-on: self-hosted + steps: + - uses: actions/checkout@v2 + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.18 + - name: Download golangci-lint + run: wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s latest + - name: Lint + run: ./bin/golangci-lint run + - name: Vet + run: go vet + + test: + runs-on: self-hosted + steps: + - uses: actions/checkout@v2 + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.18 + - name: Download precomputed points + run: wget -nv https://github.com/gballet/go-verkle/releases/download/banderwagonv3/precomp -Otrie/utils/precomp + - name: Test + run: go test ./... diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 41591ac138a8..8e195bcf964d 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -144,6 +144,17 @@ It's deprecated, please use "geth db import" instead. Description: ` The export-preimages command exports hash preimages to an RLP encoded stream. It's deprecated, please use "geth db export" instead. +`, + } + exportOverlayPreimagesCommand = &cli.Command{ + Action: exportOverlayPreimages, + Name: "export-overlay-preimages", + Usage: "Export the preimage in overlay tree migration order", + ArgsUsage: "", + Flags: flags.Merge([]cli.Flag{utils.TreeRootFlag}, utils.DatabasePathFlags), + Description: ` +The export-overlay-preimages command exports hash preimages to a flat file, in exactly +the expected order for the overlay tree migration. `, } dumpCommand = &cli.Command{ @@ -399,6 +410,33 @@ func exportPreimages(ctx *cli.Context) error { return nil } +// exportOverlayPreimages dumps the preimage data to a flat file. 
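// A hypothetical invocation (the flag and argument come from the command
// definition above; the file name is only an example):
//
//	geth export-overlay-preimages --roothash <state root> preimages.bin
//
// The resulting flat file is a stream of 20-byte account addresses, each
// followed by the 32-byte slot-number preimages of that account's storage,
// in snapshot iteration order.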
+func exportOverlayPreimages(ctx *cli.Context) error { + if ctx.Args().Len() < 1 { + utils.Fatalf("This command requires an argument.") + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chain, _ := utils.MakeChain(ctx, stack, true) + + var root common.Hash + if ctx.String(utils.TreeRootFlag.Name) != "" { + rootBytes := common.FromHex(ctx.String(utils.StartKeyFlag.Name)) + if len(rootBytes) != common.HashLength { + return fmt.Errorf("invalid root hash length") + } + root = common.BytesToHash(rootBytes) + } + + start := time.Now() + if err := utils.ExportOverlayPreimages(chain, ctx.Args().First(), root); err != nil { + utils.Fatalf("Export error: %v\n", err) + } + fmt.Printf("Export done in %v\n", time.Since(start)) + return nil +} + func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) { db := utils.MakeChainDatabase(ctx, stack, true) var header *types.Header diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 0dcb38358f68..a239f88499a1 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -209,6 +209,7 @@ func init() { exportCommand, importPreimagesCommand, exportPreimagesCommand, + exportOverlayPreimagesCommand, removedbCommand, dumpCommand, dumpGenesisCommand, diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go index 9ba2b4167164..d1953697b9bd 100644 --- a/cmd/geth/verkle.go +++ b/cmd/geth/verkle.go @@ -18,17 +18,26 @@ package main import ( "bytes" + "encoding/binary" "encoding/hex" "errors" "fmt" "os" + "runtime" + "time" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state/snapshot" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + tutils "github.com/ethereum/go-ethereum/trie/utils" "github.com/gballet/go-verkle" + "github.com/holiman/uint256" cli "github.com/urfave/cli/v2" ) @@ -40,6 +49,20 @@ var ( Usage: "A set of experimental verkle tree management commands", Description: "", Subcommands: []*cli.Command{ + { + Name: "to-verkle", + Usage: "use the snapshot to compute a translation of a MPT into a verkle tree", + ArgsUsage: "", + Action: convertToVerkle, + Flags: flags.Merge([]cli.Flag{}, utils.NetworkFlags, utils.DatabasePathFlags), + Description: ` +geth verkle to-verkle +This command takes a snapshot and inserts its values in a fresh verkle tree. + +The argument is interpreted as the root hash. If none is provided, the latest +block is used. + `, + }, { Name: "verify", Usage: "verify the conversion of a MPT into a verkle tree", @@ -67,6 +90,228 @@ in which key1, key2, ... are expanded. 
} ) +func convertToVerkle(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chaindb := utils.MakeChainDatabase(ctx, stack, false) + if chaindb == nil { + return errors.New("nil chaindb") + } + headBlock := rawdb.ReadHeadBlock(chaindb) + if headBlock == nil { + log.Error("Failed to load head block") + return errors.New("no head block") + } + if ctx.NArg() > 1 { + log.Error("Too many arguments given") + return errors.New("too many arguments") + } + var ( + root common.Hash + err error + ) + if ctx.NArg() == 1 { + root, err = parseRoot(ctx.Args().First()) + if err != nil { + log.Error("Failed to resolve state root", "error", err) + return err + } + log.Info("Start traversing the state", "root", root) + } else { + root = headBlock.Root() + log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) + } + + var ( + accounts int + lastReport time.Time + start = time.Now() + vRoot = verkle.New().(*verkle.InternalNode) + ) + + saveverkle := func(path []byte, node verkle.VerkleNode) { + node.Commit() + s, err := node.Serialize() + if err != nil { + panic(err) + } + if err := chaindb.Put(path, s); err != nil { + panic(err) + } + } + + snaptree, err := snapshot.New(snapshot.Config{CacheSize: 256}, chaindb, trie.NewDatabase(chaindb), root) + if err != nil { + return err + } + accIt, err := snaptree.AccountIterator(root, common.Hash{}) + if err != nil { + return err + } + defer accIt.Release() + + // root.FlushAtDepth(depth, saveverkle) + + // Process all accounts sequentially + for accIt.Next() { + accounts += 1 + acc, err := types.FullAccount(accIt.Account()) + if err != nil { + log.Error("Invalid account encountered during traversal", "error", err) + return err + } + + // Store the basic account data + var ( + nonce, balance, version, size [32]byte + newValues = make([][]byte, 256) + ) + newValues[0] = version[:] + newValues[1] = balance[:] + newValues[2] = nonce[:] + newValues[4] = version[:] // memory-saving trick: by default, an account has 0 size + binary.LittleEndian.PutUint64(nonce[:8], acc.Nonce) + for i, b := range acc.Balance.Bytes() { + balance[len(acc.Balance.Bytes())-1-i] = b + } + addr := rawdb.ReadPreimage(chaindb, accIt.Hash()) + if addr == nil { + return fmt.Errorf("could not find preimage for address %x %v %v", accIt.Hash(), acc, accIt.Error()) + } + addrPoint := tutils.EvaluateAddressPoint(addr) + stem := tutils.GetTreeKeyVersionWithEvaluatedAddress(addrPoint) + + // Store the account code if present + if !bytes.Equal(acc.CodeHash, types.EmptyRootHash[:]) { + code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash)) + chunks := trie.ChunkifyCode(code) + + for i := 0; i < 128 && i < len(chunks)/32; i++ { + newValues[128+i] = chunks[32*i : 32*(i+1)] + } + + for i := 128; i < len(chunks)/32; { + values := make([][]byte, 256) + chunkkey := tutils.GetTreeKeyCodeChunkWithEvaluatedAddress(addrPoint, uint256.NewInt(uint64(i))) + j := i + for ; (j-i) < 256 && j < len(chunks)/32; j++ { + values[(j-128)%256] = chunks[32*j : 32*(j+1)] + } + i = j + + // Otherwise, store the previous group in the tree with a + // stem insertion. 
+ vRoot.InsertStem(chunkkey[:31], values, chaindb.Get) + } + + // Write the code size in the account header group + binary.LittleEndian.PutUint64(size[:8], uint64(len(code))) + } + newValues[3] = acc.CodeHash[:] + newValues[4] = size[:] + + // Save every slot into the tree + if acc.Root != types.EmptyRootHash { + var translatedStorage = map[string][][]byte{} + + storageIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{}) + if err != nil { + log.Error("Failed to open storage trie", "root", acc.Root, "error", err) + return err + } + for storageIt.Next() { + // The value is RLP-encoded, decode it + var ( + value []byte // slot value after RLP decoding + safeValue [32]byte // 32-byte aligned value + ) + if err := rlp.DecodeBytes(storageIt.Slot(), &value); err != nil { + return fmt.Errorf("error decoding bytes %x: %w", storageIt.Slot(), err) + } + copy(safeValue[32-len(value):], value) + + slotnr := rawdb.ReadPreimage(chaindb, storageIt.Hash()) + if slotnr == nil { + return fmt.Errorf("could not find preimage for slot %x", storageIt.Hash()) + } + + // if the slot belongs to the header group, store it there - and skip + // calculating the slot key. + slotnrbig := uint256.NewInt(0).SetBytes(slotnr) + if slotnrbig.Cmp(uint256.NewInt(64)) < 0 { + newValues[64+slotnr[31]] = safeValue[:] + continue + } + + // Slot not in the header group, get its tree key + slotkey := tutils.GetTreeKeyStorageSlotWithEvaluatedAddress(addrPoint, slotnr) + + // Create the group if need be + values := translatedStorage[string(slotkey[:31])] + if values == nil { + values = make([][]byte, 256) + } + + // Store value in group + values[slotkey[31]] = safeValue[:] + translatedStorage[string(slotkey[:31])] = values + + // Dump the stuff to disk if we ran out of space + var mem runtime.MemStats + runtime.ReadMemStats(&mem) + if mem.Alloc > 25*1024*1024*1024 { + fmt.Println("Memory usage exceeded threshold, calling mitigation function") + for s, vs := range translatedStorage { + var k [31]byte + copy(k[:], []byte(s)) + // reminder that InsertStem will merge leaves + // if they exist. + vRoot.InsertStem(k[:31], vs, chaindb.Get) + } + translatedStorage = make(map[string][][]byte) + vRoot.FlushAtDepth(2, saveverkle) + } + } + for s, vs := range translatedStorage { + var k [31]byte + copy(k[:], []byte(s)) + vRoot.InsertStem(k[:31], vs, chaindb.Get) + } + storageIt.Release() + if storageIt.Error() != nil { + log.Error("Failed to traverse storage trie", "root", acc.Root, "error", storageIt.Error()) + return storageIt.Error() + } + } + // Finish with storing the complete account header group inside the tree. 
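// For orientation (summarising the assignments above, illustrative only),
// the 256 values of the account header group are laid out as:
//
//	values[0]      account version
//	values[1]      balance (little-endian, padded to 32 bytes)
//	values[2]      nonce (little-endian, padded to 32 bytes)
//	values[3]      code hash
//	values[4]      code size
//	values[64+i]   storage slot i, for slot numbers 0..63
//	values[128+i]  code chunk i, for the first 128 chunks
//
// Larger slot numbers and later code chunks go into their own stem groups,
// as written out above.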
+ vRoot.InsertStem(stem[:31], newValues, chaindb.Get) + + if time.Since(lastReport) > time.Second*8 { + log.Info("Traversing state", "accounts", accounts, "elapsed", common.PrettyDuration(time.Since(start))) + lastReport = time.Now() + } + + var mem runtime.MemStats + runtime.ReadMemStats(&mem) + if mem.Alloc > 25*1024*1024*1024 { + fmt.Println("Memory usage exceeded threshold, calling mitigation function") + vRoot.FlushAtDepth(2, saveverkle) + } + } + if accIt.Error() != nil { + log.Error("Failed to compute commitment", "root", root, "error", accIt.Error()) + return accIt.Error() + } + log.Info("Wrote all leaves", "accounts", accounts, "elapsed", common.PrettyDuration(time.Since(start))) + + vRoot.Commit() + vRoot.Flush(saveverkle) + + log.Info("Conversion complete", "root commitment", fmt.Sprintf("%x", vRoot.Commit().Bytes()), "accounts", accounts, "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + // recurse into each child to ensure they can be loaded from the db. The tree isn't rebuilt // (only its nodes are loaded) so there is no need to flush them, the garbage collector should // take care of that for us. @@ -74,7 +319,7 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error switch node := root.(type) { case *verkle.InternalNode: for i, child := range node.Children() { - childC := child.Commit().Bytes() + childC := child.Commitment().Bytes() childS, err := resolver(childC[:]) if bytes.Equal(childC[:], zero[:]) { @@ -84,7 +329,7 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error return fmt.Errorf("could not find child %x in db: %w", childC, err) } // depth is set to 0, the tree isn't rebuilt so it's not a problem - childN, err := verkle.ParseNode(childS, 0, childC[:]) + childN, err := verkle.ParseNode(childS, 0) if err != nil { return fmt.Errorf("decode error child %x in db: %w", child.Commitment().Bytes(), err) } @@ -144,7 +389,7 @@ func verifyVerkle(ctx *cli.Context) error { if err != nil { return err } - root, err := verkle.ParseNode(serializedRoot, 0, rootC[:]) + root, err := verkle.ParseNode(serializedRoot, 0) if err != nil { return err } @@ -193,7 +438,7 @@ func expandVerkle(ctx *cli.Context) error { if err != nil { return err } - root, err := verkle.ParseNode(serializedRoot, 0, rootC[:]) + root, err := verkle.ParseNode(serializedRoot, 0) if err != nil { return err } @@ -203,7 +448,7 @@ func expandVerkle(ctx *cli.Context) error { root.Get(key, chaindb.Get) } - if err := os.WriteFile("dump.dot", []byte(verkle.ToDot(root)), 0600); err != nil { + if err := os.WriteFile("dump.dot", []byte(verkle.ToDot(root)), 0o600); err != nil { log.Error("Failed to dump file", "err", err) } else { log.Info("Tree was dumped to file", "file", "dump.dot") diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index 16b126057218..24da4911bc14 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -176,6 +176,18 @@ func ImportChain(chain *core.BlockChain, fn string) error { return err } } + // cpuProfile, err := os.Create("cpu.out") + // if err != nil { + // return fmt.Errorf("Error creating CPU profile: %v", err) + // } + // defer cpuProfile.Close() + // err = pprof.StartCPUProfile(cpuProfile) + // if err != nil { + // return fmt.Errorf("Error starting CPU profile: %v", err) + // } + // defer pprof.StopCPUProfile() + // params.ClearVerkleWitnessCosts() + stream := rlp.NewStream(reader, 0) // Run actual the import. 
@@ -374,6 +386,75 @@ func ExportPreimages(db ethdb.Database, fn string) error { return nil } +// ExportOverlayPreimages exports all known hash preimages into the specified file, +// in the same order as expected by the overlay tree migration. +func ExportOverlayPreimages(chain *core.BlockChain, fn string, root common.Hash) error { + log.Info("Exporting preimages", "file", fn) + + fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) + if err != nil { + return err + } + defer fh.Close() + + writer := bufio.NewWriter(fh) + defer writer.Flush() + + statedb, err := chain.State() + if err != nil { + return fmt.Errorf("failed to open statedb: %w", err) + } + + if root == (common.Hash{}) { + root = chain.CurrentBlock().Root + } + + accIt, err := statedb.Snaps().AccountIterator(root, common.Hash{}) + if err != nil { + return err + } + defer accIt.Release() + + count := 0 + for accIt.Next() { + acc, err := types.FullAccount(accIt.Account()) + if err != nil { + return fmt.Errorf("invalid account encountered during traversal: %s", err) + } + addr := rawdb.ReadPreimage(statedb.Database().DiskDB(), accIt.Hash()) + if len(addr) != 20 { + return fmt.Errorf("addr len is zero is not 32: %d", len(addr)) + } + if _, err := writer.Write(addr); err != nil { + return fmt.Errorf("failed to write addr preimage: %w", err) + } + + if acc.HasStorage() { + stIt, err := statedb.Snaps().StorageIterator(root, accIt.Hash(), common.Hash{}) + if err != nil { + return fmt.Errorf("failed to create storage iterator: %w", err) + } + for stIt.Next() { + slotnr := rawdb.ReadPreimage(statedb.Database().DiskDB(), stIt.Hash()) + if len(slotnr) != 32 { + return fmt.Errorf("slotnr not 32 len") + } + if _, err := writer.Write(slotnr); err != nil { + return fmt.Errorf("failed to write slotnr preimage: %w", err) + } + } + stIt.Release() + } + count++ + if count%100000 == 0 { + log.Info("Last exported account", "account", accIt.Hash()) + } + } + + log.Info("Exported preimages", "file", fn) + return nil +} + // exportHeader is used in the export/import flow. When we do an export, // the first element we output is the exportHeader. 
// Whenever a backwards-incompatible change is made, the Version header diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index e0c7a42670e1..c92f49d432b6 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -216,6 +216,11 @@ var ( Usage: "Max number of elements (0 = no limit)", Value: 0, } + TreeRootFlag = &cli.StringFlag{ + Name: "roothash", + Usage: "Root hash of the tree (if empty, use the latest)", + Value: "", + } defaultSyncMode = ethconfig.Defaults.SyncMode SyncModeFlag = &flags.TextMarshalerFlag{ diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 8eb9863da1e2..81563f810705 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -33,6 +33,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/utils" "golang.org/x/crypto/sha3" ) @@ -564,10 +565,25 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header r.Sub(r, header.Number) r.Mul(r, blockReward) r.Div(r, big8) + + if config.IsCancun(header.Number, header.Time) { + uncleCoinbase := utils.GetTreeKeyBalance(uncle.Coinbase.Bytes()) + state.Witness().TouchAddressOnReadAndComputeGas(uncleCoinbase) + } state.AddBalance(uncle.Coinbase, r) r.Div(blockReward, big32) reward.Add(reward, r) } + if config.IsCancun(header.Number, header.Time) { + coinbase := utils.GetTreeKeyBalance(header.Coinbase.Bytes()) + state.Witness().TouchAddressOnReadAndComputeGas(coinbase) + coinbase[31] = utils.VersionLeafKey // mark version + state.Witness().TouchAddressOnReadAndComputeGas(coinbase) + coinbase[31] = utils.NonceLeafKey // mark nonce + state.Witness().TouchAddressOnReadAndComputeGas(coinbase) + coinbase[31] = utils.CodeKeccakLeafKey // mark code keccak + state.Witness().TouchAddressOnReadAndComputeGas(coinbase) + } state.AddBalance(header.Coinbase, reward) } diff --git a/core/block_validator.go b/core/block_validator.go index 3c9ac3dc49d5..b1ceab9d5c6c 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -102,6 +102,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) { return consensus.ErrUnknownAncestor } + fmt.Println("failure here") return consensus.ErrPrunedAncestor } return nil diff --git a/core/blockchain.go b/core/blockchain.go index 3952c31b688f..1e1a10f9bb98 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -18,11 +18,15 @@ package core import ( + "bufio" "errors" "fmt" "io" + "math" "math/big" + "os" "runtime" + "strconv" "strings" "sync" "sync/atomic" @@ -305,6 +309,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis } // Make sure the state associated with the block is available head := bc.CurrentBlock() + + // Declare the end of the verkle transition is need be + if bc.chainConfig.Rules(head.Number, false /* XXX */, head.Time).IsCancun { + bc.stateCache.EndVerkleTransition() + } + if !bc.HasState(head.Root) { // Head state is missing, before the state recovery, find out the // disk layer point of snapshot(if it's enabled). 
Make sure the @@ -401,6 +411,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis Recovery: recover, NoBuild: bc.cacheConfig.SnapshotNoBuild, AsyncBuild: !bc.cacheConfig.SnapshotWait, + Verkle: chainConfig.IsCancun(head.Number, head.Time), } bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root) } @@ -1508,6 +1519,30 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { return bc.insertChain(chain, true) } +func findVerkleConversionBlock() (uint64, error) { + if _, err := os.Stat("conversion.txt"); os.IsNotExist(err) { + return math.MaxUint64, nil + } + + f, err := os.Open("conversion.txt") + if err != nil { + log.Error("Failed to open conversion.txt", "err", err) + return 0, err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + scanner.Scan() + conversionBlock, err := strconv.ParseUint(scanner.Text(), 10, 64) + if err != nil { + log.Error("Failed to parse conversionBlock", "err", err) + return 0, err + } + log.Info("Found conversion block info", "conversionBlock", conversionBlock) + + return conversionBlock, nil +} + // insertChain is the internal implementation of InsertChain, which assumes that // 1) chains are contiguous, and 2) The chain mutex is held. // @@ -1522,6 +1557,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) return 0, nil } + conversionBlock, err := findVerkleConversionBlock() + if err != nil { + return 0, err + } + // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) SenderCacher.RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()), chain) @@ -1703,6 +1743,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) if parent == nil { parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) } + + if parent.Number.Uint64() == conversionBlock { + bc.StartVerkleTransition(parent.Root, emptyVerkleRoot, bc.Config(), &parent.Time) + } statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps) if err != nil { return it.index, err @@ -2332,6 +2376,8 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool { return false } +var emptyVerkleRoot common.Hash + // indexBlocks reindexes or unindexes transactions depending on user configuration func (bc *BlockChain) indexBlocks(tail *uint64, head uint64, done chan struct{}) { defer func() { close(done) }() @@ -2483,3 +2529,15 @@ func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) { func (bc *BlockChain) GetTrieFlushInterval() time.Duration { return time.Duration(bc.flushInterval.Load()) } + +func (bc *BlockChain) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, cancunTime *uint64) { + bc.stateCache.StartVerkleTransition(originalRoot, translatedRoot, chainConfig, cancunTime) +} + +func (bc *BlockChain) EndVerkleTransition() { + bc.stateCache.EndVerkleTransition() +} + +func (bc *BlockChain) AddRootTranslation(originalRoot, translatedRoot common.Hash) { + bc.stateCache.AddRootTranslation(originalRoot, translatedRoot) +} diff --git a/core/chain_makers.go b/core/chain_makers.go index 4e8e80b92b56..87dff7b564d3 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -26,11 +26,13 @@ import ( "github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" 
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" + "github.com/gballet/go-verkle" ) // BlockGen creates blocks for testing. @@ -355,10 +357,125 @@ func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, if err != nil { panic(err) } + if genesis.Config != nil && genesis.Config.IsCancun(genesis.ToBlock().Number(), genesis.ToBlock().Time()) { + blocks, receipts, _, _ := GenerateVerkleChain(genesis.Config, genesis.ToBlock(), engine, db, n, gen) + return db, blocks, receipts + } blocks, receipts := GenerateChain(genesis.Config, genesis.ToBlock(), engine, db, n, gen) return db, blocks, receipts } +func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts, []*verkle.VerkleProof, []verkle.StateDiff) { + if config == nil { + config = params.TestChainConfig + } + proofs := make([]*verkle.VerkleProof, 0, n) + keyvals := make([]verkle.StateDiff, 0, n) + blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n) + chainreader := &fakeChainReader{config: config} + var preStateTrie *trie.VerkleTrie + genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) { + b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} + b.header = makeHeader(chainreader, parent, statedb, b.engine) + preState := statedb.Copy() + fmt.Println("prestate", preState.GetTrie().(*trie.VerkleTrie).ToDot()) + + // Mutate the state and block according to any hard-fork specs + if daoBlock := config.DAOForkBlock; daoBlock != nil { + limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) + if b.header.Number.Cmp(daoBlock) >= 0 && b.header.Number.Cmp(limit) < 0 { + if config.DAOForkSupport { + b.header.Extra = common.CopyBytes(params.DAOForkBlockExtra) + } + } + } + if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(b.header.Number) == 0 { + misc.ApplyDAOHardFork(statedb) + } + // Execute any user modifications to the block + if gen != nil { + gen(i, b) + } + if b.engine != nil { + // Finalize and seal the block + block, err := b.engine.FinalizeAndAssemble(chainreader, b.header, statedb, b.txs, b.uncles, b.receipts, b.withdrawals) + if err != nil { + panic(err) + } + + // Write state changes to db + root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number)) + if err != nil { + panic(fmt.Sprintf("state write error: %v", err)) + } + if err := statedb.Database().TrieDB().Commit(root, false); err != nil { + panic(fmt.Sprintf("trie write error: %v", err)) + } + + // Generate an associated verkle proof + tr := preState.GetTrie() + if !tr.IsVerkle() { + panic("tree should be verkle") + } + + vtr := tr.(*trie.VerkleTrie) + // Make sure all keys are resolved before + // building the proof. Ultimately, node + // resolution can be done with a prefetcher + // or from GetCommitmentsAlongPath. + kvs := make(map[string][]byte) + keys := statedb.Witness().Keys() + for _, key := range keys { + v, err := vtr.GetWithHashedKey(key) + if err != nil { + panic(err) + } + kvs[string(key)] = v + } + + // Initialize the preStateTrie if it is nil, this should + // correspond to the genesis block. This is a workaround + // needed until the main verkle PR is rebased on top of + // PBSS. 
+ if preStateTrie == nil { + preStateTrie = vtr + } + + vtr.Hash() + p, k, err := preStateTrie.ProveAndSerialize(statedb.Witness().Keys(), kvs) + if err != nil { + panic(err) + } + proofs = append(proofs, p) + keyvals = append(keyvals, k) + + // save the current state of the trie for producing the proof for the next block, + // since reading it from disk is broken with the intermediate PBSS-like system we + // have: it will read the post-state as this is the only state present on disk. + // This is a workaround needed until the main verkle PR is rebased on top of PBSS. + preStateTrie = statedb.GetTrie().(*trie.VerkleTrie) + + return block, b.receipts + } + return nil, nil + } + var snaps *snapshot.Tree + for i := 0; i < n; i++ { + triedb := state.NewDatabaseWithConfig(db, nil) + triedb.EndVerkleTransition() + statedb, err := state.New(parent.Root(), triedb, snaps) + if err != nil { + panic(fmt.Sprintf("could not find state for block %d: err=%v, parent root=%x", i, err, parent.Root())) + } + block, receipt := genblock(i, parent, statedb) + blocks[i] = block + receipts[i] = receipt + parent = block + snaps = statedb.Snaps() + } + return blocks, receipts, proofs, keyvals +} + func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header { var time uint64 if parent.Time() == 0 { diff --git a/core/error.go b/core/error.go index 4214ed207a91..2d9fa5463ce7 100644 --- a/core/error.go +++ b/core/error.go @@ -67,6 +67,11 @@ var ( // than init code size limit. ErrMaxInitCodeSizeExceeded = errors.New("max initcode size exceeded") + // ErrInsufficientBalanceWitness is returned if the transaction sender has enough + // funds to cover the transfer, but not enough to pay for witness access/modification + // costs for the transaction + ErrInsufficientBalanceWitness = errors.New("insufficient funds to cover witness access costs for transaction") + // ErrInsufficientFunds is returned if the total cost of executing a transaction // is higher than the balance of the user's account. ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value") diff --git a/core/genesis.go b/core/genesis.go index 33716d0b61df..5c724342f5e2 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -121,10 +121,15 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error { } // deriveHash computes the state root according to the genesis specification. -func (ga *GenesisAlloc) deriveHash() (common.Hash, error) { +func (ga *GenesisAlloc) deriveHash(cfg *params.ChainConfig) (common.Hash, error) { // Create an ephemeral in-memory database for computing hash, // all the derived states will be discarded to not pollute disk. db := state.NewDatabase(rawdb.NewMemoryDatabase()) + // XXX check this is the case + // TODO remove the nil config check once we have rebased, it should never be nil + if cfg != nil && cfg.IsCancun(big.NewInt(int64(0)), 0 /* XXX */) { + db.EndVerkleTransition() + } statedb, err := state.New(types.EmptyRootHash, db, nil) if err != nil { return common.Hash{}, err @@ -143,11 +148,17 @@ func (ga *GenesisAlloc) deriveHash() (common.Hash, error) { // flush is very similar with deriveHash, but the main difference is // all the generated states will be persisted into the given database. // Also, the genesis state specification will be flushed as well. 
-func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error { +func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash, cfg *params.ChainConfig) error { statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil) if err != nil { return err } + + // End the verkle conversion at genesis if the fork block is 0 + if cfg != nil && cfg.IsCancun(big.NewInt(int64(0)), 0 /* XXX */) { + statedb.Database().EndVerkleTransition() + } + for addr, account := range *ga { statedb.AddBalance(addr, account.Balance) statedb.SetCode(addr, account.Code) @@ -171,19 +182,25 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhas if err != nil { return err } + rawdb.WriteGenesisStateSpec(db, blockhash, blob) - return nil + return statedb.Cap(root) // XXX check this is still necessary } // CommitGenesisState loads the stored genesis state with the given block // hash and commits it into the provided trie database. func CommitGenesisState(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error { var alloc GenesisAlloc + var config *params.ChainConfig blob := rawdb.ReadGenesisStateSpec(db, blockhash) if len(blob) != 0 { if err := alloc.UnmarshalJSON(blob); err != nil { return err } + config = rawdb.ReadChainConfig(db, blockhash) + if config == nil { + return errors.New("genesis config missing from db") + } } else { // Genesis allocation is missing and there are several possibilities: // the node is legacy which doesn't persist the genesis allocation or @@ -201,11 +218,12 @@ func CommitGenesisState(db ethdb.Database, triedb *trie.Database, blockhash comm } if genesis != nil { alloc = genesis.Alloc + config = genesis.Config } else { return errors.New("not found") } } - return alloc.flush(db, triedb, blockhash) + return alloc.flush(db, triedb, blockhash, config) } // GenesisAccount is an account in the state of the genesis block. @@ -326,6 +344,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen // We have the genesis block in database(perhaps in ancient database) // but the corresponding state is missing. header := rawdb.ReadHeader(db, stored, 0) + if header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { if genesis == nil { genesis = DefaultGenesisBlock() @@ -438,7 +457,7 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig { // ToBlock returns the genesis block according to genesis specification. func (g *Genesis) ToBlock() *types.Block { - root, err := g.Alloc.deriveHash() + root, err := g.Alloc.deriveHash(g.Config) if err != nil { panic(err) } @@ -510,7 +529,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block // All the checks has passed, flush the states derived from the genesis // specification as well as the specification itself into the provided // database. 
- if err := g.Alloc.flush(db, triedb, block.Hash()); err != nil { + if err := g.Alloc.flush(db, triedb, block.Hash(), g.Config); err != nil { return nil, err } rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty()) diff --git a/core/genesis_test.go b/core/genesis_test.go index 723d1e476bf1..c6df6f59a3ef 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -219,7 +219,7 @@ func TestReadWriteGenesisAlloc(t *testing.T) { {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, {2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}}, } - hash, _ = alloc.deriveHash() + hash, _ = alloc.deriveHash(¶ms.ChainConfig{}) ) blob, _ := json.Marshal(alloc) rawdb.WriteGenesisStateSpec(db, hash, blob) diff --git a/core/state/access_witness.go b/core/state/access_witness.go new file mode 100644 index 000000000000..522b5f308096 --- /dev/null +++ b/core/state/access_witness.go @@ -0,0 +1,410 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package state + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie/utils" +) + +type VerkleStem [31]byte + +// Mode specifies how a tree location has been accessed +// for the byte value: +// * the first bit is set if the branch has been edited +// * the second bit is set if the branch has been read +type Mode byte + +const ( + AccessWitnessReadFlag = Mode(1) + AccessWitnessWriteFlag = Mode(2) +) + +// AccessWitness lists the locations of the state that are being accessed +// during the production of a block. +type AccessWitness struct { + // Branches flags if a given branch has been loaded + Branches map[VerkleStem]Mode + + // Chunks contains the initial value of each address + Chunks map[common.Hash]Mode + + // InitialValue contains either `nil` if the location + // didn't exist before it was accessed, or the value + // that a location had before the execution of this + // block. + InitialValue map[string][]byte + + // Caches which code chunks have been accessed, in order + // to reduce the number of times that GetTreeKeyCodeChunk + // is called. + CodeLocations map[string]map[uint64]struct{} + + statedb *StateDB +} + +func NewAccessWitness(statedb *StateDB) *AccessWitness { + return &AccessWitness{ + Branches: make(map[VerkleStem]Mode), + Chunks: make(map[common.Hash]Mode), + InitialValue: make(map[string][]byte), + CodeLocations: make(map[string]map[uint64]struct{}), + statedb: statedb, + } +} + +func (aw *AccessWitness) HasCodeChunk(addr []byte, chunknr uint64) bool { + if locs, ok := aw.CodeLocations[string(addr)]; ok { + if _, ok = locs[chunknr]; ok { + return true + } + } + + return false +} + +// SetCodeLeafValue does the same thing as SetLeafValue, but for code chunks. 
It +// maintains a cache of which (address, chunk) were calculated, in order to avoid +// calling GetTreeKey more than once per chunk. +func (aw *AccessWitness) SetCachedCodeChunk(addr []byte, chunknr uint64) { + if locs, ok := aw.CodeLocations[string(addr)]; ok { + if _, ok = locs[chunknr]; ok { + return + } + } else { + aw.CodeLocations[string(addr)] = map[uint64]struct{}{} + } + + aw.CodeLocations[string(addr)][chunknr] = struct{}{} +} + +func (aw *AccessWitness) touchAddressOnWrite(addr []byte) (bool, bool, bool) { + var stem VerkleStem + var stemWrite, chunkWrite, chunkFill bool + copy(stem[:], addr[:31]) + + // NOTE: stem, selector access flags already exist in their + // respective maps because this function is called at the end of + // processing a read access event + + if (aw.Branches[stem] & AccessWitnessWriteFlag) == 0 { + stemWrite = true + aw.Branches[stem] |= AccessWitnessWriteFlag + } + + chunkValue := aw.Chunks[common.BytesToHash(addr)] + // if chunkValue.mode XOR AccessWitnessWriteFlag + if ((chunkValue & AccessWitnessWriteFlag) == 0) && ((chunkValue | AccessWitnessWriteFlag) != 0) { + chunkWrite = true + chunkValue |= AccessWitnessWriteFlag + aw.Chunks[common.BytesToHash(addr)] = chunkValue + } + + // TODO charge chunk filling costs if the leaf was previously empty in the state + /* + if chunkWrite { + if _, err := verkleDb.TryGet(addr); err != nil { + chunkFill = true + } + } + */ + + return stemWrite, chunkWrite, chunkFill +} + +// TouchAddress adds any missing addr to the witness and returns respectively +// true if the stem or the stub weren't arleady present. +func (aw *AccessWitness) touchAddress(addr []byte, isWrite bool) (bool, bool, bool, bool, bool) { + var ( + stem [31]byte + stemRead, selectorRead bool + stemWrite, selectorWrite, chunkFill bool + ) + copy(stem[:], addr[:31]) + + // Check for the presence of the stem + if _, hasStem := aw.Branches[stem]; !hasStem { + stemRead = true + aw.Branches[stem] = AccessWitnessReadFlag + } + + // Check for the presence of the leaf selector + if _, hasSelector := aw.Chunks[common.BytesToHash(addr)]; !hasSelector { + selectorRead = true + aw.Chunks[common.BytesToHash(addr)] = AccessWitnessReadFlag + } + + if isWrite { + stemWrite, selectorWrite, chunkFill = aw.touchAddressOnWrite(addr) + } + + return stemRead, selectorRead, stemWrite, selectorWrite, chunkFill +} + +func (aw *AccessWitness) touchAddressAndChargeGas(addr []byte, isWrite bool) uint64 { + var gas uint64 + + stemRead, selectorRead, stemWrite, selectorWrite, selectorFill := aw.touchAddress(addr, isWrite) + + if stemRead { + gas += params.WitnessBranchReadCost + } + if selectorRead { + gas += params.WitnessChunkReadCost + } + if stemWrite { + gas += params.WitnessBranchWriteCost + } + if selectorWrite { + gas += params.WitnessChunkWriteCost + } + if selectorFill { + gas += params.WitnessChunkFillCost + } + + return gas +} + +func (aw *AccessWitness) TouchAddressOnWriteAndComputeGas(addr []byte) uint64 { + return aw.touchAddressAndChargeGas(addr, true) +} + +func (aw *AccessWitness) TouchAddressOnReadAndComputeGas(addr []byte) uint64 { + return aw.touchAddressAndChargeGas(addr, false) +} + +// Merge is used to merge the witness that got generated during the execution +// of a tx, with the accumulation of witnesses that were generated during the +// execution of all the txs preceding this one in a given block. 
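A rough usage sketch of that accumulation (illustrative only; blockWitness and txWitnesses are hypothetical names, not part of this patch):

	// Fold each per-transaction witness into the block-level witness.
	blockWitness := state.NewAccessWitness(statedb)
	for _, txWitness := range txWitnesses {
		blockWitness.Merge(txWitness)
	}
	// Keys() now lists every (stem, leaf index) location touched in the block.
	_ = blockWitness.Keys()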
+func (aw *AccessWitness) Merge(other *AccessWitness) { + for k := range other.Branches { + if _, ok := aw.Branches[k]; !ok { + aw.Branches[k] = other.Branches[k] + } + } + + for k, chunk := range other.Chunks { + if _, ok := aw.Chunks[k]; !ok { + aw.Chunks[k] = chunk + } + } + + for k, v := range other.InitialValue { + if _, ok := aw.InitialValue[k]; !ok { + aw.InitialValue[k] = v + } + } + + // TODO see if merging improves performance + //for k, v := range other.addrToPoint { + //if _, ok := aw.addrToPoint[k]; !ok { + //aw.addrToPoint[k] = v + //} + //} +} + +// Key returns, predictably, the list of keys that were touched during the +// buildup of the access witness. +func (aw *AccessWitness) Keys() [][]byte { + keys := make([][]byte, 0, len(aw.Chunks)) + for key := range aw.Chunks { + var k [32]byte + copy(k[:], key[:]) + keys = append(keys, k[:]) + } + return keys +} + +func (aw *AccessWitness) KeyVals() map[string][]byte { + result := make(map[string][]byte) + for k, v := range aw.InitialValue { + result[k] = v + } + return result +} + +func (aw *AccessWitness) Copy() *AccessWitness { + naw := &AccessWitness{ + Branches: make(map[VerkleStem]Mode), + Chunks: make(map[common.Hash]Mode), + InitialValue: make(map[string][]byte), + } + + naw.Merge(aw) + + return naw +} + +func (aw *AccessWitness) GetTreeKeyVersionCached(addr []byte) []byte { + return aw.statedb.db.(*cachingDB).addrToPoint.GetTreeKeyVersionCached(addr) +} + +func (aw *AccessWitness) TouchAndChargeProofOfAbsence(addr []byte) uint64 { + var ( + balancekey, cskey, ckkey, noncekey [32]byte + gas uint64 + ) + + // Only evaluate the polynomial once + versionkey := aw.GetTreeKeyVersionCached(addr[:]) + copy(balancekey[:], versionkey) + balancekey[31] = utils.BalanceLeafKey + copy(noncekey[:], versionkey) + noncekey[31] = utils.NonceLeafKey + copy(cskey[:], versionkey) + cskey[31] = utils.CodeSizeLeafKey + copy(ckkey[:], versionkey) + ckkey[31] = utils.CodeKeccakLeafKey + + gas += aw.TouchAddressOnReadAndComputeGas(versionkey) + gas += aw.TouchAddressOnReadAndComputeGas(balancekey[:]) + gas += aw.TouchAddressOnReadAndComputeGas(cskey[:]) + gas += aw.TouchAddressOnReadAndComputeGas(ckkey[:]) + gas += aw.TouchAddressOnReadAndComputeGas(noncekey[:]) + return gas +} + +func (aw *AccessWitness) TouchAndChargeMessageCall(addr []byte) uint64 { + var ( + gas uint64 + cskey [32]byte + ) + // Only evaluate the polynomial once + versionkey := aw.GetTreeKeyVersionCached(addr[:]) + copy(cskey[:], versionkey) + cskey[31] = utils.CodeSizeLeafKey + gas += aw.TouchAddressOnReadAndComputeGas(versionkey) + gas += aw.TouchAddressOnReadAndComputeGas(cskey[:]) + return gas +} + +func (aw *AccessWitness) TouchAndChargeValueTransfer(callerAddr, targetAddr []byte) uint64 { + var gas uint64 + gas += aw.TouchAddressOnWriteAndComputeGas(utils.GetTreeKeyBalance(callerAddr[:])) + gas += aw.TouchAddressOnWriteAndComputeGas(utils.GetTreeKeyBalance(targetAddr[:])) + return gas +} + +// TouchAndChargeContractCreateInit charges access costs to initiate +// a contract creation +func (aw *AccessWitness) TouchAndChargeContractCreateInit(addr []byte, createSendsValue bool) uint64 { + var ( + balancekey, ckkey, noncekey [32]byte + gas uint64 + ) + + // Only evaluate the polynomial once + versionkey := aw.GetTreeKeyVersionCached(addr[:]) + copy(balancekey[:], versionkey) + balancekey[31] = utils.BalanceLeafKey + copy(noncekey[:], versionkey) + noncekey[31] = utils.NonceLeafKey + copy(ckkey[:], versionkey) + ckkey[31] = utils.CodeKeccakLeafKey + + gas += 
aw.TouchAddressOnWriteAndComputeGas(versionkey) + gas += aw.TouchAddressOnWriteAndComputeGas(noncekey[:]) + if createSendsValue { + gas += aw.TouchAddressOnWriteAndComputeGas(balancekey[:]) + } + gas += aw.TouchAddressOnWriteAndComputeGas(ckkey[:]) + return gas +} + +// TouchAndChargeContractCreateCompleted charges access access costs after +// the completion of a contract creation to populate the created account in +// the tree +func (aw *AccessWitness) TouchAndChargeContractCreateCompleted(addr []byte, withValue bool) uint64 { + var ( + balancekey, cskey, ckkey, noncekey [32]byte + gas uint64 + ) + + // Only evaluate the polynomial once + versionkey := aw.GetTreeKeyVersionCached(addr[:]) + copy(balancekey[:], versionkey) + balancekey[31] = utils.BalanceLeafKey + copy(noncekey[:], versionkey) + noncekey[31] = utils.NonceLeafKey + copy(cskey[:], versionkey) + cskey[31] = utils.CodeSizeLeafKey + copy(ckkey[:], versionkey) + ckkey[31] = utils.CodeKeccakLeafKey + + gas += aw.TouchAddressOnWriteAndComputeGas(versionkey) + gas += aw.TouchAddressOnWriteAndComputeGas(balancekey[:]) + gas += aw.TouchAddressOnWriteAndComputeGas(cskey[:]) + gas += aw.TouchAddressOnWriteAndComputeGas(ckkey[:]) + gas += aw.TouchAddressOnWriteAndComputeGas(noncekey[:]) + return gas +} + +func (aw *AccessWitness) TouchTxOriginAndComputeGas(originAddr []byte) uint64 { + var ( + balancekey, cskey, ckkey, noncekey [32]byte + gas uint64 + ) + + // Only evaluate the polynomial once + versionkey := aw.GetTreeKeyVersionCached(originAddr[:]) + copy(balancekey[:], versionkey) + balancekey[31] = utils.BalanceLeafKey + copy(noncekey[:], versionkey) + noncekey[31] = utils.NonceLeafKey + copy(cskey[:], versionkey) + cskey[31] = utils.CodeSizeLeafKey + copy(ckkey[:], versionkey) + ckkey[31] = utils.CodeKeccakLeafKey + + gas += aw.TouchAddressOnReadAndComputeGas(versionkey) + gas += aw.TouchAddressOnReadAndComputeGas(cskey[:]) + gas += aw.TouchAddressOnReadAndComputeGas(ckkey[:]) + gas += aw.TouchAddressOnWriteAndComputeGas(noncekey[:]) + gas += aw.TouchAddressOnWriteAndComputeGas(balancekey[:]) + + return gas +} + +func (aw *AccessWitness) TouchTxExistingAndComputeGas(targetAddr []byte, sendsValue bool) uint64 { + var ( + balancekey, cskey, ckkey, noncekey [32]byte + gas uint64 + ) + + // Only evaluate the polynomial once + versionkey := aw.GetTreeKeyVersionCached(targetAddr[:]) + copy(balancekey[:], versionkey) + balancekey[31] = utils.BalanceLeafKey + copy(noncekey[:], versionkey) + noncekey[31] = utils.NonceLeafKey + copy(cskey[:], versionkey) + cskey[31] = utils.CodeSizeLeafKey + copy(ckkey[:], versionkey) + ckkey[31] = utils.CodeKeccakLeafKey + + gas += aw.TouchAddressOnReadAndComputeGas(versionkey) + gas += aw.TouchAddressOnReadAndComputeGas(cskey[:]) + gas += aw.TouchAddressOnReadAndComputeGas(ckkey[:]) + gas += aw.TouchAddressOnReadAndComputeGas(noncekey[:]) + gas += aw.TouchAddressOnReadAndComputeGas(balancekey[:]) + + if sendsValue { + gas += aw.TouchAddressOnWriteAndComputeGas(balancekey[:]) + } + return gas +} diff --git a/core/state/database.go b/core/state/database.go index a3b6322ae313..7d7b3b14572f 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -19,6 +19,7 @@ package state import ( "errors" "fmt" + "sync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/lru" @@ -26,8 +27,11 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/params" 
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" ) const ( @@ -44,7 +48,7 @@ type Database interface { OpenTrie(root common.Hash) (Trie, error) // OpenStorageTrie opens the storage trie of an account. - OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error) + OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, main Trie) (Trie, error) // CopyTrie returns an independent copy of the given trie. CopyTrie(Trie) Trie @@ -60,6 +64,34 @@ type Database interface { // TrieDB retrieves the low level trie database used for data storage. TrieDB() *trie.Database + + StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, cancunTime *uint64) + + EndVerkleTransition() + + InTransition() bool + + Transitioned() bool + + SetCurrentSlotHash(hash common.Hash) + + GetCurrentAccountAddress() *common.Address + + SetCurrentAccountAddress(common.Address) + + GetCurrentAccountHash() common.Hash + + GetCurrentSlotHash() common.Hash + + SetStorageProcessed(bool) + + GetStorageProcessed() bool + + GetCurrentPreimageOffset() int64 + + SetCurrentPreimageOffset(int64) + + AddRootTranslation(originalRoot, translatedRoot common.Hash) } // Trie is a Ethereum Merkle Patricia trie. @@ -130,6 +162,9 @@ type Trie interface { // nodes of the longest existing prefix of the key (at least the root), ending // with the node that proves the absence of the key. Prove(key []byte, proofDb ethdb.KeyValueWriter) error + + // IsVerkle returns true if the trie is verkle-tree based + IsVerkle() bool } // NewDatabase creates a backing store for state. The returned database is safe for @@ -148,6 +183,7 @@ func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), triedb: trie.NewDatabaseWithConfig(db, config), + addrToPoint: utils.NewPointCache(), } } @@ -158,18 +194,98 @@ func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database { codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), triedb: triedb, + addrToPoint: utils.NewPointCache(), } } +func (db *cachingDB) InTransition() bool { + return db.started && !db.ended +} + +func (db *cachingDB) Transitioned() bool { + return db.ended +} + +// Fork implements the fork +func (db *cachingDB) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, cancunTime *uint64) { + fmt.Println(` + __________.__ .__ .__ __ .__ .__ ____ + \__ ___| |__ ____ ____ | | ____ ______ | |__ _____ _____/ |_ | |__ _____ ______ __ _ _|__| ____ / ___\ ______ + | | | | \_/ __ \ _/ __ \| | _/ __ \\____ \| | \\__ \ / \ __\ | | \\__ \ / ___/ \ \/ \/ | |/ \ / /_/ / ___/ + | | | Y \ ___/ \ ___/| |_\ ___/| |_> | Y \/ __ \| | | | | Y \/ __ \_\___ \ \ /| | | \\___ /\___ \ + |____| |___| /\___ \___ |____/\___ | __/|___| (____ |___| |__| |___| (____ /_____/ \/\_/ |__|___| /_____//_____/ + |__|`) + db.started = true + db.AddTranslation(originalRoot, translatedRoot) + db.baseRoot = originalRoot + // initialize so that the first storage-less accounts are processed + db.StorageProcessed = true + chainConfig.CancunTime = cancunTime +} + +func (db *cachingDB) EndVerkleTransition() { + if !db.started { + 
db.started = true + } + + fmt.Println(` + __________.__ .__ .__ __ .__ .__ .___ .___ + \__ ___| |__ ____ ____ | | ____ ______ | |__ _____ _____/ |_ | |__ _____ ______ | | _____ ____ __| _/____ __| _/ + | | | | \_/ __ \ _/ __ \| | _/ __ \\____ \| | \\__ \ / \ __\ | | \\__ \ / ___/ | | \__ \ / \ / __ _/ __ \ / __ | + | | | Y \ ___/ \ ___/| |_\ ___/| |_> | Y \/ __ \| | | | | Y \/ __ \_\___ \ | |__/ __ \| | / /_/ \ ___// /_/ | + |____| |___| /\___ \___ |____/\___ | __/|___| (____ |___| |__| |___| (____ /_____/ |____(____ |___| \____ |\___ \____ | + |__|`) + db.ended = true +} + +func (db *cachingDB) AddTranslation(orig, trans common.Hash) { + // TODO make this persistent + db.translatedRootsLock.Lock() + defer db.translatedRootsLock.Unlock() + db.translatedRoots[db.translationIndex] = trans + db.origRoots[db.translationIndex] = orig + db.translationIndex = (db.translationIndex + 1) % len(db.translatedRoots) +} + +func (db *cachingDB) getTranslation(orig common.Hash) common.Hash { + db.translatedRootsLock.RLock() + defer db.translatedRootsLock.RUnlock() + for i, o := range db.origRoots { + if o == orig { + return db.translatedRoots[i] + } + } + return common.Hash{} +} + type cachingDB struct { disk ethdb.KeyValueStore codeSizeCache *lru.Cache[common.Hash, int] codeCache *lru.SizeConstrainedCache[common.Hash, []byte] triedb *trie.Database + + // Verkle specific fields + // TODO ensure that this info is in the DB + started, ended bool + translatedRoots [32]common.Hash // hash of the translated root, for opening + origRoots [32]common.Hash + translationIndex int + translatedRootsLock sync.RWMutex + + addrToPoint *utils.PointCache + + baseRoot common.Hash // hash of the read-only base tree + CurrentAccountAddress *common.Address // addresss of the last translated account + CurrentSlotHash common.Hash // hash of the last translated storage slot + CurrentPreimageOffset int64 // next byte to read from the preimage file + + // Mark whether the storage for an account has been processed. This is useful if the + // maximum number of leaves of the conversion is reached before the whole storage is + // processed. + StorageProcessed bool } -// OpenTrie opens the main account trie at a specific root hash. -func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { +func (db *cachingDB) openMPTTrie(root common.Hash) (Trie, error) { tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb) if err != nil { return nil, err @@ -177,8 +293,57 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { return tr, nil } -// OpenStorageTrie opens the storage trie of an account. -func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error) { +func (db *cachingDB) openVKTrie(root common.Hash) (Trie, error) { + payload, err := db.DiskDB().Get(trie.FlatDBVerkleNodeKeyPrefix) + if err != nil { + return trie.NewVerkleTrie(verkle.New(), db.triedb, db.addrToPoint, db.ended), nil + } + + r, err := verkle.ParseNode(payload, 0) + if err != nil { + panic(err) + } + return trie.NewVerkleTrie(r, db.triedb, db.addrToPoint, db.ended), err +} + +// OpenTrie opens the main account trie at a specific root hash. +func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { + var ( + mpt Trie + err error + ) + + if db.started { + vkt, err := db.openVKTrie(db.getTranslation(root)) + if err != nil { + return nil, err + } + + // If the verkle conversion has ended, return a single + // verkle trie. 
+ if db.ended { + return vkt, nil + } + + // Otherwise, return a transition trie, with a base MPT + // trie and an overlay, verkle trie. + mpt, err = db.openMPTTrie(db.baseRoot) + if err != nil { + return nil, err + } + + return trie.NewTransitionTree(mpt.(*trie.SecureTrie), vkt.(*trie.VerkleTrie), false), nil + } else { + mpt, err = db.openMPTTrie(root) + if err != nil { + return nil, err + } + } + + return mpt, nil +} + +func (db *cachingDB) openStorageMPTrie(stateRoot common.Hash, address common.Address, root common.Hash, _ Trie) (Trie, error) { tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, crypto.Keccak256Hash(address.Bytes()), root), db.triedb) if err != nil { return nil, err @@ -186,11 +351,33 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre return tr, nil } +// OpenStorageTrie opens the storage trie of an account +func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) { + mpt, err := db.openStorageMPTrie(stateRoot, address, root, nil) + if db.started && err == nil { + // Return a "storage trie" that is an adapter between the storge MPT + // and the unique verkle tree. + switch self := self.(type) { + case *trie.VerkleTrie: + return trie.NewTransitionTree(mpt.(*trie.SecureTrie), self, true), nil + case *trie.TransitionTrie: + return trie.NewTransitionTree(mpt.(*trie.SecureTrie), self.Overlay(), true), nil + default: + panic("unexpected trie type") + } + } + return mpt, err +} + // CopyTrie returns an independent copy of the given trie. func (db *cachingDB) CopyTrie(t Trie) Trie { switch t := t.(type) { case *trie.StateTrie: return t.Copy() + case *trie.TransitionTrie: + return t.Copy() + case *trie.VerkleTrie: + return t.Copy() default: panic(fmt.Errorf("unknown trie type %T", t)) } @@ -246,3 +433,51 @@ func (db *cachingDB) DiskDB() ethdb.KeyValueStore { func (db *cachingDB) TrieDB() *trie.Database { return db.triedb } + +func (db *cachingDB) GetTreeKeyHeader(addr []byte) *verkle.Point { + return db.addrToPoint.GetTreeKeyHeader(addr) +} + +func (db *cachingDB) SetCurrentAccountAddress(addr common.Address) { + db.CurrentAccountAddress = &addr +} + +func (db *cachingDB) GetCurrentAccountHash() common.Hash { + var addrHash common.Hash + if db.CurrentAccountAddress != nil { + addrHash = crypto.Keccak256Hash(db.CurrentAccountAddress[:]) + } + return addrHash +} + +func (db *cachingDB) GetCurrentAccountAddress() *common.Address { + return db.CurrentAccountAddress +} + +func (db *cachingDB) GetCurrentPreimageOffset() int64 { + return db.CurrentPreimageOffset +} + +func (db *cachingDB) SetCurrentPreimageOffset(offset int64) { + db.CurrentPreimageOffset = offset +} + +func (db *cachingDB) SetCurrentSlotHash(hash common.Hash) { + db.CurrentSlotHash = hash +} + +func (db *cachingDB) GetCurrentSlotHash() common.Hash { + return db.CurrentSlotHash +} + +func (db *cachingDB) SetStorageProcessed(processed bool) { + db.StorageProcessed = processed +} + +func (db *cachingDB) GetStorageProcessed() bool { + return db.StorageProcessed +} + +func (db *cachingDB) AddRootTranslation(originalRoot, translatedRoot common.Hash) { + db.AddTranslation(originalRoot, translatedRoot) +} diff --git a/core/state/iterator.go b/core/state/iterator.go index bb9af082061f..26846730d10e 100644 --- a/core/state/iterator.go +++ b/core/state/iterator.go @@ -82,6 +82,15 @@ func (it *nodeIterator) step() error { if err != nil { return err } + + // If the trie is a verkle trie, then the data and state + // are 
the same tree, and as a result both iterators are + // the same. This is a hack meant for both tree types to + // work. + // XXX check if this is still needed + if _, ok := it.state.trie.(*trie.VerkleTrie); ok { + it.dataIt = it.stateIt + } } // If we had data nodes previously, we surely have at least state nodes if it.dataIt != nil { @@ -106,10 +115,11 @@ func (it *nodeIterator) step() error { it.state, it.stateIt = nil, nil return nil } - // If the state trie node is an internal entry, leave as is + // If the state trie node is an internal entry, leave as is. if !it.stateIt.Leaf() { return nil } + // Otherwise we've reached an account node, initiate data iteration var account types.StateAccount if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil { @@ -123,7 +133,7 @@ func (it *nodeIterator) step() error { address := common.BytesToAddress(preimage) // Traverse the storage slots belong to the account - dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, address, account.Root) + dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, address, account.Root, nil) if err != nil { return err } diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index efc0fc26afdf..ed01170941c1 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -23,6 +23,7 @@ import ( "fmt" "sync" + "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" @@ -154,6 +155,7 @@ type Config struct { Recovery bool // Indicator that the snapshots is in the recovery mode NoBuild bool // Indicator that the snapshots generation is disallowed AsyncBuild bool // The snapshot generation is allowed to be constructed asynchronously + Verkle bool // True if verkle trees are enabled } // Tree is an Ethereum state snapshot tree. 
It consists of one persistent base @@ -213,6 +215,18 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root if err != nil { log.Warn("Failed to load snapshot", "err", err) if !config.NoBuild { + if config.Verkle { + snap.layers = map[common.Hash]snapshot{ + root: &diskLayer{ + diskdb: diskdb, + triedb: triedb, + root: root, + cache: fastcache.New(config.CacheSize * 1024 * 1024), + }, + } + return snap, nil + } + log.Warn("Failed to load snapshot, regenerating", "err", err) snap.Rebuild(root) return snap, nil } diff --git a/core/state/state_object.go b/core/state/state_object.go index cd72f3fb9b91..a250d48ffabb 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -102,6 +102,7 @@ func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *s if acct == nil { acct = types.NewEmptyStateAccount() } + return &stateObject{ db: db, address: address, @@ -145,7 +146,7 @@ func (s *stateObject) getTrie() (Trie, error) { s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root) } if s.trie == nil { - tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root) + tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie) if err != nil { return nil, err } @@ -222,6 +223,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { } value.SetBytes(val) } + s.originStorage[key] = value return value } diff --git a/core/state/statedb.go b/core/state/statedb.go index fdaeacc6b3d9..2a61ea7f1174 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,6 +18,7 @@ package state import ( + "encoding/binary" "errors" "fmt" "math/big" @@ -36,6 +37,9 @@ import ( "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/triestate" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" ) type revision struct { @@ -118,6 +122,9 @@ type StateDB struct { // Transient storage transientStorage transientStorage + // Verkle witness + witness *AccessWitness + // Journal of state modifications. This is the backbone of // Snapshot and RevertToSnapshot. journal *journal @@ -170,12 +177,50 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) transientStorage: newTransientStorage(), hasher: crypto.NewKeccakState(), } + if tr.IsVerkle() { + sdb.witness = NewAccessWitness(sdb) + if sdb.snaps == nil { + snapconfig := snapshot.Config{ + CacheSize: 256, + Recovery: false, + NoBuild: false, + AsyncBuild: false, + Verkle: true, + } + sdb.snaps, err = snapshot.New(snapconfig, db.DiskDB(), db.TrieDB(), root) + if err != nil { + return nil, err + } + } + } if sdb.snaps != nil { - sdb.snap = sdb.snaps.Snapshot(root) + if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap == nil { + if db, ok := db.(*cachingDB); ok { + trans := db.getTranslation(root) + if trans != (common.Hash{}) { + sdb.snap = sdb.snaps.Snapshot(trans) + } + } + } } return sdb, nil } +func (s *StateDB) Snaps() *snapshot.Tree { + return s.snaps +} + +func (s *StateDB) Witness() *AccessWitness { + if s.witness == nil { + s.witness = NewAccessWitness(s) + } + return s.witness +} + +func (s *StateDB) SetWitness(aw *AccessWitness) { + s.witness = aw +} + // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the // commit phase, most of the needed data is already hot. 
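A minimal sketch of the witness gas accounting wired up above, assuming a verkle-backed StateDB (addr is a hypothetical address; the costs are the params.Witness* constants referenced in access_witness.go):

	w := statedb.Witness()
	key := utils.GetTreeKeyBalance(addr.Bytes())     // 32-byte tree key: 31-byte stem + 1-byte leaf index
	first := w.TouchAddressOnReadAndComputeGas(key)  // first touch pays branch-read + chunk-read costs
	again := w.TouchAddressOnReadAndComputeGas(key)  // repeated touch of the same location adds nothing
	_, _ = first, again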
@@ -184,7 +229,7 @@ func (s *StateDB) StartPrefetcher(namespace string) { s.prefetcher.close() s.prefetcher = nil } - if s.snap != nil { + if s.snap != nil && !s.trie.IsVerkle() { s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace) } } @@ -546,6 +591,41 @@ func (s *StateDB) updateStateObject(obj *stateObject) { if err := s.trie.UpdateAccount(addr, &obj.data); err != nil { s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err)) } + if s.trie.IsVerkle() && obj.dirtyCode { + var ( + chunks = trie.ChunkifyCode(obj.code) + values [][]byte + key []byte + err error + ) + for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+32, chunknr+1 { + groupOffset := (chunknr + 128) % 256 + if groupOffset == 0 /* start of new group */ || chunknr == 0 /* first chunk in header group */ { + values = make([][]byte, verkle.NodeWidth) + key = utils.GetTreeKeyCodeChunkWithEvaluatedAddress(obj.db.db.(*cachingDB).GetTreeKeyHeader(obj.address[:]), uint256.NewInt(chunknr)) + } + values[groupOffset] = chunks[i : i+32] + + // Reuse the calculated key to also update the code size. + if i == 0 { + cs := make([]byte, 32) + binary.LittleEndian.PutUint64(cs, uint64(len(obj.code))) + values[utils.CodeSizeLeafKey] = cs + } + + if groupOffset == 255 || len(chunks)-i <= 32 { + switch t := s.trie.(type) { + case *trie.VerkleTrie: + err = t.UpdateStem(key[:31], values) + case *trie.TransitionTrie: + err = t.UpdateStem(key[:31], values) + } + if err != nil { + s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err)) + } + } + } + } // Cache the data until commit. Note, this update mechanism is not symmetric // to the deletion, because whereas it is enough to track account updates // at commit time, deletions need tracking at transaction boundary level to @@ -786,6 +866,9 @@ func (s *StateDB) Copy() *StateDB { snaps: s.snaps, snap: s.snap, } + if s.witness != nil { + state.witness = s.witness.Copy() + } // Copy the dirty states, logs, and preimages for addr := range s.journal.dirties { // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527), @@ -968,7 +1051,11 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // to pull useful data from disk. for addr := range s.stateObjectsPending { if obj := s.stateObjects[addr]; !obj.deleted { - obj.updateRoot() + if s.trie.IsVerkle() { + obj.updateTrie() + } else { + obj.updateRoot() + } } } // Now we're about to start to write changes to the trie. The trie is so far @@ -1023,7 +1110,9 @@ func (s *StateDB) clearJournalAndRefund() { // slots inside as deleted. func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) { start := time.Now() - tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root) + tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie) + // XXX NOTE: it might just be possible to use an empty trie here, as verkle will not + // delete anything in the tree. if err != nil { return false, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err) } @@ -1150,6 +1239,21 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A return incomplete, nil } +// GetTrie returns the account trie. 
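The code-chunk grouping used by updateStateObject above can be checked in isolation (codeChunkOffset is a hypothetical helper, not part of this patch):

	// codeChunkOffset mirrors groupOffset = (chunknr + 128) % 256: chunks 0..127
	// share the account header group (slots 128..255); each later run of 256
	// chunks starts a fresh group at slot 0.
	func codeChunkOffset(chunknr uint64) byte {
		return byte((chunknr + 128) % 256)
	}
	// codeChunkOffset(0) == 128, codeChunkOffset(127) == 255, codeChunkOffset(128) == 0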
+func (s *StateDB) GetTrie() Trie { + return s.trie +} + +// XXX check it's still needed +func (s *StateDB) Cap(root common.Hash) error { + if s.snaps != nil { + return s.snaps.Cap(root, 0) + } + // pre-verkle path: noop if s.snaps hasn't been + // initialized. + return nil +} + // Commit writes the state to the underlying in-memory trie database. // Once the state is committed, tries cached in stateDB (including account // trie, storage tries) will no longer be functional. A new state instance diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 66dda238e11e..e6479076fa98 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -751,7 +751,10 @@ func TestMissingTrieNodes(t *testing.T) { memDb := rawdb.NewMemoryDatabase() db := NewDatabase(memDb) var root common.Hash - state, _ := New(types.EmptyRootHash, db, nil) + state, err := New(types.EmptyRootHash, db, nil) + if err != nil { + panic("nil stte") + } addr := common.BytesToAddress([]byte("so")) { state.SetBalance(addr, big.NewInt(1)) @@ -783,7 +786,7 @@ func TestMissingTrieNodes(t *testing.T) { } // Modify the state state.SetBalance(addr, big.NewInt(2)) - root, err := state.Commit(0, false) + root, err = state.Commit(0, false) if err == nil { t.Fatalf("expected error, got root :%x", root) } @@ -1070,3 +1073,27 @@ func TestResetObject(t *testing.T) { t.Fatalf("Unexpected storage slot value %v", slot) } } + +// Test that an account with more than 128 pieces of code overflows +// correctly into the next group. +func TestCodeChunkOverflow(t *testing.T) { + // Create an empty state database + db := rawdb.NewMemoryDatabase() + state, _ := New(common.Hash{}, NewDatabaseWithConfig(db, nil), nil) + + // Update it with some accounts + addr := common.BytesToAddress([]byte{1}) + state.AddBalance(addr, big.NewInt(int64(11))) + state.SetNonce(addr, uint64(42)) + state.SetState(addr, common.BytesToHash([]byte{1, 1, 1}), common.BytesToHash([]byte{1, 1, 1, 1})) + code := make([]byte, 31*256) + for i := range code { + code[i] = 1 + } + state.SetCode(addr, code) + + root := state.IntermediateRoot(false) + if err := state.Database().TrieDB().Commit(root, false); err != nil { + t.Errorf("can not commit trie %v to persistent database", root.Hex()) + } +} diff --git a/core/state/sync_test.go b/core/state/sync_test.go index b065ad8355c3..db0b23a8e1b4 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -70,7 +70,10 @@ func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) { state.updateStateObject(obj) accounts = append(accounts, acc) } - root, _ := state.Commit(0, false) + root, err := state.Commit(0, false) + if err != nil { + panic(err) + } // Return the generated state return db, sdb, root, accounts diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 4e8fd1e10f57..6c5c158cc239 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -302,7 +302,7 @@ func (sf *subfetcher) loop() { } sf.trie = trie } else { - trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root) + trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, nil /* safe to set to nil for now, as there is no prefetcher for verkle */) if err != nil { log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) return diff --git a/core/state_processor.go b/core/state_processor.go index fcaf5a8ff3c9..744538d03e1a 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -17,18 +17,31 @@ package core import ( + "bufio" + 
"bytes" + "encoding/binary" "errors" "fmt" + "io" "math/big" + "os" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + tutils "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" ) // StateProcessor is a basic Processor, which takes care of transitioning @@ -95,15 +108,217 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg if len(withdrawals) > 0 && !p.config.IsShanghai(block.Number(), block.Time()) { return nil, nil, 0, errors.New("withdrawals before shanghai") } + + // Overlay tree migration logic + migrdb := statedb.Database() + + // verkle transition: if the conversion process is in progress, move + // N values from the MPT into the verkle tree. + if migrdb.InTransition() { + var ( + now = time.Now() + tt = statedb.GetTrie().(*trie.TransitionTrie) + mpt = tt.Base() + vkt = tt.Overlay() + hasPreimagesBin = false + preimageSeek = migrdb.GetCurrentPreimageOffset() + fpreimages *bufio.Reader + ) + + // TODO: avoid opening the preimages file here and make it part of, potentially, statedb.Database(). + filePreimages, err := os.Open("preimages.bin") + if err != nil { + // fallback on reading the db + log.Warn("opening preimage file", "error", err) + } else { + defer filePreimages.Close() + if _, err := filePreimages.Seek(preimageSeek, io.SeekStart); err != nil { + return nil, nil, 0, fmt.Errorf("seeking preimage file: %s", err) + } + fpreimages = bufio.NewReader(filePreimages) + hasPreimagesBin = true + } + + accIt, err := statedb.Snaps().AccountIterator(mpt.Hash(), migrdb.GetCurrentAccountHash()) + if err != nil { + return nil, nil, 0, err + } + defer accIt.Release() + accIt.Next() + + // If we're about to start with the migration process, we have to read the first account hash preimage. + if migrdb.GetCurrentAccountAddress() == nil { + var addr common.Address + if hasPreimagesBin { + if _, err := io.ReadFull(fpreimages, addr[:]); err != nil { + return nil, nil, 0, fmt.Errorf("reading preimage file: %s", err) + } + } else { + addr = common.BytesToAddress(rawdb.ReadPreimage(migrdb.DiskDB(), accIt.Hash())) + if len(addr) != 20 { + return nil, nil, 0, fmt.Errorf("addr len is zero is not 32: %d", len(addr)) + } + } + migrdb.SetCurrentAccountAddress(addr) + if migrdb.GetCurrentAccountHash() != accIt.Hash() { + return nil, nil, 0, fmt.Errorf("preimage file does not match account hash: %s != %s", crypto.Keccak256Hash(addr[:]), accIt.Hash()) + } + preimageSeek += int64(len(addr)) + } + + const maxMovedCount = 10000 + // mkv will be assiting in the collection of up to maxMovedCount key values to be migrated to the VKT. + // It has internal caches to do efficient MPT->VKT key calculations, which will be discarded after + // this function. + mkv := &keyValueMigrator{vktLeafData: make(map[string]*verkle.BatchNewLeafNodeData)} + // move maxCount accounts into the verkle tree, starting with the + // slots from the previous account. 
+ count := 0 + + // if less than maxCount slots were moved, move to the next account + for count < maxMovedCount { + acc, err := types.FullAccount(accIt.Account()) + if err != nil { + log.Error("Invalid account encountered during traversal", "error", err) + return nil, nil, 0, err + } + vkt.SetStorageRootConversion(*migrdb.GetCurrentAccountAddress(), acc.Root) + + // Start with processing the storage, because once the account is + // converted, the `stateRoot` field loses its meaning. Which means + // that it opens the door to a situation in which the storage isn't + // converted, but it can not be found since the account was and so + // there is no way to find the MPT storage from the information found + // in the verkle account. + // Note that this issue can still occur if the account gets written + // to during normal block execution. A mitigation strategy has been + // introduced with the `*StorageRootConversion` fields in VerkleDB. + if acc.HasStorage() { + stIt, err := statedb.Snaps().StorageIterator(mpt.Hash(), accIt.Hash(), migrdb.GetCurrentSlotHash()) + if err != nil { + return nil, nil, 0, err + } + stIt.Next() + + // fdb.StorageProcessed will be initialized to `true` if the + // entire storage for an account was not entirely processed + // by the previous block. This is used as a signal to resume + // processing the storage for that account where we left off. + // If the entire storage was processed, then the iterator was + // created in vain, but it's ok as this will not happen often. + for ; !migrdb.GetStorageProcessed() && count < maxMovedCount; count++ { + var ( + value []byte // slot value after RLP decoding + safeValue [32]byte // 32-byte aligned value + ) + if err := rlp.DecodeBytes(stIt.Slot(), &value); err != nil { + return nil, nil, 0, fmt.Errorf("error decoding bytes %x: %w", stIt.Slot(), err) + } + copy(safeValue[32-len(value):], value) + + var slotnr [32]byte + if hasPreimagesBin { + if _, err := io.ReadFull(fpreimages, slotnr[:]); err != nil { + return nil, nil, 0, fmt.Errorf("reading preimage file: %s", err) + } + } else { + slotnr := rawdb.ReadPreimage(migrdb.DiskDB(), stIt.Hash()) + if len(slotnr) != 32 { + return nil, nil, 0, fmt.Errorf("slotnr len is zero is not 32: %d", len(slotnr)) + } + } + if crypto.Keccak256Hash(slotnr[:]) != stIt.Hash() { + return nil, nil, 0, fmt.Errorf("preimage file does not match storage hash: %s!=%s", crypto.Keccak256Hash(slotnr[:]), stIt.Hash()) + } + preimageSeek += int64(len(slotnr)) + + mkv.addStorageSlot(migrdb.GetCurrentAccountAddress().Bytes(), slotnr[:], safeValue[:]) + + // advance the storage iterator + migrdb.SetStorageProcessed(!stIt.Next()) + if !migrdb.GetStorageProcessed() { + migrdb.SetCurrentSlotHash(stIt.Hash()) + } + } + stIt.Release() + } + + // If the maximum number of leaves hasn't been reached, then + // it means that the storage has finished processing (or none + // was available for this account) and that the account itself + // can be processed. 
+ if count < maxMovedCount { + count++ // count increase for the account itself + + mkv.addAccount(migrdb.GetCurrentAccountAddress().Bytes(), acc) + vkt.ClearStrorageRootConversion(*migrdb.GetCurrentAccountAddress()) + + // Store the account code if present + if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) { + code := rawdb.ReadCode(statedb.Database().DiskDB(), common.BytesToHash(acc.CodeHash)) + chunks := trie.ChunkifyCode(code) + + mkv.addAccountCode(migrdb.GetCurrentAccountAddress().Bytes(), uint64(len(code)), chunks) + } + + // reset storage iterator marker for next account + migrdb.SetStorageProcessed(false) + migrdb.SetCurrentSlotHash(common.Hash{}) + + // Move to the next account, if available - or end + // the transition otherwise. + if accIt.Next() { + var addr common.Address + if hasPreimagesBin { + if _, err := io.ReadFull(fpreimages, addr[:]); err != nil { + return nil, nil, 0, fmt.Errorf("reading preimage file: %s", err) + } + } else { + addr = common.BytesToAddress(rawdb.ReadPreimage(migrdb.DiskDB(), accIt.Hash())) + if len(addr) != 20 { + return nil, nil, 0, fmt.Errorf("account address len is zero is not 20: %d", len(addr)) + } + } + // fmt.Printf("account switch: %s != %s\n", crypto.Keccak256Hash(addr[:]), accIt.Hash()) + if crypto.Keccak256Hash(addr[:]) != accIt.Hash() { + return nil, nil, 0, fmt.Errorf("preimage file does not match account hash: %s != %s", crypto.Keccak256Hash(addr[:]), accIt.Hash()) + } + preimageSeek += int64(len(addr)) + migrdb.SetCurrentAccountAddress(addr) + } else { + // case when the account iterator has + // reached the end but count < maxCount + migrdb.EndVerkleTransition() + break + } + } + } + migrdb.SetCurrentPreimageOffset(preimageSeek) + + log.Info("Collected and prepared key values from base tree", "count", count, "duration", time.Since(now), "last account", statedb.Database().GetCurrentAccountHash()) + + now = time.Now() + if err := mkv.migrateCollectedKeyValues(tt.Overlay()); err != nil { + return nil, nil, 0, fmt.Errorf("could not migrate key values: %w", err) + } + log.Info("Inserted key values in overlay tree", "count", count, "duration", time.Since(now)) + } + // Finalize the block, applying any consensus engine specific extras (e.g. block rewards) p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles(), withdrawals) + if block.NumberU64()%100 == 0 { + stateRoot := statedb.GetTrie().Hash() + log.Info("State root", "number", block.NumberU64(), "hash", stateRoot) + } + return receipts, allLogs, *usedGas, nil } func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) { // Create a new context to be used in the EVM environment. txContext := NewEVMTxContext(msg) + txContext.Accesses = state.NewAccessWitness(statedb) evm.Reset(txContext, statedb) // Apply the transaction to the current state (included in the env). @@ -137,6 +352,8 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) } + statedb.Witness().Merge(txContext.Accesses) + // Set the receipt logs and create the bloom filter. 
receipt.Logs = statedb.GetLogs(tx.Hash(), blockNumber.Uint64(), blockHash) receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) @@ -160,3 +377,117 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo vmenv := vm.NewEVM(blockContext, vm.TxContext{BlobHashes: tx.BlobHashes()}, statedb, config, cfg) return applyTransaction(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv) } + +// keyValueMigrator is a helper struct that collects key-values from the base tree. +// The walk is done in account order, so **we assume** the APIs hold this invariant. This is +// useful to be smart about caching banderwagon.Points to make VKT key calculations faster. +type keyValueMigrator struct { + currAddr []byte + currAddrPoint *verkle.Point + + vktLeafData map[string]*verkle.BatchNewLeafNodeData +} + +func (kvm *keyValueMigrator) addStorageSlot(addr []byte, slotNumber []byte, slotValue []byte) { + addrPoint := kvm.getAddrPoint(addr) + + vktKey := tutils.GetTreeKeyStorageSlotWithEvaluatedAddress(addrPoint, slotNumber) + leafNodeData := kvm.getOrInitLeafNodeData(vktKey) + + leafNodeData.Values[vktKey[verkle.StemSize]] = slotValue +} + +func (kvm *keyValueMigrator) addAccount(addr []byte, acc *types.StateAccount) { + addrPoint := kvm.getAddrPoint(addr) + + vktKey := tutils.GetTreeKeyVersionWithEvaluatedAddress(addrPoint) + leafNodeData := kvm.getOrInitLeafNodeData(vktKey) + + var version [verkle.LeafValueSize]byte + leafNodeData.Values[tutils.VersionLeafKey] = version[:] + + var balance [verkle.LeafValueSize]byte + for i, b := range acc.Balance.Bytes() { + balance[len(acc.Balance.Bytes())-1-i] = b + } + leafNodeData.Values[tutils.BalanceLeafKey] = balance[:] + + var nonce [verkle.LeafValueSize]byte + binary.LittleEndian.PutUint64(nonce[:8], acc.Nonce) + leafNodeData.Values[tutils.NonceLeafKey] = nonce[:] + + leafNodeData.Values[tutils.CodeKeccakLeafKey] = acc.CodeHash[:] + + // Code size is ignored here. If this isn't an EOA, the tree-walk will call + // addAccountCode with this information. +} + +func (kvm *keyValueMigrator) addAccountCode(addr []byte, codeSize uint64, chunks []byte) { + addrPoint := kvm.getAddrPoint(addr) + + vktKey := tutils.GetTreeKeyVersionWithEvaluatedAddress(addrPoint) + leafNodeData := kvm.getOrInitLeafNodeData(vktKey) + + // Save the code size. + var codeSizeBytes [verkle.LeafValueSize]byte + binary.LittleEndian.PutUint64(codeSizeBytes[:8], codeSize) + leafNodeData.Values[tutils.CodeSizeLeafKey] = codeSizeBytes[:] + + // The first 128 chunks are stored in the account header leaf. + for i := 0; i < 128 && i < len(chunks)/32; i++ { + leafNodeData.Values[byte(128+i)] = chunks[32*i : 32*(i+1)] + } + + // Potential further chunks, have their own leaf nodes. 
+ for i := 128; i < len(chunks)/32; { + vktKey := tutils.GetTreeKeyCodeChunkWithEvaluatedAddress(addrPoint, uint256.NewInt(uint64(i))) + leafNodeData := kvm.getOrInitLeafNodeData(vktKey) + + j := i + for ; (j-i) < 256 && j < len(chunks)/32; j++ { + leafNodeData.Values[byte((j-128)%256)] = chunks[32*j : 32*(j+1)] + } + i = j + } +} + +func (kvm *keyValueMigrator) getAddrPoint(addr []byte) *verkle.Point { + if bytes.Equal(addr, kvm.currAddr) { + return kvm.currAddrPoint + } + kvm.currAddr = addr + kvm.currAddrPoint = tutils.EvaluateAddressPoint(addr) + return kvm.currAddrPoint +} + +func (kvm *keyValueMigrator) getOrInitLeafNodeData(stem []byte) *verkle.BatchNewLeafNodeData { + stemStr := string(stem) + if _, ok := kvm.vktLeafData[stemStr]; !ok { + kvm.vktLeafData[stemStr] = &verkle.BatchNewLeafNodeData{ + Stem: stem[:verkle.StemSize], + Values: make(map[byte][]byte), + } + } + return kvm.vktLeafData[stemStr] +} + +func (kvm *keyValueMigrator) migrateCollectedKeyValues(tree *trie.VerkleTrie) error { + // Transform the map into a slice. + nodeValues := make([]verkle.BatchNewLeafNodeData, 0, len(kvm.vktLeafData)) + for _, vld := range kvm.vktLeafData { + nodeValues = append(nodeValues, *vld) + } + + // Create all leaves in batch mode so we can optimize cryptography operations. + newLeaves, err := verkle.BatchNewLeafNode(nodeValues) + if err != nil { + return fmt.Errorf("failed to batch-create new leaf nodes") + } + + // Insert into the tree. + if err := tree.InsertMigratedLeaves(newLeaves); err != nil { + return fmt.Errorf("failed to insert migrated leaves: %w", err) + } + + return nil +} diff --git a/core/state_processor_test.go b/core/state_processor_test.go index b4482acf35ba..8c0a14aa31f8 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -17,8 +17,12 @@ package core import ( + //"bytes" "crypto/ecdsa" + + //"fmt" "math/big" + //"os" "testing" "github.com/ethereum/go-ethereum/common" @@ -33,6 +37,8 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + + //"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/holiman/uint256" "golang.org/x/crypto/sha3" @@ -417,3 +423,118 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr } return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)) } + +// A contract creation that calls EXTCODECOPY in the constructor. Used to ensure that the witness +// will not contain that copied data. 
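// Illustrative sketch, not part of the patch: addAccountCode above writes code
// chunks 0..127 into the account header leaf at value indices 128..255, and any
// further chunks into overflow leaf groups of up to 256 chunks each, addressed
// by (chunkIndex-128)%256 inside their group. chunkLeafSlot reproduces that
// layout as a pure function; the "group -1 means header leaf" convention is an
// assumption made only for this sketch.
package main

import "fmt"

// chunkLeafSlot returns which leaf group a 31-byte code chunk lands in and the
// value index inside that group's leaf.
func chunkLeafSlot(chunk uint64) (group int, subIndex byte) {
	if chunk < 128 {
		return -1, byte(128 + chunk) // header leaf, upper half of its 256 values
	}
	return int((chunk - 128) / 256), byte((chunk - 128) % 256)
}

func main() {
	for _, c := range []uint64{0, 127, 128, 383, 384} {
		g, s := chunkLeafSlot(c)
		fmt.Printf("chunk %3d -> group %2d, sub-index %3d\n", c, g, s)
	}
	// chunk   0 -> header (group -1), sub-index 128
	// chunk 127 -> header,            sub-index 255
	// chunk 128 -> group  0,          sub-index   0
	// chunk 383 -> group  0,          sub-index 255
	// chunk 384 -> group  1,          sub-index   0
}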
+// Source: https://gist.github.com/gballet/a23db1e1cb4ed105616b5920feb75985 +var ( + code = common.FromHex(`6060604052600a8060106000396000f360606040526008565b00`) + intrinsicContractCreationGas, _ = IntrinsicGas(code, nil, true, true, true, true) + codeWithExtCodeCopy = common.FromHex(`0x60806040526040516100109061017b565b604051809103906000f08015801561002c573d6000803e3d6000fd5b506000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555034801561007857600080fd5b5060008067ffffffffffffffff8111156100955761009461024a565b5b6040519080825280601f01601f1916602001820160405280156100c75781602001600182028036833780820191505090505b50905060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690506020600083833c81610101906101e3565b60405161010d90610187565b61011791906101a3565b604051809103906000f080158015610133573d6000803e3d6000fd5b50600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505061029b565b60d58061046783390190565b6102068061053c83390190565b61019d816101d9565b82525050565b60006020820190506101b86000830184610194565b92915050565b6000819050602082019050919050565b600081519050919050565b6000819050919050565b60006101ee826101ce565b826101f8846101be565b905061020381610279565b925060208210156102435761023e7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8360200360080261028e565b831692505b5050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600061028582516101d9565b80915050919050565b600082821b905092915050565b6101bd806102aa6000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100f0919061010a565b905090565b60008151905061010481610170565b92915050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220a6a0e11af79f176f9c421b7b12f441356b25f6489b83d38cc828a701720b41f164736f6c63430008070033608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea26469706673582212203a14eb0d5cd07c277d3e24912f110ddda3e553245a99afc4eeefb2fbae5327aa64736f6c63430008070033608060405234801561001057600080fd5b5060405161020638038061020683398181016040528101906100329190610063565b60018160001c6100429190610090565b60008190555050610145565b60008151905061005d8161012e565b92915050565b60006020828403121561007957610078610129565b5b60006100878482850161004e565b91505092915050565b600061009b826100f0565b91506100a6836100f0565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff038211156100db576100da6100fa565b5b828201905092915050565b6000819050919050565b6000819050919050565b7f4e487b710000000000000000000000000000000000000000000000000000000060005260116004526
0246000fd5b600080fd5b610137816100e6565b811461014257600080fd5b50565b60b3806101536000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea26469706673582212209bff7098a2f526de1ad499866f27d6d0d6f17b74a413036d6063ca6a0998ca4264736f6c63430008070033`) + intrinsicCodeWithExtCodeCopyGas, _ = IntrinsicGas(codeWithExtCodeCopy, nil, true, true, true, true) + // XXX if the last true in IntringsicGas makes for an invalid gas, try false +) + +func TestProcessVerkle(t *testing.T) { + var ( + cancuntime uint64 = 0 + config = ¶ms.ChainConfig{ + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + CancunTime: &cancuntime, + } + signer = types.LatestSigner(config) + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain + gendb = rawdb.NewMemoryDatabase() // Database for the block-generation code, they must be separate as they are path-based. + coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") + gspec = &Genesis{ + Config: config, + Alloc: GenesisAlloc{ + coinbase: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + }, + } + ) + // Verkle trees use the snapshot, which must be enabled before the + // data is saved into the tree+database. + genesis := gspec.MustCommit(bcdb) + blockchain, _ := NewBlockChain(bcdb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + defer blockchain.Stop() + + // Commit the genesis block to the block-generation database as it + // is now independent of the blockchain database. 
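// Illustrative sketch, not part of the patch: the txCost1/txCost2 expectations
// just below add the verkle witness access prices to the base transaction gas.
// With the defaults introduced by params/verkle_params.go further down in this
// patch (branch read 1900, chunk read 200, branch write 3000, chunk write 500)
// and the standard TxGas of 21000, the two totals come out to 34300 and 30800.
package main

import "fmt"

const (
	witnessBranchReadCost  = 1900
	witnessChunkReadCost   = 200
	witnessBranchWriteCost = 3000
	witnessChunkWriteCost  = 500
	txGas                  = 21000
)

func main() {
	txCost1 := witnessBranchWriteCost*2 + witnessBranchReadCost*2 + witnessChunkWriteCost*3 + witnessChunkReadCost*10 + txGas
	txCost2 := witnessBranchWriteCost + witnessBranchReadCost*2 + witnessChunkWriteCost*2 + witnessChunkReadCost*10 + txGas
	fmt.Println(txCost1, txCost2) // 34300 30800
}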
+ gspec.MustCommit(gendb) + + txCost1 := params.WitnessBranchWriteCost*2 + params.WitnessBranchReadCost*2 + params.WitnessChunkWriteCost*3 + params.WitnessChunkReadCost*10 + params.TxGas + txCost2 := params.WitnessBranchWriteCost + params.WitnessBranchReadCost*2 + params.WitnessChunkWriteCost*2 + params.WitnessChunkReadCost*10 + params.TxGas + contractCreationCost := intrinsicContractCreationGas + uint64(6900 /* from */ +7700 /* creation */ +2939 /* execution costs */) + codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(6900 /* from */ +7000 /* creation */ +315894 /* execution costs */) + blockGasUsagesExpected := []uint64{ + txCost1*2 + txCost2, + txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, + } + chain, _, proofs, keyvals := GenerateVerkleChain(gspec.Config, genesis, ethash.NewFaker(), gendb, 2, func(i int, gen *BlockGen) { + // TODO need to check that the tx cost provided is the exact amount used (no remaining left-over) + tx, _ := types.SignTx(types.NewTransaction(uint64(i)*3, common.Address{byte(i), 2, 3}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+1, common.Address{}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+2, common.Address{}, big.NewInt(0), txCost2, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + + // Add two contract creations in block #2 + if i == 1 { + tx, _ = types.SignTx(types.NewContractCreation(6, big.NewInt(16), 3000000, big.NewInt(875000000), code), signer, testKey) + gen.AddTx(tx) + + tx, _ = types.SignTx(types.NewContractCreation(7, big.NewInt(0), 3000000, big.NewInt(875000000), codeWithExtCodeCopy), signer, testKey) + gen.AddTx(tx) + } + }) + + // Uncomment to extract block #2 + //f, _ := os.Create("block2.rlp") + //defer f.Close() + //var buf bytes.Buffer + //rlp.Encode(&buf, chain[1]) + //f.Write(buf.Bytes()) + //fmt.Printf("root= %x\n", chain[0].Root()) + // check the proof for the last block + err := trie.DeserializeAndVerifyVerkleProof(proofs[1], chain[0].Root().Bytes(), keyvals[1]) + if err != nil { + t.Fatal(err) + } + t.Log("verfied verkle proof") + + endnum, err := blockchain.InsertChain(chain) + if err != nil { + t.Fatalf("block %d imported with error: %v", endnum, err) + } + + for i := 0; i < 2; i++ { + b := blockchain.GetBlockByNumber(uint64(i) + 1) + if b == nil { + t.Fatalf("expected block %d to be present in chain", i+1) + } + if b.Hash() != chain[i].Hash() { + t.Fatalf("block #%d not found at expected height", b.NumberU64()) + } + if b.GasUsed() != blockGasUsagesExpected[i] { + t.Fatalf("expected block #%d txs to use %d, got %d\n", b.NumberU64(), blockGasUsagesExpected[i], b.GasUsed()) + } + } +} diff --git a/core/state_transition.go b/core/state_transition.go index f84757be781f..a5fb83084a96 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" ) @@ -339,6 +340,19 @@ func (st *StateTransition) preCheck() error { return st.buyGas() } +// tryConsumeGas tries to subtract gas from gasPool, setting the result in gasPool +// if subtracting more gas than remains in gasPool, set gasPool = 0 and return false +// otherwise, do the 
subtraction setting the result in gasPool and return true +func tryConsumeGas(gasPool *uint64, gas uint64) bool { + if *gasPool < gas { + *gasPool = 0 + return false + } + + *gasPool -= gas + return true +} + // TransitionDb will transition the state by applying the current message and // returning the evm execution result with following fields. // @@ -389,6 +403,32 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } st.gasRemaining -= gas + if rules.IsCancun { + targetAddr := msg.To + originAddr := msg.From + + statelessGasOrigin := st.evm.Accesses.TouchTxOriginAndComputeGas(originAddr.Bytes()) + if !tryConsumeGas(&st.gasRemaining, statelessGasOrigin) { + return nil, fmt.Errorf("%w: Insufficient funds to cover witness access costs for transaction: have %d, want %d", ErrInsufficientBalanceWitness, st.gasRemaining, gas) + } + originNonce := st.evm.StateDB.GetNonce(originAddr) + + if msg.To != nil { + statelessGasDest := st.evm.Accesses.TouchTxExistingAndComputeGas(targetAddr.Bytes(), msg.Value.Sign() != 0) + if !tryConsumeGas(&st.gasRemaining, statelessGasDest) { + return nil, fmt.Errorf("%w: Insufficient funds to cover witness access costs for transaction: have %d, want %d", ErrInsufficientBalanceWitness, st.gasRemaining, gas) + } + + // ensure the code size ends up in the access witness + st.evm.StateDB.GetCodeSize(*targetAddr) + } else { + contractAddr := crypto.CreateAddress(originAddr, originNonce) + if !tryConsumeGas(&st.gasRemaining, st.evm.Accesses.TouchAndChargeContractCreateInit(contractAddr.Bytes(), msg.Value.Sign() != 0)) { + return nil, fmt.Errorf("%w: Insufficient funds to cover witness access costs for transaction: have %d, want %d", ErrInsufficientBalanceWitness, st.gasRemaining, gas) + } + } + } + // Check clause 6 if msg.Value.Sign() > 0 && !st.evm.Context.CanTransfer(st.state, msg.From, msg.Value) { return nil, fmt.Errorf("%w: address %v", ErrInsufficientFundsForTransfer, msg.From.Hex()) diff --git a/core/types/gen_account_rlp.go b/core/types/gen_account_rlp.go index 5181d884112f..9d07200e33b3 100644 --- a/core/types/gen_account_rlp.go +++ b/core/types/gen_account_rlp.go @@ -5,8 +5,11 @@ package types -import "github.com/ethereum/go-ethereum/rlp" -import "io" +import ( + "io" + + "github.com/ethereum/go-ethereum/rlp" +) func (obj *StateAccount) EncodeRLP(_w io.Writer) error { w := rlp.NewEncoderBuffer(_w) diff --git a/core/types/gen_header_rlp.go b/core/types/gen_header_rlp.go index a5ed5cd15094..f322411bad9c 100644 --- a/core/types/gen_header_rlp.go +++ b/core/types/gen_header_rlp.go @@ -5,6 +5,7 @@ package types +import "github.com/ethereum/go-ethereum/common" import "github.com/ethereum/go-ethereum/rlp" import "io" @@ -78,3 +79,115 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.ListEnd(_tmp0) return w.Flush() } + +func (obj *Header) DecodeRLP(dec *rlp.Stream) error { + var _tmp0 Header + { + if _, err := dec.List(); err != nil { + return err + } + // ParentHash: + var _tmp1 common.Hash + if err := dec.ReadBytes(_tmp1[:]); err != nil { + return err + } + _tmp0.ParentHash = _tmp1 + // UncleHash: + var _tmp2 common.Hash + if err := dec.ReadBytes(_tmp2[:]); err != nil { + return err + } + _tmp0.UncleHash = _tmp2 + // Coinbase: + var _tmp3 common.Address + if err := dec.ReadBytes(_tmp3[:]); err != nil { + return err + } + _tmp0.Coinbase = _tmp3 + // Root: + var _tmp4 common.Hash + if err := dec.ReadBytes(_tmp4[:]); err != nil { + return err + } + _tmp0.Root = _tmp4 + // TxHash: + var _tmp5 common.Hash + if err := dec.ReadBytes(_tmp5[:]); err 
!= nil { + return err + } + _tmp0.TxHash = _tmp5 + // ReceiptHash: + var _tmp6 common.Hash + if err := dec.ReadBytes(_tmp6[:]); err != nil { + return err + } + _tmp0.ReceiptHash = _tmp6 + // Bloom: + var _tmp7 Bloom + if err := dec.ReadBytes(_tmp7[:]); err != nil { + return err + } + _tmp0.Bloom = _tmp7 + // Difficulty: + _tmp8, err := dec.BigInt() + if err != nil { + return err + } + _tmp0.Difficulty = _tmp8 + // Number: + _tmp9, err := dec.BigInt() + if err != nil { + return err + } + _tmp0.Number = _tmp9 + // GasLimit: + _tmp10, err := dec.Uint64() + if err != nil { + return err + } + _tmp0.GasLimit = _tmp10 + // GasUsed: + _tmp11, err := dec.Uint64() + if err != nil { + return err + } + _tmp0.GasUsed = _tmp11 + // Time: + _tmp12, err := dec.Uint64() + if err != nil { + return err + } + _tmp0.Time = _tmp12 + // Extra: + _tmp13, err := dec.Bytes() + if err != nil { + return err + } + _tmp0.Extra = _tmp13 + // MixDigest: + var _tmp14 common.Hash + if err := dec.ReadBytes(_tmp14[:]); err != nil { + return err + } + _tmp0.MixDigest = _tmp14 + // Nonce: + var _tmp15 BlockNonce + if err := dec.ReadBytes(_tmp15[:]); err != nil { + return err + } + _tmp0.Nonce = _tmp15 + // BaseFee: + if dec.MoreDataInList() { + _tmp16, err := dec.BigInt() + if err != nil { + return err + } + _tmp0.BaseFee = _tmp16 + } + if err := dec.ListEnd(); err != nil { + return err + } + } + *obj = _tmp0 + return nil +} diff --git a/core/types/gen_log_rlp.go b/core/types/gen_log_rlp.go index 4a6c6b0094f8..78fa783cee1f 100644 --- a/core/types/gen_log_rlp.go +++ b/core/types/gen_log_rlp.go @@ -5,8 +5,11 @@ package types -import "github.com/ethereum/go-ethereum/rlp" -import "io" +import ( + "io" + + "github.com/ethereum/go-ethereum/rlp" +) func (obj *rlpLog) EncodeRLP(_w io.Writer) error { w := rlp.NewEncoderBuffer(_w) diff --git a/core/types/state_account.go b/core/types/state_account.go index 314f4943ecab..75c188fea259 100644 --- a/core/types/state_account.go +++ b/core/types/state_account.go @@ -58,6 +58,11 @@ func (acct *StateAccount) Copy() *StateAccount { } } +// HasStorage returns true if the account has a non-empty storage tree. +func (acc *StateAccount) HasStorage() bool { + return len(acc.Root) == 32 && acc.Root == EmptyRootHash +} + // SlimAccount is a modified version of an Account, where the root is replaced // with a byte slice. This format can be used to represent full-consensus format // or slim format which replaces the empty root and code hash as nil byte slice. diff --git a/core/vm/common.go b/core/vm/common.go index 90ba4a4ad15b..ba75950e370b 100644 --- a/core/vm/common.go +++ b/core/vm/common.go @@ -63,6 +63,18 @@ func getData(data []byte, start uint64, size uint64) []byte { return common.RightPadBytes(data[start:end], int(size)) } +func getDataAndAdjustedBounds(data []byte, start uint64, size uint64) (codeCopyPadded []byte, actualStart uint64, sizeNonPadded uint64) { + length := uint64(len(data)) + if start > length { + start = length + } + end := start + size + if end > length { + end = length + } + return common.RightPadBytes(data[start:end], int(size)), start, end - start +} + // toWordSize returns the ceiled word size required for memory expansion. 
func toWordSize(size uint64) uint64 { if size > math.MaxUint64-31 { diff --git a/core/vm/contract.go b/core/vm/contract.go index bb0902969ec7..caaaa8e455f4 100644 --- a/core/vm/contract.go +++ b/core/vm/contract.go @@ -20,6 +20,9 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" "github.com/holiman/uint256" ) @@ -49,15 +52,20 @@ type Contract struct { CallerAddress common.Address caller ContractRef self ContractRef + addressPoint *verkle.Point jumpdests map[common.Hash]bitvec // Aggregated result of JUMPDEST analysis. analysis bitvec // Locally cached result of JUMPDEST analysis Code []byte + Chunks trie.ChunkedCode CodeHash common.Hash CodeAddr *common.Address Input []byte + // is the execution frame represented by this object a contract deployment + IsDeployment bool + Gas uint64 value *big.Int } @@ -93,12 +101,12 @@ func (c *Contract) validJumpdest(dest *uint256.Int) bool { if OpCode(c.Code[udest]) != JUMPDEST { return false } - return c.isCode(udest) + return c.IsCode(udest) } -// isCode returns true if the provided PC location is an actual opcode, as +// IsCode returns true if the provided PC location is an actual opcode, as // opposed to a data-segment following a PUSHN operation. -func (c *Contract) isCode(udest uint64) bool { +func (c *Contract) IsCode(udest uint64) bool { // Do we already have an analysis laying around? if c.analysis != nil { return c.analysis.codeSegment(udest) @@ -172,6 +180,14 @@ func (c *Contract) Address() common.Address { return c.self.Address() } +func (c *Contract) AddressPoint() *verkle.Point { + if c.addressPoint == nil { + c.addressPoint = utils.EvaluateAddressPoint(c.Address().Bytes()) + } + + return c.addressPoint +} + // Value returns the contract's value (sent to it from it's caller) func (c *Contract) Value() *big.Int { return c.value diff --git a/core/vm/evm.go b/core/vm/evm.go index 40e2f3554f46..a35f1094bacc 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -21,6 +21,7 @@ import ( "sync/atomic" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" @@ -81,9 +82,10 @@ type BlockContext struct { // All fields can change between transactions. 
type TxContext struct { // Message information - Origin common.Address // Provides information for ORIGIN - GasPrice *big.Int // Provides information for GASPRICE - BlobHashes []common.Hash // Provides information for BLOBHASH + Origin common.Address // Provides information for ORIGIN + GasPrice *big.Int // Provides information for GASPRICE + BlobHashes []common.Hash // Provides information for BLOBHASH + Accesses *state.AccessWitness // Capture all state accesses for this tx } // EVM is the Ethereum Virtual Machine base object and provides @@ -133,6 +135,9 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig chainConfig: chainConfig, chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time), } + if txCtx.Accesses == nil && chainConfig.IsCancun(blockCtx.BlockNumber, blockCtx.Time) { + txCtx.Accesses = state.NewAccessWitness(evm.StateDB.(*state.StateDB)) + } evm.interpreter = NewEVMInterpreter(evm) return evm } @@ -140,6 +145,9 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig // Reset resets the EVM with a new transaction context.Reset // This is not threadsafe and should only be done very cautiously. func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) { + if txCtx.Accesses == nil && evm.chainRules.IsCancun { + txCtx.Accesses = state.NewAccessWitness(evm.StateDB.(*state.StateDB)) + } evm.TxContext = txCtx evm.StateDB = statedb } @@ -168,6 +176,20 @@ func (evm *EVM) SetBlockContext(blockCtx BlockContext) { evm.chainRules = evm.chainConfig.Rules(num, blockCtx.Random != nil, timestamp) } +// tryConsumeGas tries to subtract gas from gasPool, setting the result in gasPool +// if subtracting more gas than remains in gasPool, set gasPool = 0 and return false +// otherwise, do the subtraction setting the result in gasPool and return true +func tryConsumeGas(gasPool *uint64, gas uint64) bool { + // XXX check this is still needed as a func + if *gasPool < gas { + *gasPool = 0 + return false + } + + *gasPool -= gas + return true +} + // Call executes the contract associated with the addr with the given input as // parameters. It also handles any necessary value transfer required and takes // the necessary steps to create accounts and reverses the state in case of an @@ -185,8 +207,13 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas p, isPrecompile := evm.precompile(addr) debug := evm.Config.Tracer != nil + var creation bool if !evm.StateDB.Exist(addr) { if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 { + if evm.chainRules.IsCancun { + // proof of absence + tryConsumeGas(&gas, evm.Accesses.TouchAndChargeProofOfAbsence(caller.Address().Bytes())) + } // Calling a non existing account, don't do anything, but ping the tracer if debug { if evm.depth == 0 { @@ -200,6 +227,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas return nil, gas, nil } evm.StateDB.CreateAccount(addr) + creation = true } evm.Context.Transfer(evm.StateDB, caller.Address(), addr, value) @@ -225,6 +253,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas // Initialise a new contract and set the code that is to be used by the EVM. // The contract is a scoped environment for this execution context only. 
code := evm.StateDB.GetCode(addr) + if len(code) == 0 { ret, err = nil, nil // gas is unchanged } else { @@ -233,6 +262,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas // The depth-check is already done, and precompiles handled above contract := NewContract(caller, AccountRef(addrCopy), value, gas) contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), code) + contract.IsDeployment = creation ret, err = evm.interpreter.Run(contract, input, false) gas = contract.Gas } @@ -445,12 +475,14 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, if evm.chainRules.IsEIP158 { evm.StateDB.SetNonce(address, 1) } + evm.Context.Transfer(evm.StateDB, caller.Address(), address, value) // Initialise a new contract and set the code that is to be used by the EVM. // The contract is a scoped environment for this execution context only. contract := NewContract(caller, AccountRef(address), value, gas) contract.SetCodeOptionalHash(&address, codeAndHash) + contract.IsDeployment = true if evm.Config.Tracer != nil { if evm.depth == 0 { @@ -495,6 +527,13 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } } + if err == nil && evm.chainRules.IsCancun { + if !contract.UseGas(evm.Accesses.TouchAndChargeContractCreateCompleted(address.Bytes()[:], value.Sign() != 0)) { + evm.StateDB.RevertToSnapshot(snapshot) + err = ErrOutOfGas + } + } + if evm.Config.Tracer != nil { if evm.depth == 0 { evm.Config.Tracer.CaptureEnd(ret, gas-contract.Gas, err) diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 5153c8b7a3de..899551d186df 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -21,7 +21,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + trieUtils "github.com/ethereum/go-ethereum/trie/utils" ) // memoryGasCost calculates the quadratic gas for memory expansion. It does so @@ -95,7 +97,32 @@ var ( gasReturnDataCopy = memoryCopierGas(2) ) +func gasExtCodeSize(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + usedGas := uint64(0) + slot := stack.Back(0) + if evm.chainRules.IsCancun { + index := trieUtils.GetTreeKeyCodeSize(slot.Bytes()) + usedGas += evm.TxContext.Accesses.TouchAddressOnReadAndComputeGas(index) + } + + return usedGas, nil +} + +func gasSLoad(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + usedGas := uint64(0) + + if evm.chainRules.IsCancun { + where := stack.Back(0) + index := trieUtils.GetTreeKeyStorageSlotWithEvaluatedAddress(contract.AddressPoint(), where.Bytes()) + usedGas += evm.Accesses.TouchAddressOnReadAndComputeGas(index) + } + + return usedGas, nil +} + func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + // Apply the witness access costs, err is nil + accessGas, _ := gasSLoad(evm, contract, stack, mem, memorySize) var ( y, x = stack.Back(1), stack.Back(0) current = evm.StateDB.GetState(contract.Address(), x.Bytes32()) @@ -111,12 +138,12 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi // 3. 
From a non-zero to a non-zero (CHANGE) switch { case current == (common.Hash{}) && y.Sign() != 0: // 0 => non 0 - return params.SstoreSetGas, nil + return params.SstoreSetGas + accessGas, nil case current != (common.Hash{}) && y.Sign() == 0: // non 0 => 0 evm.StateDB.AddRefund(params.SstoreRefundGas) - return params.SstoreClearGas, nil + return params.SstoreClearGas + accessGas, nil default: // non 0 => non 0 (or 0 => 0) - return params.SstoreResetGas, nil + return params.SstoreResetGas + accessGas, nil } } @@ -369,6 +396,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize transfersValue = !stack.Back(2).IsZero() address = common.Address(stack.Back(1).Bytes20()) ) + if evm.chainRules.IsEIP158 { if transfersValue && evm.StateDB.Empty(address) { gas += params.CallNewAccountGas @@ -395,6 +423,21 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } + if evm.chainRules.IsCancun { + if _, isPrecompile := evm.precompile(address); !isPrecompile { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes()[:])) + if overflow { + return 0, ErrGasUintOverflow + } + } + if transfersValue { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeValueTransfer(contract.Address().Bytes()[:], address.Bytes()[:])) + if overflow { + return 0, ErrGasUintOverflow + } + } + } + return gas, nil } @@ -420,6 +463,15 @@ func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memory if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } + if evm.chainRules.IsCancun { + address := common.Address(stack.Back(1).Bytes20()) + if _, isPrecompile := evm.precompile(address); !isPrecompile { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) + if overflow { + return 0, ErrGasUintOverflow + } + } + } return gas, nil } @@ -436,6 +488,15 @@ func gasDelegateCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } + if evm.chainRules.IsCancun { + address := common.Address(stack.Back(1).Bytes20()) + if _, isPrecompile := evm.precompile(address); !isPrecompile { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) + if overflow { + return 0, ErrGasUintOverflow + } + } + } return gas, nil } @@ -452,6 +513,15 @@ func gasStaticCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memo if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } + if evm.chainRules.IsCancun { + address := common.Address(stack.Back(1).Bytes20()) + if _, isPrecompile := evm.precompile(address); !isPrecompile { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) + if overflow { + return 0, ErrGasUintOverflow + } + } + } return gas, nil } @@ -472,6 +542,12 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me } } + if evm.chainRules.IsCancun { + // TODO turn this into a panic (when we are sure this method + // will never execute when verkle is enabled) + log.Warn("verkle witness accumulation not supported for selfdestruct") + } + if !evm.StateDB.HasSelfDestructed(contract.Address()) { evm.StateDB.AddRefund(params.SelfdestructRefundGas) } diff --git a/core/vm/instructions.go b/core/vm/instructions.go 
index 2105201fce42..eeb9fc64921b 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -18,9 +18,13 @@ package vm import ( "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" + trieUtils "github.com/ethereum/go-ethereum/trie/utils" "github.com/holiman/uint256" ) @@ -341,7 +345,13 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.peek() - slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20()))) + cs := uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())) + if interpreter.evm.chainRules.IsCancun { + index := trieUtils.GetTreeKeyCodeSize(slot.Bytes()) + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(index) + scope.Contract.UseGas(statelessGas) + } + slot.SetUint64(cs) return nil, nil } @@ -362,12 +372,86 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ if overflow { uint64CodeOffset = 0xffffffffffffffff } - codeCopy := getData(scope.Contract.Code, uint64CodeOffset, length.Uint64()) - scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) + paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(scope.Contract.Code, uint64CodeOffset, length.Uint64()) + if interpreter.evm.chainRules.IsCancun { + scope.Contract.UseGas(touchEachChunksOnReadAndChargeGas(copyOffset, nonPaddedCopyLength, scope.Contract, scope.Contract.Code, interpreter.evm.Accesses, scope.Contract.IsDeployment)) + } + scope.Memory.Set(memOffset.Uint64(), uint64(len(paddedCodeCopy)), paddedCodeCopy) return nil, nil } +// touchChunkOnReadAndChargeGas is a helper function to touch every chunk in a code range and charge witness gas costs +func touchChunkOnReadAndChargeGas(chunks trie.ChunkedCode, offset uint64, evals [][]byte, code []byte, accesses *state.AccessWitness, deployment bool) uint64 { + // note that in the case where the executed code is outside the range of + // the contract code but touches the last leaf with contract code in it, + // we don't include the last leaf of code in the AccessWitness. The + // reason that we do not need the last leaf is the account's code size + // is already in the AccessWitness so a stateless verifier can see that + // the code from the last leaf is not needed. 
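// Illustrative sketch, not part of the patch: touchEachChunksOnReadAndChargeGas
// below clamps the accessed range to the code length and then walks the 31-byte
// chunks from offset/31 up to (endOffset-1)/31 inclusive, charging each chunk at
// most once. chunkRange reproduces only that boundary arithmetic; its
// zero-size/out-of-range guard is a simplification of the real function's.
package main

import "fmt"

// chunkRange returns the first and last 31-byte chunk indices touched by reading
// size bytes of code starting at offset, clamped to codeLen. ok is false when the
// read does not touch any code at all.
func chunkRange(offset, size, codeLen uint64) (first, last uint64, ok bool) {
	if size == 0 || offset >= codeLen {
		return 0, 0, false
	}
	end := offset + size
	if end > codeLen {
		end = codeLen // clamp to the contract code length
	}
	return offset / 31, (end - 1) / 31, true
}

func main() {
	// Copying 40 bytes starting at offset 25 of a 100-byte contract reads bytes
	// 25..64, which touches chunks 0 (bytes 0..30), 1 (31..61) and 2 (62..92).
	if first, last, ok := chunkRange(25, 40, 100); ok {
		fmt.Printf("chunks %d..%d\n", first, last) // chunks 0..2
	}
}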
+ if code != nil && offset > uint64(len(code)) { + return 0 + } + var ( + chunknr = offset / 31 + statelessGasCharged uint64 + ) + + // Build the chunk address from the evaluated address of its whole group + var index [32]byte + copy(index[:], evals[chunknr/256]) + index[31] = byte((128 + chunknr) % 256) + + var overflow bool + statelessGasCharged, overflow = math.SafeAdd(statelessGasCharged, accesses.TouchAddressOnReadAndComputeGas(index[:])) + if overflow { + panic("overflow when adding gas") + } + + return statelessGasCharged +} + +// touchEachChunksOnReadAndChargeGas is a helper function to touch every chunk in a code range and charge witness gas costs +func touchEachChunksOnReadAndChargeGas(offset, size uint64, contract *Contract, code []byte, accesses *state.AccessWitness, deployment bool) uint64 { + // note that in the case where the copied code is outside the range of the + // contract code but touches the last leaf with contract code in it, + // we don't include the last leaf of code in the AccessWitness. The + // reason that we do not need the last leaf is the account's code size + // is already in the AccessWitness so a stateless verifier can see that + // the code from the last leaf is not needed. + if len(code) == 0 && size == 0 || offset > uint64(len(code)) { + return 0 + } + var ( + statelessGasCharged uint64 + endOffset uint64 + ) + if code != nil && offset+size > uint64(len(code)) { + endOffset = uint64(len(code)) + } else { + endOffset = offset + size + } + + // endOffset - 1 since if the end offset is aligned on a chunk boundary, + // the last chunk should not be included. + for i := offset / 31; i <= (endOffset-1)/31; i++ { + // only charge for+cache the chunk if it isn't already present + if !accesses.HasCodeChunk(contract.Address().Bytes(), i) { + index := trieUtils.GetTreeKeyCodeChunkWithEvaluatedAddress(contract.AddressPoint(), uint256.NewInt(i)) + + var overflow bool + statelessGasCharged, overflow = math.SafeAdd(statelessGasCharged, accesses.TouchAddressOnReadAndComputeGas(index)) + if overflow { + panic("overflow when adding gas") + } + + accesses.SetCachedCodeChunk(contract.Address().Bytes(), i) + } + } + + return statelessGasCharged +} + func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( stack = scope.Stack @@ -381,8 +465,20 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) uint64CodeOffset = 0xffffffffffffffff } addr := common.Address(a.Bytes20()) - codeCopy := getData(interpreter.evm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64()) - scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) + if interpreter.evm.chainRules.IsCancun { + code := interpreter.evm.StateDB.GetCode(addr) + contract := &Contract{ + Code: code, + Chunks: trie.ChunkedCode(code), + self: AccountRef(addr), + } + paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(code, uint64CodeOffset, length.Uint64()) + touchEachChunksOnReadAndChargeGas(copyOffset, nonPaddedCopyLength, contract, code, interpreter.evm.Accesses, false) + scope.Memory.Set(memOffset.Uint64(), length.Uint64(), paddedCodeCopy) + } else { + codeCopy := getData(interpreter.evm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64()) + scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) + } return nil, nil } @@ -514,6 +610,7 @@ func opSload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by loc := scope.Stack.peek() hash := common.Hash(loc.Bytes32()) val := 
interpreter.evm.StateDB.GetState(scope.Contract.Address(), hash) + loc.SetBytes(val.Bytes()) return nil, nil } @@ -583,6 +680,13 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = scope.Contract.Gas ) + if interpreter.evm.chainRules.IsCancun { + contractAddress := crypto.CreateAddress(scope.Contract.Address(), interpreter.evm.StateDB.GetNonce(scope.Contract.Address())) + statelessGas := interpreter.evm.Accesses.TouchAndChargeContractCreateInit(contractAddress.Bytes()[:], value.Sign() != 0) + if !tryConsumeGas(&gas, statelessGas) { + return nil, ErrExecutionReverted + } + } if interpreter.evm.chainRules.IsEIP150 { gas -= gas / 64 } @@ -630,6 +734,15 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = scope.Contract.Gas ) + if interpreter.evm.chainRules.IsCancun { + codeAndHash := &codeAndHash{code: input} + contractAddress := crypto.CreateAddress2(scope.Contract.Address(), salt.Bytes32(), codeAndHash.Hash().Bytes()) + statelessGas := interpreter.evm.Accesses.TouchAndChargeContractCreateInit(contractAddress.Bytes()[:], endowment.Sign() != 0) + if !tryConsumeGas(&gas, statelessGas) { + return nil, ErrExecutionReverted + } + } + // Apply EIP150 gas -= gas / 64 scope.Contract.UseGas(gas) @@ -884,6 +997,13 @@ func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by *pc += 1 if *pc < codeLen { scope.Stack.push(integer.SetUint64(uint64(scope.Contract.Code[*pc]))) + + if interpreter.evm.chainRules.IsCancun && *pc%31 == 0 { + // touch next chunk if PUSH1 is at the boundary. if so, *pc has + // advanced past this boundary. + statelessGas := touchEachChunksOnReadAndChargeGas(*pc+1, uint64(1), scope.Contract, scope.Contract.Code, interpreter.evm.Accesses, scope.Contract.IsDeployment) + scope.Contract.UseGas(statelessGas) + } } else { scope.Stack.push(integer.Clear()) } @@ -905,6 +1025,11 @@ func makePush(size uint64, pushByteSize int) executionFunc { endMin = startMin + pushByteSize } + if interpreter.evm.chainRules.IsCancun { + statelessGas := touchEachChunksOnReadAndChargeGas(uint64(startMin), uint64(pushByteSize), scope.Contract, scope.Contract.Code, interpreter.evm.Accesses, scope.Contract.IsDeployment) + scope.Contract.UseGas(statelessGas) + } + integer := new(uint256.Int) scope.Stack.push(integer.SetBytes(common.RightPadBytes( scope.Contract.Code[startMin:endMin], pushByteSize))) diff --git a/core/vm/interface.go b/core/vm/interface.go index 26814d3d2f0e..0a02a0181c05 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -20,6 +20,7 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" ) @@ -78,6 +79,9 @@ type StateDB interface { AddLog(*types.Log) AddPreimage(common.Hash, []byte) + + Witness() *state.AccessWitness + SetWitness(*state.AccessWitness) } // CallContext provides a basic interface for the EVM calling conventions. 
The EVM diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 873337850e6f..9050addbcaec 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -21,6 +21,10 @@ import ( "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" ) // Config are the configuration options for the Interpreter @@ -145,6 +149,8 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( logged bool // deferred EVMLogger should ignore already logged steps res []byte // result of the opcode execution function debug = in.evm.Config.Tracer != nil + + chunkEvals [][]byte ) // Don't move this deferred function, it's placed before the capturestate-deferred method, // so that it get's executed _after_: the capturestate needs the stacks before @@ -165,6 +171,22 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( } }() } + + // Evaluate one address per group of 256, 31-byte chunks + if in.evm.chainRules.IsCancun && !contract.IsDeployment { + contract.Chunks = trie.ChunkifyCode(contract.Code) + + // number of extra stems to evaluate after the header stem + extraEvals := (len(contract.Chunks) + 127) / verkle.NodeWidth + + chunkEvals = make([][]byte, extraEvals+1) + for i := 1; i < extraEvals+1; i++ { + chunkEvals[i] = utils.GetTreeKeyCodeChunkWithEvaluatedAddress(contract.AddressPoint(), uint256.NewInt(uint64(i)*256)) + } + // Header account is already known, it's the header account + chunkEvals[0] = utils.GetTreeKeyVersionWithEvaluatedAddress(contract.AddressPoint()) + } + // The Interpreter main run loop (contextual). This loop runs until either an // explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during // the execution of one of the operations or until the done flag is set by the @@ -174,6 +196,13 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( // Capture pre-execution values for tracing. logged, pcCopy, gasCopy = false, pc, contract.Gas } + + if contract.Chunks != nil { + // if the PC ends up in a new "chunk" of verkleized code, charge the + // associated costs. + contract.Gas -= touchChunkOnReadAndChargeGas(contract.Chunks, pc, chunkEvals, contract.Code, in.evm.TxContext.Accesses, contract.IsDeployment) + } + // Get the operation from the jump table and validate the stack to ensure there are // enough stack items available to perform the operation. 
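// Illustrative sketch, not part of the patch: the interpreter run loop above
// charges witness gas whenever the PC enters a 31-byte code chunk, and
// touchChunkOnReadAndChargeGas locates that chunk as offset/31 within the
// pre-evaluated stem group chunk/256 (one evaluated address point per 256
// chunks). chunkForPC reproduces only that index arithmetic.
package main

import "fmt"

// chunkForPC returns the code chunk number a program counter falls in and the
// index of the evaluated stem group that chunk is read from.
func chunkForPC(pc uint64) (chunk, evalGroup uint64) {
	chunk = pc / 31
	evalGroup = chunk / 256
	return chunk, evalGroup
}

func main() {
	for _, pc := range []uint64{0, 30, 31, 7935, 7936} {
		c, g := chunkForPC(pc)
		fmt.Printf("pc %5d -> chunk %4d, eval group %d\n", pc, c, g)
	}
	// pc 7936 (= 31*256) is the first byte of chunk 256, i.e. the first chunk
	// served by the second evaluated stem group.
}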
op = contract.GetOp(pc) diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 702b18661545..0a881236f64e 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -470,6 +470,7 @@ func newFrontierInstructionSet() JumpTable { EXTCODESIZE: { execute: opExtCodeSize, constantGas: params.ExtcodeSizeGasFrontier, + dynamicGas: gasExtCodeSize, minStack: minStack(1, 1), maxStack: maxStack(1, 1), }, @@ -550,6 +551,7 @@ func newFrontierInstructionSet() JumpTable { SLOAD: { execute: opSload, constantGas: params.SloadGasFrontier, + dynamicGas: gasSLoad, minStack: minStack(1, 1), maxStack: maxStack(1, 1), }, diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 04c6409ebd86..114769abda89 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -22,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/params" + trieUtils "github.com/ethereum/go-ethereum/trie/utils" ) func makeGasSStoreFunc(clearingRefund uint64) gasFunc { @@ -51,6 +52,11 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc { } value := common.Hash(y.Bytes32()) + if evm.chainRules.IsCancun { + index := trieUtils.GetTreeKeyStorageSlotWithEvaluatedAddress(contract.AddressPoint(), x.Bytes()) + cost += evm.Accesses.TouchAddressOnWriteAndComputeGas(index) + } + if current == value { // noop (1) // EIP 2200 original clause: // return params.SloadGasEIP2200, nil @@ -103,14 +109,23 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc { func gasSLoadEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { loc := stack.peek() slot := common.Hash(loc.Bytes32()) + var gasUsed uint64 + + if evm.chainRules.IsCancun { + where := stack.Back(0) + addr := contract.Address() + index := trieUtils.GetTreeKeyStorageSlot(addr[:], where) + gasUsed += evm.Accesses.TouchAddressOnReadAndComputeGas(index) + } + // Check slot presence in the access list if _, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent { // If the caller cannot afford the cost, this change will be rolled back // If he does afford it, we can skip checking the same thing later on, during execution evm.StateDB.AddSlotToAccessList(contract.Address(), slot) - return params.ColdSloadCostEIP2929, nil + return gasUsed + params.ColdSloadCostEIP2929, nil } - return params.WarmStorageReadCostEIP2929, nil + return gasUsed + params.WarmStorageReadCostEIP2929, nil } // gasExtCodeCopyEIP2929 implements extcodecopy according to EIP-2929 diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go index bf6427faf673..c68c81e511e9 100644 --- a/eth/tracers/js/tracer_test.go +++ b/eth/tracers/js/tracer_test.go @@ -267,14 +267,17 @@ func TestEnterExit(t *testing.T) { if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}, exit: function() {}}", new(tracers.Context), nil); err != nil { t.Fatal(err) } + // test that the enter and exit method are correctly invoked and the values passed tracer, err := newJsTracer("{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, step: function() {}, fault: function() {}, result: function() { return {enters: this.enters, exits: this.exits, enterGas: this.enterGas, gasUsed: this.gasUsed} }, enter: function(frame) { this.enters++; this.enterGas = frame.getGas(); }, exit: function(res) { this.exits++; this.gasUsed = res.getGasUsed(); }}", new(tracers.Context), nil) if err != nil { 
t.Fatal(err) } + scope := &vm.ScopeContext{ Contract: vm.NewContract(&account{}, &account{}, big.NewInt(0), 0), } + tracer.CaptureEnter(vm.CALL, scope.Contract.Caller(), scope.Contract.Address(), []byte{}, 1000, new(big.Int)) tracer.CaptureExit([]byte{}, 400, nil) diff --git a/go.mod b/go.mod index 9c7121c0355c..c133d4bd5ba4 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b + github.com/gballet/go-verkle v0.0.0-20230725193842-b2d852dc666b github.com/go-stack/stack v1.8.1 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.3.0 @@ -65,7 +65,7 @@ require ( golang.org/x/crypto v0.9.0 golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc golang.org/x/sync v0.3.0 - golang.org/x/sys v0.9.0 + golang.org/x/sys v0.10.0 golang.org/x/text v0.9.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.9.1 @@ -92,7 +92,7 @@ require ( github.com/cockroachdb/redact v1.1.3 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20230710183535-d5eb1c4661bd // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect diff --git a/go.sum b/go.sum index 591764e65830..47603d0dbc18 100644 --- a/go.sum +++ b/go.sum @@ -86,6 +86,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHH github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 h1:DuBDHVjgGMPki7bAyh91+3cF1Vh34sAEdH8JQgbc2R0= github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI= +github.com/crate-crypto/go-ipa v0.0.0-20230710183535-d5eb1c4661bd h1:jgf65Q4+jHFuLlhVApaVfTUwcU7dAdXK+GESow2UlaI= +github.com/crate-crypto/go-ipa v0.0.0-20230710183535-d5eb1c4661bd/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI= github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -146,6 +148,8 @@ github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqG github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b h1:vMT47RYsrftsHSTQhqXwC3BYflo38OLC3Y4LtXtLyU0= github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b/go.mod h1:CDncRYVRSDqwakm282WEkjfaAj1hxU/v5RXxk5nXOiI= +github.com/gballet/go-verkle v0.0.0-20230725193842-b2d852dc666b h1:2lDzSxjCii8FxrbuxtlFtFiw6c4nTPl9mhaZ6lgpwws= +github.com/gballet/go-verkle v0.0.0-20230725193842-b2d852dc666b/go.mod h1:+k9fzNguudDonU5q4/TUaTdmiHw3h3oGOIVmqyhaA3E= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= @@ -574,6 
+578,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index e130d9c5070f..3faf1d8bea7a 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1210,7 +1210,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr result, err := doCall(ctx, b, args, state, header, nil, nil, 0, gasCap) if err != nil { - if errors.Is(err, core.ErrIntrinsicGas) { + if errors.Is(err, core.ErrIntrinsicGas) || errors.Is(err, core.ErrInsufficientBalanceWitness) { return true, nil, nil // Special case, raise gas limit } return true, nil, err // Bail out diff --git a/les/server_requests.go b/les/server_requests.go index 30ff2cd05fb4..6f3e5e0f0335 100644 --- a/les/server_requests.go +++ b/les/server_requests.go @@ -430,7 +430,7 @@ func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) { p.bumpInvalid() continue } - trie, err = statedb.OpenStorageTrie(root, address, account.Root) + trie, err = statedb.OpenStorageTrie(root, address, account.Root, nil) if trie == nil || err != nil { p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", address, "root", account.Root, "err", err) continue diff --git a/light/odr_test.go b/light/odr_test.go index 79f901bbdb68..ffe4031ce944 100644 --- a/light/odr_test.go +++ b/light/odr_test.go @@ -87,7 +87,7 @@ func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error { t state.Trie ) if len(req.Id.AccountAddress) > 0 { - t, err = odr.serverState.OpenStorageTrie(req.Id.StateRoot, common.BytesToAddress(req.Id.AccountAddress), req.Id.Root) + t, err = odr.serverState.OpenStorageTrie(req.Id.StateRoot, common.BytesToAddress(req.Id.AccountAddress), req.Id.Root, nil) } else { t, err = odr.serverState.OpenTrie(req.Id.Root) } diff --git a/light/trie.go b/light/trie.go index 4967cc74e5ba..46f073d66a6e 100644 --- a/light/trie.go +++ b/light/trie.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" @@ -55,7 +56,7 @@ func (db *odrDatabase) OpenTrie(root common.Hash) (state.Trie, error) { return &odrTrie{db: db, id: db.id}, nil } -func (db *odrDatabase) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (state.Trie, error) { +func (db *odrDatabase) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, _ state.Trie) (state.Trie, error) { return &odrTrie{db: db, id: StorageTrieID(db.id, address, root)}, nil } @@ -100,6 +101,62 @@ func 
(db *odrDatabase) DiskDB() ethdb.KeyValueStore { panic("not implemented") } +func (db *odrDatabase) StartVerkleTransition(originalRoot common.Hash, translatedRoot common.Hash, chainConfig *params.ChainConfig, _ *uint64) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) EndVerkleTransition() { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) InTransition() bool { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) Transitioned() bool { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) SetCurrentSlotHash(hash common.Hash) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) GetCurrentAccountAddress() *common.Address { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) SetCurrentAccountAddress(_ common.Address) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) GetCurrentAccountHash() common.Hash { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) GetCurrentSlotHash() common.Hash { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) SetStorageProcessed(_ bool) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) GetStorageProcessed() bool { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) GetCurrentPreimageOffset() int64 { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) SetCurrentPreimageOffset(_ int64) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) AddRootTranslation(originalRoot common.Hash, translatedRoot common.Hash) { + panic("not implemented") // TODO: Implement +} + type odrTrie struct { db *odrDatabase id *TrieID @@ -230,6 +287,10 @@ func (t *odrTrie) do(key []byte, fn func() error) error { } } +func (t *odrTrie) IsVerkle() bool { + return false +} + type nodeIterator struct { trie.NodeIterator t *odrTrie diff --git a/miner/worker.go b/miner/worker.go index 97967ea2f18b..b23fbdeaff33 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -89,6 +89,9 @@ type environment struct { header *types.Header txs []*types.Transaction receipts []*types.Receipt + + // XXX check if this is still necessary + preRoot common.Hash } // copy creates a deep copy of environment. 
@@ -100,6 +103,7 @@ func (env *environment) copy() *environment { coinbase: env.coinbase, header: types.CopyHeader(env.header), receipts: copyReceipts(env.receipts), + preRoot: env.preRoot, } if env.gasPool != nil { gasPool := *env.gasPool @@ -712,6 +716,7 @@ func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase co state: state, coinbase: coinbase, header: header, + preRoot: parent.Root, } // Keep track of transactions which return errors so they can be removed env.tcount = 0 @@ -1050,6 +1055,7 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti if err != nil { return err } + // If we're post merge, just ignore if !w.isTTDReached(block.Header()) { select { diff --git a/miner/worker_test.go b/miner/worker_test.go index 80557d99bfcf..d8a5f8437228 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -18,6 +18,7 @@ package miner import ( "math/big" + "math/rand" "sync/atomic" "testing" "time" @@ -129,6 +130,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine default: t.Fatalf("unexpected consensus engine type: %T", engine) } + // I no longer see a call to GenerateChain so this probably broke state_processor_test.go chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("core.NewBlockChain failed: %v", err) @@ -147,6 +149,23 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine func (b *testWorkerBackend) BlockChain() *core.BlockChain { return b.chain } func (b *testWorkerBackend) TxPool() *txpool.TxPool { return b.txPool } +//nolint:unused +func (b *testWorkerBackend) newRandomVerkleUncle() *types.Block { + var parent *types.Block + cur := b.chain.CurrentBlock() + if cur.Number.Uint64() == 0 { + parent = b.chain.Genesis() + } else { + parent = b.chain.GetBlockByHash(b.chain.CurrentBlock().ParentHash) + } + blocks, _, _, _ := core.GenerateVerkleChain(b.chain.Config(), parent, b.chain.Engine(), b.db, 1, func(i int, gen *core.BlockGen) { + var addr = make([]byte, common.AddressLength) + rand.Read(addr) + gen.SetCoinbase(common.BytesToAddress(addr)) + }) + return blocks[0] +} + func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction { var tx *types.Transaction gasPrice := big.NewInt(10 * params.InitialBaseFee) diff --git a/params/verkle_params.go b/params/verkle_params.go new file mode 100644 index 000000000000..93d4f7cd6476 --- /dev/null +++ b/params/verkle_params.go @@ -0,0 +1,36 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package params + +// Verkle tree EIP: costs associated to witness accesses +var ( + WitnessBranchReadCost uint64 = 1900 + WitnessChunkReadCost uint64 = 200 + WitnessBranchWriteCost uint64 = 3000 + WitnessChunkWriteCost uint64 = 500 + WitnessChunkFillCost uint64 = 6200 +) + +// ClearVerkleWitnessCosts sets all witness costs to 0, which is necessary +// for historical block replay simulations. +func ClearVerkleWitnessCosts() { + WitnessBranchReadCost = 0 + WitnessChunkReadCost = 0 + WitnessBranchWriteCost = 0 + WitnessChunkWriteCost = 0 + WitnessChunkFillCost = 0 +} diff --git a/trie/database.go b/trie/database.go index 49a884fd7f39..5989e11481af 100644 --- a/trie/database.go +++ b/trie/database.go @@ -18,6 +18,7 @@ package trie import ( "errors" + "sync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" @@ -75,6 +76,10 @@ type Database struct { diskdb ethdb.Database // Persistent database to store the snapshot preimages *preimageStore // The store for caching preimages backend backend // The backend for managing trie nodes + + // Items used for root conversion during the verkle transition + addrToRoot map[common.Address]common.Hash + addrToRootLock sync.RWMutex } // prepare initializes the database with provided configs, but the @@ -240,3 +245,37 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) { } return hdb.Node(hash) } + +func (db *Database) HasStorageRootConversion(addr common.Address) bool { + db.addrToRootLock.RLock() + defer db.addrToRootLock.RUnlock() + if db.addrToRoot == nil { + return false + } + _, ok := db.addrToRoot[addr] + return ok +} + +func (db *Database) SetStorageRootConversion(addr common.Address, root common.Hash) { + db.addrToRootLock.Lock() + defer db.addrToRootLock.Unlock() + if db.addrToRoot == nil { + db.addrToRoot = make(map[common.Address]common.Hash) + } + db.addrToRoot[addr] = root +} + +func (db *Database) StorageRootConversion(addr common.Address) common.Hash { + db.addrToRootLock.RLock() + defer db.addrToRootLock.RUnlock() + if db.addrToRoot == nil { + return common.Hash{} + } + return db.addrToRoot[addr] +} + +func (db *Database) ClearStorageRootConversion(addr common.Address) { + db.addrToRootLock.Lock() + defer db.addrToRootLock.Unlock() + delete(db.addrToRoot, addr) +} diff --git a/trie/secure_trie.go b/trie/secure_trie.go index 7f0685e30666..f4a999c2f68f 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -288,3 +288,7 @@ func (t *StateTrie) getSecKeyCache() map[string][]byte { } return t.secKeyCache } + +func (t *StateTrie) IsVerkle() bool { + return false +} diff --git a/trie/transition.go b/trie/transition.go new file mode 100644 index 000000000000..514d3e99825b --- /dev/null +++ b/trie/transition.go @@ -0,0 +1,201 @@ +// Copyright 2021 go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package trie + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/gballet/go-verkle" +) + +type TransitionTrie struct { + overlay *VerkleTrie + base *SecureTrie + storage bool +} + +func NewTransitionTree(base *SecureTrie, overlay *VerkleTrie, st bool) *TransitionTrie { + return &TransitionTrie{ + overlay: overlay, + base: base, + storage: st, + } +} + +func (t *TransitionTrie) Base() *SecureTrie { + return t.base +} + +// TODO(gballet/jsign): consider removing this API. +func (t *TransitionTrie) Overlay() *VerkleTrie { + return t.overlay +} + +// GetKey returns the sha3 preimage of a hashed key that was previously used +// to store a value. +// +// TODO(fjl): remove this when StateTrie is removed +func (t *TransitionTrie) GetKey(key []byte) []byte { + if key := t.overlay.GetKey(key); key != nil { + return key + } + return t.base.GetKey(key) +} + +// Get returns the value for key stored in the trie. The value bytes must +// not be modified by the caller. If a node was not found in the database, a +// trie.MissingNodeError is returned. +func (t *TransitionTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) { + if val, err := t.overlay.GetStorage(addr, key); len(val) != 0 || err != nil { + return val, nil + } + // TODO also insert value into overlay + rlpval, err := t.base.GetStorage(addr, key) + if err != nil { + return nil, err + } + if len(rlpval) == 0 { + return nil, nil + } + // the value will come as RLP, decode it so that the + // interface is consistent. + _, content, _, err := rlp.Split(rlpval) + if err != nil || len(content) == 0 { + return nil, err + } + var v [32]byte + copy(v[32-len(content):], content) + return v[:], nil +} + +// GetAccount abstract an account read from the trie. +func (t *TransitionTrie) GetAccount(address common.Address) (*types.StateAccount, error) { + data, err := t.overlay.GetAccount(address) + if err != nil { + // WORKAROUND, see the definition of errDeletedAccount + // for an explainer of why this if is needed. + if err == errDeletedAccount { + return nil, nil + } + return nil, err + } + if data != nil { + if t.overlay.db.HasStorageRootConversion(address) { + data.Root = t.overlay.db.StorageRootConversion(address) + } + return data, nil + } + // TODO also insert value into overlay + return t.base.GetAccount(address) +} + +// Update associates key with value in the trie. If value has length zero, any +// existing value is deleted from the trie. The value bytes must not be modified +// by the caller while they are stored in the trie. If a node was not found in the +// database, a trie.MissingNodeError is returned. +func (t *TransitionTrie) UpdateStorage(address common.Address, key []byte, value []byte) error { + return t.overlay.UpdateStorage(address, key, value) +} + +// UpdateAccount abstract an account write to the trie. +func (t *TransitionTrie) UpdateAccount(addr common.Address, account *types.StateAccount) error { + if account.Root != (common.Hash{}) && account.Root != types.EmptyRootHash { + t.overlay.db.SetStorageRootConversion(addr, account.Root) + } + return t.overlay.UpdateAccount(addr, account) +} + +// Delete removes any existing value for key from the trie. If a node was not +// found in the database, a trie.MissingNodeError is returned. 
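For readers following this file: the TransitionTrie reads the verkle overlay first and falls back to the merkle base only when the overlay has nothing for the key, while writes always go to the overlay. A minimal, self-contained sketch of that read rule (illustrative only; plain maps stand in for the two tries and every name is invented):

package main

import "fmt"

// transitionReader mimics the TransitionTrie read rule: values already
// migrated to the verkle overlay win, everything else falls back to the
// merkle base.
type transitionReader struct {
    overlay map[string][]byte // stands in for the verkle overlay
    base    map[string][]byte // stands in for the merkle base trie
}

func (t *transitionReader) get(key string) []byte {
    if val, ok := t.overlay[key]; ok && len(val) != 0 {
        return val // already translated, served from the overlay
    }
    return t.base[key] // not translated yet, served from the base
}

func main() {
    r := &transitionReader{
        overlay: map[string][]byte{"migrated": {0x01}},
        base:    map[string][]byte{"migrated": {0xff}, "legacy": {0x02}},
    }
    fmt.Printf("migrated -> %x\n", r.get("migrated")) // 01: the overlay wins
    fmt.Printf("legacy   -> %x\n", r.get("legacy"))   // 02: falls back to the base
}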
+func (t *TransitionTrie) DeleteStorage(addr common.Address, key []byte) error { + return t.overlay.DeleteStorage(addr, key) +} + +// DeleteAccount abstracts an account deletion from the trie. +func (t *TransitionTrie) DeleteAccount(key common.Address) error { + return t.overlay.DeleteAccount(key) +} + +// Hash returns the root hash of the trie. It does not write to the database and +// can be used even if the trie doesn't have one. +func (t *TransitionTrie) Hash() common.Hash { + return t.overlay.Hash() +} + +// Commit collects all dirty nodes in the trie and replace them with the +// corresponding node hash. All collected nodes(including dirty leaves if +// collectLeaf is true) will be encapsulated into a nodeset for return. +// The returned nodeset can be nil if the trie is clean(nothing to commit). +// Once the trie is committed, it's not usable anymore. A new trie must +// be created with new root and updated trie database for following usage +func (t *TransitionTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { + // Just return if the trie is a storage trie: otherwise, + // the overlay trie will be committed as many times as + // there are storage tries. This would kill performance. + if t.storage { + return common.Hash{}, nil, nil + } + return t.overlay.Commit(collectLeaf) +} + +// NodeIterator returns an iterator that returns nodes of the trie. Iteration +// starts at the key after the given start key. +func (t *TransitionTrie) NodeIterator(startKey []byte) (NodeIterator, error) { + panic("not implemented") // TODO: Implement +} + +// Prove constructs a Merkle proof for key. The result contains all encoded nodes +// on the path to the value at key. The value itself is also included in the last +// node and can be retrieved by verifying the proof. +// +// If the trie does not contain a value for key, the returned proof contains all +// nodes of the longest existing prefix of the key (at least the root), ending +// with the node that proves the absence of the key. +func (t *TransitionTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { + panic("not implemented") // TODO: Implement +} + +// IsVerkle returns true if the trie is verkle-tree based +func (t *TransitionTrie) IsVerkle() bool { + // For all intents and purposes, the calling code should treat this as a verkle trie + return true +} + +func (t *TransitionTrie) UpdateStem(key []byte, values [][]byte) error { + trie := t.overlay + switch root := trie.root.(type) { + case *verkle.InternalNode: + return root.InsertStem(key, values, t.overlay.flatdbNodeResolver) + default: + panic("invalid root type") + } +} + +func (t *TransitionTrie) Copy() *TransitionTrie { + return &TransitionTrie{ + overlay: t.overlay.Copy(), + base: t.base.Copy(), + storage: t.storage, + } +} + +func (t *TransitionTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error { + return t.overlay.UpdateContractCode(addr, codeHash, code) +} diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go new file mode 100644 index 000000000000..c06c189b99b2 --- /dev/null +++ b/trie/utils/verkle.go @@ -0,0 +1,290 @@ +// Copyright 2021 go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package utils + +import ( + "math/big" + "sync" + + "github.com/crate-crypto/go-ipa/bandersnatch/fr" + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" +) + +const ( + VersionLeafKey = 0 + BalanceLeafKey = 1 + NonceLeafKey = 2 + CodeKeccakLeafKey = 3 + CodeSizeLeafKey = 4 +) + +var ( + zero = uint256.NewInt(0) + HeaderStorageOffset = uint256.NewInt(64) + CodeOffset = uint256.NewInt(128) + MainStorageOffset = new(uint256.Int).Lsh(uint256.NewInt(256), 31) + VerkleNodeWidth = uint256.NewInt(256) + codeStorageDelta = uint256.NewInt(0).Sub(CodeOffset, HeaderStorageOffset) + + // BigInt versions of the above. + headerStorageOffsetBig = HeaderStorageOffset.ToBig() + mainStorageOffsetBig = MainStorageOffset.ToBig() + verkleNodeWidthBig = VerkleNodeWidth.ToBig() + codeStorageDeltaBig = codeStorageDelta.ToBig() + + getTreePolyIndex0Point *verkle.Point +) + +type PointCache struct { + cache map[string]*verkle.Point + lock sync.RWMutex +} + +func NewPointCache() *PointCache { + return &PointCache{ + cache: make(map[string]*verkle.Point), + } +} + +func (pc *PointCache) GetTreeKeyHeader(addr []byte) *verkle.Point { + pc.lock.RLock() + point, ok := pc.cache[string(addr)] + pc.lock.RUnlock() + if ok { + return point + } + + point = EvaluateAddressPoint(addr) + pc.lock.Lock() + pc.cache[string(addr)] = point + pc.lock.Unlock() + return point +} + +func (pc *PointCache) GetTreeKeyVersionCached(addr []byte) []byte { + p := pc.GetTreeKeyHeader(addr) + v := PointToHash(p, VersionLeafKey) + return v[:] +} + +func init() { + // The byte array is the Marshalled output of the point computed as such: + //cfg, _ := verkle.GetConfig() + //verkle.FromLEBytes(&getTreePolyIndex0Fr[0], []byte{2, 64}) + //= cfg.CommitToPoly(getTreePolyIndex0Fr[:], 1) + getTreePolyIndex0Point = new(verkle.Point) + err := getTreePolyIndex0Point.SetBytes([]byte{34, 25, 109, 242, 193, 5, 144, 224, 76, 52, 189, 92, 197, 126, 9, 145, 27, 152, 199, 130, 165, 3, 210, 27, 193, 131, 142, 28, 110, 26, 16, 191}) + if err != nil { + panic(err) + } +} + +// GetTreeKey performs both the work of the spec's get_tree_key function, and that +// of pedersen_hash: it builds the polynomial in pedersen_hash without having to +// create a mostly zero-filled buffer and "type cast" it to a 128-long 16-byte +// array. Since at most the first 5 coefficients of the polynomial will be non-zero, +// these 5 coefficients are created directly. +func GetTreeKey(address []byte, treeIndex *uint256.Int, subIndex byte) []byte { + if len(address) < 32 { + var aligned [32]byte + address = append(aligned[:32-len(address)], address...) + } + + // poly = [2+256*64, address_le_low, address_le_high, tree_index_le_low, tree_index_le_high] + var poly [5]fr.Element + + // 32-byte address, interpreted as two little endian + // 16-byte numbers. + verkle.FromLEBytes(&poly[1], address[:16]) + verkle.FromLEBytes(&poly[2], address[16:]) + + // treeIndex must be interpreted as a 32-byte aligned little-endian integer. + // e.g: if treeIndex is 0xAABBCC, we need the byte representation to be 0xCCBBAA00...00. 
+ // poly[3] = LE({CC,BB,AA,00...0}) (16 bytes), poly[4]=LE({00,00,...}) (16 bytes). + // + // To avoid unnecessary endianness conversions for go-ipa, we do some trick: + // - poly[3]'s byte representation is the same as the *top* 16 bytes (trieIndexBytes[16:]) of + // 32-byte aligned big-endian representation (BE({00,...,AA,BB,CC})). + // - poly[4]'s byte representation is the same as the *low* 16 bytes (trieIndexBytes[:16]) of + // the 32-byte aligned big-endian representation (BE({00,00,...}). + trieIndexBytes := treeIndex.Bytes32() + verkle.FromBytes(&poly[3], trieIndexBytes[16:]) + verkle.FromBytes(&poly[4], trieIndexBytes[:16]) + + cfg := verkle.GetConfig() + ret := cfg.CommitToPoly(poly[:], 0) + + // add a constant point corresponding to poly[0]=[2+256*64]. + ret.Add(ret, getTreePolyIndex0Point) + + return PointToHash(ret, subIndex) +} + +func GetTreeKeyAccountLeaf(address []byte, leaf byte) []byte { + return GetTreeKey(address, zero, leaf) +} + +func GetTreeKeyVersion(address []byte) []byte { + return GetTreeKey(address, zero, VersionLeafKey) +} + +func GetTreeKeyVersionWithEvaluatedAddress(addrp *verkle.Point) []byte { + return getTreeKeyWithEvaluatedAddess(addrp, zero, VersionLeafKey) +} + +func GetTreeKeyBalance(address []byte) []byte { + return GetTreeKey(address, zero, BalanceLeafKey) +} + +func GetTreeKeyNonce(address []byte) []byte { + return GetTreeKey(address, zero, NonceLeafKey) +} + +func GetTreeKeyCodeKeccak(address []byte) []byte { + return GetTreeKey(address, zero, CodeKeccakLeafKey) +} + +func GetTreeKeyCodeSize(address []byte) []byte { + return GetTreeKey(address, zero, CodeSizeLeafKey) +} + +func GetTreeKeyCodeChunk(address []byte, chunk *uint256.Int) []byte { + chunkOffset := new(uint256.Int).Add(CodeOffset, chunk) + treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth) + subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth) + var subIndex byte + if len(subIndexMod) != 0 { + subIndex = byte(subIndexMod[0]) + } + return GetTreeKey(address, treeIndex, subIndex) +} + +func GetTreeKeyCodeChunkWithEvaluatedAddress(addressPoint *verkle.Point, chunk *uint256.Int) []byte { + chunkOffset := new(uint256.Int).Add(CodeOffset, chunk) + treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth) + subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth) + var subIndex byte + if len(subIndexMod) != 0 { + subIndex = byte(subIndexMod[0]) + } + return getTreeKeyWithEvaluatedAddess(addressPoint, treeIndex, subIndex) +} + +func GetTreeKeyStorageSlot(address []byte, storageKey *uint256.Int) []byte { + pos := storageKey.Clone() + if storageKey.Cmp(codeStorageDelta) < 0 { + pos.Add(HeaderStorageOffset, storageKey) + } else { + pos.Add(MainStorageOffset, storageKey) + } + treeIndex := new(uint256.Int).Div(pos, VerkleNodeWidth) + + // calculate the sub_index, i.e. the index in the stem tree. + // Because the modulus is 256, it's the last byte of treeIndex + subIndexMod := new(uint256.Int).Mod(pos, VerkleNodeWidth) + var subIndex byte + if len(subIndexMod) != 0 { + // uint256 is broken into 4 little-endian quads, + // each with native endianness. Extract the least + // significant byte. + subIndex = byte(subIndexMod[0]) + } + return GetTreeKey(address, treeIndex, subIndex) +} + +func PointToHash(evaluated *verkle.Point, suffix byte) []byte { + // The output of Byte() is big engian for banderwagon. This + // introduces an imbalance in the tree, because hashes are + // elements of a 253-bit field. This means more than half the + // tree would be empty. 
To avoid this problem, use a little + // endian commitment and chop the MSB. + retb := evaluated.Bytes() + for i := 0; i < 16; i++ { + retb[31-i], retb[i] = retb[i], retb[31-i] + } + retb[31] = suffix + return retb[:] +} + +func getTreeKeyWithEvaluatedAddess(evaluated *verkle.Point, treeIndex *uint256.Int, subIndex byte) []byte { + var poly [5]fr.Element + + poly[0].SetZero() + poly[1].SetZero() + poly[2].SetZero() + + // little-endian, 32-byte aligned treeIndex + var index [32]byte + for i, b := range treeIndex.Bytes() { + index[len(treeIndex.Bytes())-1-i] = b + } + verkle.FromLEBytes(&poly[3], index[:16]) + verkle.FromLEBytes(&poly[4], index[16:]) + + cfg := verkle.GetConfig() + ret := cfg.CommitToPoly(poly[:], 0) + + // add the pre-evaluated address + ret.Add(ret, evaluated) + + return PointToHash(ret, subIndex) +} + +func EvaluateAddressPoint(address []byte) *verkle.Point { + if len(address) < 32 { + var aligned [32]byte + address = append(aligned[:32-len(address)], address...) + } + var poly [3]fr.Element + + poly[0].SetZero() + + // 32-byte address, interpreted as two little endian + // 16-byte numbers. + verkle.FromLEBytes(&poly[1], address[:16]) + verkle.FromLEBytes(&poly[2], address[16:]) + + cfg := verkle.GetConfig() + ret := cfg.CommitToPoly(poly[:], 0) + + // add a constant point + ret.Add(ret, getTreePolyIndex0Point) + + return ret +} + +func GetTreeKeyStorageSlotWithEvaluatedAddress(evaluated *verkle.Point, storageKey []byte) []byte { + // Note that `pos` must be a big.Int and not a uint256.Int, because the subsequent + // arithmetics operations could overflow. (e.g: imagine if storageKey is 2^256-1) + pos := new(big.Int).SetBytes(storageKey) + if pos.Cmp(codeStorageDeltaBig) < 0 { + pos.Add(headerStorageOffsetBig, pos) + } else { + pos.Add(mainStorageOffsetBig, pos) + } + treeIndex, overflow := uint256.FromBig(big.NewInt(0).Div(pos, verkleNodeWidthBig)) + if overflow { // Must never happen considering the EIP definition. + panic("tree index overflow") + } + // calculate the sub_index, i.e. the index in the stem tree. + // Because the modulus is 256, it's the last byte of treeIndex + posBytes := pos.Bytes() + subIndex := posBytes[len(posBytes)-1] + + return getTreeKeyWithEvaluatedAddess(evaluated, treeIndex, subIndex) +} diff --git a/trie/utils/verkle_test.go b/trie/utils/verkle_test.go new file mode 100644 index 000000000000..744df9df26ac --- /dev/null +++ b/trie/utils/verkle_test.go @@ -0,0 +1,95 @@ +// Copyright 2022 go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
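As a worked example of GetTreeKeyStorageSlot's index arithmetic above: a slot below codeStorageDelta is shifted by HeaderStorageOffset, the tree index is the shifted position divided by VerkleNodeWidth, and the sub-index is the remainder (its last byte). The sketch below merely repeats that arithmetic with the constants mirrored from this file, using the same github.com/holiman/uint256 package the patch depends on; it is not part of the patch:

package main

import (
    "fmt"

    "github.com/holiman/uint256"
)

// Constants mirrored from trie/utils/verkle.go in this patch.
var (
    headerStorageOffset = uint256.NewInt(64)
    codeOffset          = uint256.NewInt(128)
    verkleNodeWidth     = uint256.NewInt(256)
    codeStorageDelta    = new(uint256.Int).Sub(codeOffset, headerStorageOffset)
)

func main() {
    slot := uint256.NewInt(2)
    fmt.Println("header branch:", slot.Cmp(codeStorageDelta) < 0) // true: 2 < 64

    pos := new(uint256.Int).Add(headerStorageOffset, slot)  // 64 + 2 = 66
    treeIndex := new(uint256.Int).Div(pos, verkleNodeWidth) // 66 / 256 = 0
    subIndex := new(uint256.Int).Mod(pos, verkleNodeWidth)  // 66 % 256 = 66

    fmt.Println("treeIndex:", treeIndex.Uint64()) // 0: same stem as the account header
    fmt.Println("subIndex: ", subIndex.Uint64())  // 66: leaf position within that stem
}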
+ +package utils + +import ( + "crypto/sha256" + "encoding/hex" + "math/big" + "math/rand" + "testing" + + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" +) + +func TestGetTreeKey(t *testing.T) { + var addr [32]byte + for i := 0; i < 16; i++ { + addr[1+2*i] = 0xff + } + n := uint256.NewInt(1) + n = n.Lsh(n, 129) + n.Add(n, uint256.NewInt(3)) + tk := GetTreeKey(addr[:], n, 1) + + got := hex.EncodeToString(tk) + exp := "f42f932f43faf5d14b292b9009c45c28da61dbf66e20dbedc2e02dfd64ff5a01" + if got != exp { + t.Fatalf("Generated trie key is incorrect: %s != %s", got, exp) + } +} + +func TestConstantPoint(t *testing.T) { + var expectedPoly [1]verkle.Fr + + cfg := verkle.GetConfig() + verkle.FromLEBytes(&expectedPoly[0], []byte{2, 64}) + expected := cfg.CommitToPoly(expectedPoly[:], 1) + + if !verkle.Equal(expected, getTreePolyIndex0Point) { + t.Fatalf("Marshalled constant value is incorrect: %x != %x", expected.Bytes(), getTreePolyIndex0Point.Bytes()) + } +} + +func BenchmarkPedersenHash(b *testing.B) { + var addr, v [32]byte + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + rand.Read(v[:]) + rand.Read(addr[:]) + GetTreeKeyCodeSize(addr[:]) + } +} + +func sha256GetTreeKeyCodeSize(addr []byte) []byte { + digest := sha256.New() + digest.Write(addr) + treeIndexBytes := new(big.Int).Bytes() + var payload [32]byte + copy(payload[:len(treeIndexBytes)], treeIndexBytes) + digest.Write(payload[:]) + h := digest.Sum(nil) + h[31] = CodeKeccakLeafKey + return h +} + +func BenchmarkSha256Hash(b *testing.B) { + var addr, v [32]byte + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + rand.Read(v[:]) + rand.Read(addr[:]) + sha256GetTreeKeyCodeSize(addr[:]) + } +} diff --git a/trie/verkle.go b/trie/verkle.go new file mode 100644 index 000000000000..4a87bc88ce4d --- /dev/null +++ b/trie/verkle.go @@ -0,0 +1,482 @@ +// Copyright 2021 go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" +) + +// VerkleTrie is a wrapper around VerkleNode that implements the trie.Trie +// interface so that Verkle trees can be reused verbatim. 
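One property of the tree-key construction exercised by TestGetTreeKey above is worth spelling out: all header fields of an account share the same 31-byte stem and differ only in the suffix byte (Version=0, Balance=1, Nonce=2). A short sketch, assuming this branch of go-ethereum is the module dependency so the trie/utils helpers resolve:

package main

import (
    "bytes"
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/trie/utils"
)

func main() {
    addr := common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617f7")

    version := utils.GetTreeKeyVersion(addr.Bytes())
    balance := utils.GetTreeKeyBalance(addr.Bytes())
    nonce := utils.GetTreeKeyNonce(addr.Bytes())

    // All header fields of one account live under the same 31-byte stem;
    // only the final suffix byte differs.
    fmt.Println(bytes.Equal(version[:31], balance[:31])) // true
    fmt.Println(bytes.Equal(version[:31], nonce[:31]))   // true
    fmt.Println(version[31], balance[31], nonce[31])     // 0 1 2
}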
+type VerkleTrie struct { + root verkle.VerkleNode + db *Database + pointCache *utils.PointCache + ended bool +} + +func (vt *VerkleTrie) ToDot() string { + return verkle.ToDot(vt.root) +} + +func NewVerkleTrie(root verkle.VerkleNode, db *Database, pointCache *utils.PointCache, ended bool) *VerkleTrie { + return &VerkleTrie{ + root: root, + db: db, + pointCache: pointCache, + ended: ended, + } +} + +func (trie *VerkleTrie) flatdbNodeResolver(path []byte) ([]byte, error) { + return trie.db.diskdb.Get(append(FlatDBVerkleNodeKeyPrefix, path...)) +} + +func (trie *VerkleTrie) InsertMigratedLeaves(leaves []verkle.LeafNode) error { + return trie.root.(*verkle.InternalNode).InsertMigratedLeaves(leaves, trie.flatdbNodeResolver) +} + +var ( + errInvalidProof = errors.New("invalid proof") + errInvalidRootType = errors.New("invalid node type for root") + + // WORKAROUND: this special error is returned if it has been + // detected that the account was deleted in the verkle tree. + // This is needed in case an account was translated while it + // was in the MPT, and was selfdestructed in verkle mode. + // + // This is only a problem for replays, and this code is not + // needed after SELFDESTRUCT has been removed. + errDeletedAccount = errors.New("account deleted in VKT") + + FlatDBVerkleNodeKeyPrefix = []byte("flat-") // prefix for flatdb keys +) + +// GetKey returns the sha3 preimage of a hashed key that was previously used +// to store a value. +func (trie *VerkleTrie) GetKey(key []byte) []byte { + return key +} + +// Get returns the value for key stored in the trie. The value bytes must +// not be modified by the caller. If a node was not found in the database, a +// trie.MissingNodeError is returned. +func (trie *VerkleTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) { + pointEval := trie.pointCache.GetTreeKeyHeader(addr[:]) + k := utils.GetTreeKeyStorageSlotWithEvaluatedAddress(pointEval, key) + return trie.root.Get(k, trie.flatdbNodeResolver) +} + +// GetWithHashedKey returns the value, assuming that the key has already +// been hashed. +func (trie *VerkleTrie) GetWithHashedKey(key []byte) ([]byte, error) { + return trie.root.Get(key, trie.flatdbNodeResolver) +} + +func (t *VerkleTrie) GetAccount(addr common.Address) (*types.StateAccount, error) { + acc := &types.StateAccount{} + versionkey := t.pointCache.GetTreeKeyVersionCached(addr[:]) + var ( + values [][]byte + err error + ) + switch t.root.(type) { + case *verkle.InternalNode: + values, err = t.root.(*verkle.InternalNode).GetStem(versionkey[:31], t.flatdbNodeResolver) + default: + return nil, errInvalidRootType + } + if err != nil { + return nil, fmt.Errorf("GetAccount (%x) error: %v", addr, err) + } + + if values == nil { + return nil, nil + } + if len(values[utils.NonceLeafKey]) > 0 { + acc.Nonce = binary.LittleEndian.Uint64(values[utils.NonceLeafKey]) + } + // if the account has been deleted, then values[10] will be 0 and not nil. If it has + // been recreated after that, then its code keccak will NOT be 0. So return `nil` if + // the nonce, and values[10], and code keccak is 0. 
+ + if acc.Nonce == 0 && len(values) > 10 && len(values[10]) > 0 && bytes.Equal(values[utils.CodeKeccakLeafKey], zero[:]) { + if !t.ended { + return nil, errDeletedAccount + } else { + return nil, nil + } + } + var balance [32]byte + copy(balance[:], values[utils.BalanceLeafKey]) + for i := 0; i < len(balance)/2; i++ { + balance[len(balance)-i-1], balance[i] = balance[i], balance[len(balance)-i-1] + } + // var balance [32]byte + // if len(values[utils.BalanceLeafKey]) > 0 { + // for i := 0; i < len(balance); i++ { + // balance[len(balance)-i-1] = values[utils.BalanceLeafKey][i] + // } + // } + acc.Balance = new(big.Int).SetBytes(balance[:]) + acc.CodeHash = values[utils.CodeKeccakLeafKey] + // TODO fix the code size as well + + return acc, nil +} + +var zero [32]byte + +func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount) error { + var ( + err error + nonce, balance [32]byte + values = make([][]byte, verkle.NodeWidth) + stem = t.pointCache.GetTreeKeyVersionCached(addr[:]) + ) + + // Only evaluate the polynomial once + values[utils.VersionLeafKey] = zero[:] + values[utils.NonceLeafKey] = nonce[:] + values[utils.BalanceLeafKey] = balance[:] + values[utils.CodeKeccakLeafKey] = acc.CodeHash[:] + + binary.LittleEndian.PutUint64(nonce[:], acc.Nonce) + bbytes := acc.Balance.Bytes() + if len(bbytes) > 0 { + for i, b := range bbytes { + balance[len(bbytes)-i-1] = b + } + } + + switch root := t.root.(type) { + case *verkle.InternalNode: + err = root.InsertStem(stem, values, t.flatdbNodeResolver) + default: + return errInvalidRootType + } + if err != nil { + return fmt.Errorf("UpdateAccount (%x) error: %v", addr, err) + } + // TODO figure out if the code size needs to be updated, too + + return nil +} + +func (trie *VerkleTrie) UpdateStem(key []byte, values [][]byte) error { + switch root := trie.root.(type) { + case *verkle.InternalNode: + return root.InsertStem(key, values, trie.flatdbNodeResolver) + default: + panic("invalid root type") + } +} + +// Update associates key with value in the trie. If value has length zero, any +// existing value is deleted from the trie. The value bytes must not be modified +// by the caller while they are stored in the trie. If a node was not found in the +// database, a trie.MissingNodeError is returned. +func (trie *VerkleTrie) UpdateStorage(address common.Address, key, value []byte) error { + k := utils.GetTreeKeyStorageSlotWithEvaluatedAddress(trie.pointCache.GetTreeKeyHeader(address[:]), key) + var v [32]byte + copy(v[:], value[:]) + return trie.root.Insert(k, v[:], trie.flatdbNodeResolver) +} + +func (t *VerkleTrie) DeleteAccount(addr common.Address) error { + var ( + err error + values = make([][]byte, verkle.NodeWidth) + stem = t.pointCache.GetTreeKeyVersionCached(addr[:]) + ) + + for i := 0; i < verkle.NodeWidth; i++ { + values[i] = zero[:] + } + + switch root := t.root.(type) { + case *verkle.InternalNode: + err = root.InsertStem(stem, values, t.flatdbNodeResolver) + default: + return errInvalidRootType + } + if err != nil { + return fmt.Errorf("DeleteAccount (%x) error: %v", addr, err) + } + // TODO figure out if the code size needs to be updated, too + + return nil +} + +// Delete removes any existing value for key from the trie. If a node was not +// found in the database, a trie.MissingNodeError is returned. 
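GetAccount and UpdateAccount above pack the nonce as a little-endian uint64 and the balance as a 32-byte little-endian value, which is why the big-endian output of big.Int.Bytes() gets reversed. A standalone sketch of just that packing step (the function name is invented for illustration):

package main

import (
    "encoding/binary"
    "fmt"
    "math/big"
)

// encodeLeafValues mirrors the nonce/balance packing used when an account is
// written into the verkle tree: both become 32-byte little-endian leaf values.
func encodeLeafValues(nonce uint64, balance *big.Int) (nonceLeaf, balanceLeaf [32]byte) {
    // Nonce: little-endian uint64 in the first 8 bytes of the leaf.
    binary.LittleEndian.PutUint64(nonceLeaf[:], nonce)

    // Balance: big.Int.Bytes() is big-endian, so reverse it into the leaf.
    bbytes := balance.Bytes()
    for i, b := range bbytes {
        balanceLeaf[len(bbytes)-i-1] = b
    }
    return nonceLeaf, balanceLeaf
}

func main() {
    nonce, balance := encodeLeafValues(1337, big.NewInt(2000))
    fmt.Printf("nonce leaf:   %x\n", nonce)   // 3905 followed by zero bytes
    fmt.Printf("balance leaf: %x\n", balance) // d007 followed by zero bytes
}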
+func (trie *VerkleTrie) DeleteStorage(addr common.Address, key []byte) error { + pointEval := trie.pointCache.GetTreeKeyHeader(addr[:]) + k := utils.GetTreeKeyStorageSlotWithEvaluatedAddress(pointEval, key) + var zero [32]byte + return trie.root.Insert(k, zero[:], trie.flatdbNodeResolver) +} + +// Hash returns the root hash of the trie. It does not write to the database and +// can be used even if the trie doesn't have one. +func (trie *VerkleTrie) Hash() common.Hash { + return trie.root.Commit().Bytes() +} + +func nodeToDBKey(n verkle.VerkleNode) []byte { + ret := n.Commitment().Bytes() + return ret[:] +} + +// Commit writes all nodes to the trie's memory database, tracking the internal +// and external (for account tries) references. +func (trie *VerkleTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet, error) { + root, ok := trie.root.(*verkle.InternalNode) + if !ok { + return common.Hash{}, nil, errors.New("unexpected root node type") + } + nodes, err := root.BatchSerialize() + if err != nil { + return common.Hash{}, nil, fmt.Errorf("serializing tree nodes: %s", err) + } + + batch := trie.db.diskdb.NewBatch() + path := make([]byte, 0, len(FlatDBVerkleNodeKeyPrefix)+32) + path = append(path, FlatDBVerkleNodeKeyPrefix...) + for _, node := range nodes { + path := append(path[:len(FlatDBVerkleNodeKeyPrefix)], node.Path...) + + if err := batch.Put(path, node.SerializedBytes); err != nil { + return common.Hash{}, nil, fmt.Errorf("put node to disk: %s", err) + } + + if batch.ValueSize() >= ethdb.IdealBatchSize { + batch.Write() + batch.Reset() + } + } + batch.Write() + + // Serialize root commitment form + rootH := root.Hash().BytesLE() + return common.BytesToHash(rootH[:]), nil, nil +} + +// NodeIterator returns an iterator that returns nodes of the trie. Iteration +// starts at the key after the given start key. +func (trie *VerkleTrie) NodeIterator(startKey []byte) (NodeIterator, error) { + return newVerkleNodeIterator(trie, nil) +} + +// Prove constructs a Merkle proof for key. The result contains all encoded nodes +// on the path to the value at key. The value itself is also included in the last +// node and can be retrieved by verifying the proof. +// +// If the trie does not contain a value for key, the returned proof contains all +// nodes of the longest existing prefix of the key (at least the root), ending +// with the node that proves the absence of the key. 
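Commit above persists each serialized verkle node under FlatDBVerkleNodeKeyPrefix plus the node path, and flatdbNodeResolver later reads nodes back with the same key. A toy sketch of that key scheme, with a plain map standing in for the key-value store (names invented, not part of the patch):

package main

import "fmt"

// Same prefix as FlatDBVerkleNodeKeyPrefix in this patch.
var flatPrefix = []byte("flat-")

// nodeKey builds the flat-db key for a verkle node at the given tree path.
func nodeKey(path []byte) []byte {
    return append(append([]byte{}, flatPrefix...), path...)
}

func main() {
    db := map[string][]byte{} // stands in for the persistent key-value store

    // "Commit": store a serialized node under prefix + path.
    path := []byte{0x0b, 0x37}
    db[string(nodeKey(path))] = []byte("serialized node bytes")

    // "flatdbNodeResolver": read it back with the very same key.
    blob := db[string(nodeKey(path))]
    fmt.Printf("key=%x value=%q\n", nodeKey(path), blob)
}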
+func (trie *VerkleTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { + panic("not implemented") +} + +func (trie *VerkleTrie) Copy() *VerkleTrie { + return &VerkleTrie{ + root: trie.root.Copy(), + db: trie.db, + } +} + +func (trie *VerkleTrie) IsVerkle() bool { + return true +} + +func (trie *VerkleTrie) ProveAndSerialize(keys [][]byte, kv map[string][]byte) (*verkle.VerkleProof, verkle.StateDiff, error) { + proof, _, _, _, err := verkle.MakeVerkleMultiProof(trie.root, keys) + if err != nil { + return nil, nil, err + } + + p, kvps, err := verkle.SerializeProof(proof) + if err != nil { + return nil, nil, err + } + + return p, kvps, nil +} + +type set = map[string]struct{} + +func addKey(s set, key []byte) { + s[string(key)] = struct{}{} +} + +func DeserializeAndVerifyVerkleProof(vp *verkle.VerkleProof, root []byte, statediff verkle.StateDiff) error { + rootC := new(verkle.Point) + rootC.SetBytesTrusted(root) + proof, cis, indices, yis, err := deserializeVerkleProof(vp, rootC, statediff) + if err != nil { + return fmt.Errorf("could not deserialize proof: %w", err) + } + cfg := verkle.GetConfig() + if !verkle.VerifyVerkleProof(proof, cis, indices, yis, cfg) { + return errInvalidProof + } + + return nil +} + +func deserializeVerkleProof(vp *verkle.VerkleProof, rootC *verkle.Point, statediff verkle.StateDiff) (*verkle.Proof, []*verkle.Point, []byte, []*verkle.Fr, error) { + var others set = set{} // Mark when an "other" stem has been seen + + proof, err := verkle.DeserializeProof(vp, statediff) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("verkle proof deserialization error: %w", err) + } + + for _, stem := range proof.PoaStems { + addKey(others, stem) + } + + if len(proof.Keys) != len(proof.Values) { + return nil, nil, nil, nil, fmt.Errorf("keys and values are of different length %d != %d", len(proof.Keys), len(proof.Values)) + } + + tree, err := verkle.TreeFromProof(proof, rootC) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("error rebuilding the tree from proof: %w", err) + } + for _, stemdiff := range statediff { + for _, suffixdiff := range stemdiff.SuffixDiffs { + var key [32]byte + copy(key[:31], stemdiff.Stem[:]) + key[31] = suffixdiff.Suffix + + val, err := tree.Get(key[:], nil) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("could not find key %x in tree rebuilt from proof: %w", key, err) + } + if len(val) > 0 { + if !bytes.Equal(val, suffixdiff.CurrentValue[:]) { + return nil, nil, nil, nil, fmt.Errorf("could not find correct value at %x in tree rebuilt from proof: %x != %x", key, val, *suffixdiff.CurrentValue) + } + } else { + if suffixdiff.CurrentValue != nil && len(suffixdiff.CurrentValue) != 0 { + return nil, nil, nil, nil, fmt.Errorf("could not find correct value at %x in tree rebuilt from proof: %x != %x", key, val, *suffixdiff.CurrentValue) + } + } + } + } + + // no need to resolve as the tree has been reconstructed from the proof + // and must not contain any unresolved nodes. + pe, _, _, err := tree.GetProofItems(proof.Keys) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("could not get proof items from tree rebuilt from proof: %w", err) + } + + return proof, pe.Cis, pe.Zis, pe.Yis, err +} + +// ChunkedCode represents a sequence of 32-bytes chunks of code (31 bytes of which +// are actual code, and 1 byte is the pushdata offset). 
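A short usage sketch of the chunking format defined next: every 32-byte chunk starts with one byte recording how many of its leading code bytes are PUSH data spilled over from the previous chunk, followed by 31 bytes of code. The example assumes this patch is applied so trie.ChunkifyCode is importable, and picks byte values so a PUSH2 spills exactly one data byte into the second chunk:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/trie"
)

func main() {
    // 40 bytes of code. The PUSH2 at offset 29 carries two data bytes
    // (offsets 30 and 31); offset 31 is the first byte of the second
    // 31-byte chunk, so one data byte spills over.
    code := make([]byte, 40)
    code[29] = 0x61 // PUSH2
    code[30] = 0xBE // data byte, still in chunk 0
    code[31] = 0xEF // data byte, spilled into chunk 1

    chunks := trie.ChunkifyCode(code)
    fmt.Println(len(chunks) / 32)      // 2 chunks for 40 bytes of code
    fmt.Println(chunks[0], chunks[32]) // 0 (no spill into chunk 0), 1 (one spilled byte)
}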
+type ChunkedCode []byte + +// Copy the values here so as to avoid an import cycle +const ( + PUSH1 = byte(0x60) + PUSH3 = byte(0x62) + PUSH4 = byte(0x63) + PUSH7 = byte(0x66) + PUSH21 = byte(0x74) + PUSH30 = byte(0x7d) + PUSH32 = byte(0x7f) +) + +// ChunkifyCode generates the chunked version of an array representing EVM bytecode +func ChunkifyCode(code []byte) ChunkedCode { + var ( + chunkOffset = 0 // offset in the chunk + chunkCount = len(code) / 31 + codeOffset = 0 // offset in the code + ) + if len(code)%31 != 0 { + chunkCount++ + } + chunks := make([]byte, chunkCount*32) + for i := 0; i < chunkCount; i++ { + // number of bytes to copy, 31 unless + // the end of the code has been reached. + end := 31 * (i + 1) + if len(code) < end { + end = len(code) + } + + // Copy the code itself + copy(chunks[i*32+1:], code[31*i:end]) + + // chunk offset = taken from the + // last chunk. + if chunkOffset > 31 { + // skip offset calculation if push + // data covers the whole chunk + chunks[i*32] = 31 + chunkOffset = 1 + continue + } + chunks[32*i] = byte(chunkOffset) + chunkOffset = 0 + + // Check each instruction and update the offset + // it should be 0 unless a PUSHn overflows. + for ; codeOffset < end; codeOffset++ { + if code[codeOffset] >= PUSH1 && code[codeOffset] <= PUSH32 { + codeOffset += int(code[codeOffset] - PUSH1 + 1) + if codeOffset+1 >= 31*(i+1) { + codeOffset++ + chunkOffset = codeOffset - 31*(i+1) + break + } + } + } + } + + return chunks +} + +func (t *VerkleTrie) SetStorageRootConversion(addr common.Address, root common.Hash) { + t.db.SetStorageRootConversion(addr, root) +} + +func (t *VerkleTrie) ClearStrorageRootConversion(addr common.Address) { + t.db.ClearStorageRootConversion(addr) +} + +func (t *VerkleTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error { + // XXX a copier depuis statedb/state_object + return nil +} diff --git a/trie/verkle_iterator.go b/trie/verkle_iterator.go new file mode 100644 index 000000000000..c5f59a0f5937 --- /dev/null +++ b/trie/verkle_iterator.go @@ -0,0 +1,218 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "github.com/ethereum/go-ethereum/common" + + "github.com/gballet/go-verkle" +) + +type verkleNodeIteratorState struct { + Node verkle.VerkleNode + Index int +} + +type verkleNodeIterator struct { + trie *VerkleTrie + current verkle.VerkleNode + lastErr error + + stack []verkleNodeIteratorState +} + +func newVerkleNodeIterator(trie *VerkleTrie, start []byte) (NodeIterator, error) { + if trie.Hash() == zero { + return new(nodeIterator), nil + } + it := &verkleNodeIterator{trie: trie, current: trie.root} + // it.err = it.seek(start) + return it, nil +} + +// Next moves the iterator to the next node. If the parameter is false, any child +// nodes will be skipped. 
+func (it *verkleNodeIterator) Next(descend bool) bool { + if it.lastErr == errIteratorEnd { + it.lastErr = errIteratorEnd + return false + } + + if len(it.stack) == 0 { + it.stack = append(it.stack, verkleNodeIteratorState{Node: it.trie.root, Index: 0}) + it.current = it.trie.root + + return true + } + + switch node := it.current.(type) { + case *verkle.InternalNode: + context := &it.stack[len(it.stack)-1] + + // Look for the next non-empty child + children := node.Children() + for ; context.Index < len(children); context.Index++ { + if _, ok := children[context.Index].(verkle.Empty); !ok { + it.stack = append(it.stack, verkleNodeIteratorState{Node: children[context.Index], Index: 0}) + it.current = children[context.Index] + return it.Next(descend) + } + } + + // Reached the end of this node, go back to the parent, if + // this isn't root. + if len(it.stack) == 1 { + it.lastErr = errIteratorEnd + return false + } + it.stack = it.stack[:len(it.stack)-1] + it.current = it.stack[len(it.stack)-1].Node + it.stack[len(it.stack)-1].Index++ + return it.Next(descend) + case *verkle.LeafNode: + // Look for the next non-empty value + for i := it.stack[len(it.stack)-1].Index; i < 256; i++ { + if node.Value(i) != nil { + it.stack[len(it.stack)-1].Index = i + 1 + return true + } + } + + // go back to parent to get the next leaf + it.stack = it.stack[:len(it.stack)-1] + it.current = it.stack[len(it.stack)-1].Node + it.stack[len(it.stack)-1].Index++ + return it.Next(descend) + case *verkle.HashedNode: + // resolve the node + data, err := it.trie.db.diskdb.Get(nodeToDBKey(node)) + if err != nil { + panic(err) + } + it.current, err = verkle.ParseNode(data, byte(len(it.stack)-1)) + if err != nil { + panic(err) + } + + // update the stack and parent with the resolved node + it.stack[len(it.stack)-1].Node = it.current + parent := &it.stack[len(it.stack)-2] + parent.Node.(*verkle.InternalNode).SetChild(parent.Index, it.current) + return true + default: + panic("invalid node type") + } +} + +// Error returns the error status of the iterator. +func (it *verkleNodeIterator) Error() error { + if it.lastErr == errIteratorEnd { + return nil + } + return it.lastErr +} + +// Hash returns the hash of the current node. +func (it *verkleNodeIterator) Hash() common.Hash { + return it.current.Commit().Bytes() +} + +// Parent returns the hash of the parent of the current node. The hash may be the one +// grandparent if the immediate parent is an internal node with no hash. +func (it *verkleNodeIterator) Parent() common.Hash { + return it.stack[len(it.stack)-1].Node.Commit().Bytes() +} + +// Path returns the hex-encoded path to the current node. +// Callers must not retain references to the return value after calling Next. +// For leaf nodes, the last element of the path is the 'terminator symbol' 0x10. +func (it *verkleNodeIterator) Path() []byte { + if it.Leaf() { + return it.LeafKey() + } + var path []byte + for i, state := range it.stack { + // skip the last byte + if i <= len(it.stack)-1 { + break + } + path = append(path, byte(state.Index)) + } + return path +} + +func (it *verkleNodeIterator) NodeBlob() []byte { + panic("not completely implemented") +} + +// Leaf returns true iff the current node is a leaf node. +func (it *verkleNodeIterator) Leaf() bool { + _, ok := it.current.(*verkle.LeafNode) + return ok +} + +// LeafKey returns the key of the leaf. The method panics if the iterator is not +// positioned at a leaf. Callers must not retain references to the value after +// calling Next. 
+func (it *verkleNodeIterator) LeafKey() []byte { + leaf, ok := it.current.(*verkle.LeafNode) + if !ok { + panic("Leaf() called on an verkle node iterator not at a leaf location") + } + + return leaf.Key(it.stack[len(it.stack)-1].Index - 1) +} + +// LeafBlob returns the content of the leaf. The method panics if the iterator +// is not positioned at a leaf. Callers must not retain references to the value +// after calling Next. +func (it *verkleNodeIterator) LeafBlob() []byte { + leaf, ok := it.current.(*verkle.LeafNode) + if !ok { + panic("LeafBlob() called on an verkle node iterator not at a leaf location") + } + + return leaf.Value(it.stack[len(it.stack)-1].Index - 1) +} + +// LeafProof returns the Merkle proof of the leaf. The method panics if the +// iterator is not positioned at a leaf. Callers must not retain references +// to the value after calling Next. +func (it *verkleNodeIterator) LeafProof() [][]byte { + _, ok := it.current.(*verkle.LeafNode) + if !ok { + panic("LeafProof() called on an verkle node iterator not at a leaf location") + } + + // return it.trie.Prove(leaf.Key()) + panic("not completely implemented") +} + +// AddResolver sets an intermediate database to use for looking up trie nodes +// before reaching into the real persistent layer. +// +// This is not required for normal operation, rather is an optimization for +// cases where trie nodes can be recovered from some external mechanism without +// reading from disk. In those cases, this resolver allows short circuiting +// accesses and returning them from memory. +// +// Before adding a similar mechanism to any other place in Geth, consider +// making trie.Database an interface and wrapping at that level. It's a huge +// refactor, but it could be worth it if another occurrence arises. +func (it *verkleNodeIterator) AddResolver(NodeResolver) { + // Not implemented, but should not panic +} diff --git a/trie/verkle_iterator_test.go b/trie/verkle_iterator_test.go new file mode 100644 index 000000000000..1fd3fd76a6d9 --- /dev/null +++ b/trie/verkle_iterator_test.go @@ -0,0 +1,68 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" +) + +func TestVerkleIterator(t *testing.T) { + trie := NewVerkleTrie(verkle.New(), NewDatabase(rawdb.NewMemoryDatabase()), utils.NewPointCache(), true) + account0 := &types.StateAccount{ + Nonce: 1, + Balance: big.NewInt(2), + Root: types.EmptyRootHash, + CodeHash: nil, + } + // NOTE: the code size isn't written to the trie via TryUpdateAccount + // so it will be missing from the test nodes. 
+ trie.UpdateAccount(common.Address{}, account0) + account1 := &types.StateAccount{ + Nonce: 1337, + Balance: big.NewInt(2000), + Root: types.EmptyRootHash, + CodeHash: nil, + } + // This address is meant to hash to a value that has the same first byte as 0xbf + var clash = common.HexToAddress("69fd8034cdb20934dedffa7dccb4fb3b8062a8be") + trie.UpdateAccount(clash, account1) + + // Manually go over every node to check that we get all + // the correct nodes. + it, err := trie.NodeIterator(nil) + if err != nil { + t.Fatal(err) + } + var leafcount int + for it.Next(true) { + t.Logf("Node: %x", it.Path()) + if it.Leaf() { + leafcount++ + t.Logf("\tLeaf: %x", it.LeafKey()) + } + } + if leafcount != 6 { + t.Fatalf("invalid leaf count: %d != 6", leafcount) + } +} diff --git a/trie/verkle_test.go b/trie/verkle_test.go new file mode 100644 index 000000000000..5c9e1f03330d --- /dev/null +++ b/trie/verkle_test.go @@ -0,0 +1,381 @@ +// Copyright 2021 go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "bytes" + "encoding/hex" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" +) + +func TestReproduceTree(t *testing.T) { + presentKeys := [][]byte{ + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d01"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526400"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a02"), + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d02"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a04"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526402"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526403"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a00"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a03"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526401"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526404"), + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d00"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a01"), + } + + absentKeys := [][]byte{ + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d03"), + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d04"), + } + + values := [][]byte{ + common.Hex2Bytes("320122e8584be00d000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + 
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0300000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"), + common.Hex2Bytes("1bc176f2790c91e6000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("e703000000000000000000000000000000000000000000000000000000000000"), + } + + root := verkle.New() + kv := make(map[string][]byte) + + for i, key := range presentKeys { + root.Insert(key, values[i], nil) + kv[string(key)] = values[i] + } + + proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, append(presentKeys, absentKeys...)) + cfg := verkle.GetConfig() + if !verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg) { + t.Fatal("could not verify proof") + } + + t.Log("commitments returned by proof:") + for i, c := range Cs { + t.Logf("%d %x", i, c.Bytes()) + } + + p, _, err := verkle.SerializeProof(proof) + if err != nil { + t.Fatal(err) + } + t.Logf("serialized: %v", p) + t.Logf("tree: %s\n%x\n", verkle.ToDot(root), root.Commitment().Bytes()) +} + +func TestChunkifyCodeTestnet(t *testing.T) { + code, _ := hex.DecodeString("6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea264697066735822122000382db0489577c1646ea2147a05f92f13f32336a32f1f82c6fb10b63e19f04064736f6c63430008070033") + chunks := ChunkifyCode(code) + if len(chunks) != 32*(len(code)/31+1) { + t.Fatalf("invalid length %d != %d", len(chunks), 32*(len(code)/31+1)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + t.Logf("%x\n", chunks[0]) + for i := 32; i < len(chunks); i += 32 { + chunk := chunks[i : 32+i] + if chunk[0] != 0 && i != 5*32 { + t.Fatalf("invalid offset in chunk #%d %d != 0", i+1, chunk[0]) + } + if i == 4 && chunk[0] != 12 { + t.Fatalf("invalid offset in chunk #%d %d != 0", i+1, chunk[0]) + } + } + t.Logf("code=%x, chunks=%x\n", code, chunks) + + code, _ = 
hex.DecodeString("608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100f0919061010a565b905090565b60008151905061010481610170565b92915050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220d8add45a339f741a94b4fe7f22e101b560dc8a5874cbd957a884d8c9239df86264736f6c63430008070033") + chunks = ChunkifyCode(code) + if len(chunks) != 32*((len(code)+30)/31) { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + t.Logf("%x\n", chunks[0]) + expected := []byte{0, 1, 0, 13, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3} + for i := 32; i < len(chunks); i += 32 { + chunk := chunks[i : 32+i] + t.Log(i, i/32, chunk[0]) + if chunk[0] != expected[i/32-1] { + t.Fatalf("invalid offset in chunk #%d %d != %d", i/32-1, chunk[0], expected[i/32-1]) + } + } + t.Logf("code=%x, chunks=%x\n", code, chunks) + + code, _ = hex.DecodeString("6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea2646970667358221220163c79eab5630c3dbe22f7cc7692da08575198dda76698ae8ee2e3bfe62af3de64736f6c63430008070033") + chunks = ChunkifyCode(code) + if len(chunks) != 32*((len(code)+30)/31) { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + expected = []byte{0, 0, 0, 0, 13} + for i := 32; i < len(chunks); i += 32 { + chunk := chunks[i : 32+i] + if chunk[0] != expected[i/32-1] { + t.Fatalf("invalid offset in chunk #%d %d != %d", i/32-1, chunk[0], expected[i/32-1]) + } + } + t.Logf("code=%x, chunks=%x\n", code, chunks) +} + +func TestChunkifyCodeSimple(t *testing.T) { + code := []byte{ + 0, PUSH4, 1, 2, 3, 4, PUSH3, 58, 68, 12, PUSH21, 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + // Second 31 bytes + 0, PUSH21, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + PUSH7, 1, 2, 3, 4, 5, 6, 7, + // Third 31 bytes + PUSH30, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, + } + t.Logf("code=%x", code) + chunks := ChunkifyCode(code) + if len(chunks) != 96 { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + if chunks[32] != 1 { + t.Fatalf("invalid offset in second chunk %d != 1, chunk=%x", chunks[32], chunks[32:64]) + } + if chunks[64] != 0 { + t.Fatalf("invalid offset in third chunk %d != 0", chunks[64]) + } + t.Logf("code=%x, chunks=%x\n", code, chunks) +} + +func TestChunkifyCodeFuzz(t *testing.T) { + code := []byte{ + 3, PUSH32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + } + chunks := 
ChunkifyCode(code) + if len(chunks) != 32 { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + t.Logf("code=%x, chunks=%x\n", code, chunks) + + code = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, PUSH32, + } + chunks = ChunkifyCode(code) + if len(chunks) != 32 { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + t.Logf("code=%x, chunks=%x\n", code, chunks) + + code = []byte{ + PUSH4, PUSH32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } + chunks = ChunkifyCode(code) + if len(chunks) != 64 { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + if chunks[32] != 0 { + t.Fatalf("invalid offset in second chunk %d != 0, chunk=%x", chunks[32], chunks[32:64]) + } + t.Logf("code=%x, chunks=%x\n", code, chunks) + + code = []byte{ + PUSH4, PUSH32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } + chunks = ChunkifyCode(code) + if len(chunks) != 64 { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + if chunks[32] != 0 { + t.Fatalf("invalid offset in second chunk %d != 0, chunk=%x", chunks[32], chunks[32:64]) + } + t.Logf("code=%x, chunks=%x\n", code, chunks) +} + +// This test case checks what happens when two keys whose absence is being proven start with the +// same byte (0x0b in this case). Only one 'extension status' should be declared. +func TestReproduceCondrieuStemAggregationInProofOfAbsence(t *testing.T) { + presentKeys := [][]byte{ + common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580800"), + common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580801"), + common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580802"), + common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580803"), + common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580804"), + common.Hex2Bytes("9f2a59ea98d7cb610eff49447571e1610188937ce9266c6b4ded1b6ee37ecd00"), + common.Hex2Bytes("9f2a59ea98d7cb610eff49447571e1610188937ce9266c6b4ded1b6ee37ecd01"), + common.Hex2Bytes("9f2a59ea98d7cb610eff49447571e1610188937ce9266c6b4ded1b6ee37ecd02"), + common.Hex2Bytes("9f2a59ea98d7cb610eff49447571e1610188937ce9266c6b4ded1b6ee37ecd03"), + } + + absentKeys := [][]byte{ + common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb00"), + common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb01"), + common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb02"), + common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb03"), + common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb04"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f00"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f01"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f02"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f03"), + 
common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f04"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f80"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f81"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f82"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f83"), + common.Hex2Bytes("0bb7fda24b2ea0de0f791b27f8a040fcc79f8e1e2dfe50443bc632543ba5e700"), + common.Hex2Bytes("0bb7fda24b2ea0de0f791b27f8a040fcc79f8e1e2dfe50443bc632543ba5e702"), + common.Hex2Bytes("0bb7fda24b2ea0de0f791b27f8a040fcc79f8e1e2dfe50443bc632543ba5e703"), + common.Hex2Bytes("3aeba70b6afb762af4a507c8ec10747479d797c6ec11c14f92b5699634bd18d4"), + } + + values := [][]byte{ + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("53bfa56cfcaddf191e0200000000000000000000000000000000000000000000"), + common.Hex2Bytes("0700000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("389a890a6ce3e618843300000000000000000000000000000000000000000000"), + common.Hex2Bytes("0200000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"), + } + + root := verkle.New() + kv := make(map[string][]byte) + + for i, key := range presentKeys { + root.Insert(key, values[i], nil) + kv[string(key)] = values[i] + } + + proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, append(presentKeys, absentKeys...)) + cfg := verkle.GetConfig() + if !verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg) { + t.Fatal("could not verify proof") + } + + t.Log("commitments returned by proof:") + for i, c := range Cs { + t.Logf("%d %x", i, c.Bytes()) + } + + p, _, err := verkle.SerializeProof(proof) + if err != nil { + t.Fatal(err) + } + t.Logf("serialized: %p", p) + t.Logf("tree: %s\n%x\n", verkle.ToDot(root), root.Commitment().Bytes()) + + t.Logf("%d", len(proof.ExtStatus)) + if len(proof.ExtStatus) != 5 { + t.Fatalf("invalid number of declared stems: %d != 5", len(proof.ExtStatus)) + } +} + +// Cover the case in which a stem is both used for a proof of absence, and for a proof of presence. +func TestReproduceCondrieuPoAStemConflictWithAnotherStem(t *testing.T) { + presentKeys := [][]byte{ + common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580800"), + } + + absentKeys := [][]byte{ + common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73008ffa580800"), + // the key differs from the key present... 
^^ here + } + + values := [][]byte{ + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + } + + root := verkle.New() + kv := make(map[string][]byte) + + for i, key := range presentKeys { + root.Insert(key, values[i], nil) + kv[string(key)] = values[i] + } + + proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, append(presentKeys, absentKeys...)) + cfg := verkle.GetConfig() + if !verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg) { + t.Fatal("could not verify proof") + } + + t.Log("commitments returned by proof:") + for i, c := range Cs { + t.Logf("%d %x", i, c.Bytes()) + } + + p, _, err := verkle.SerializeProof(proof) + if err != nil { + t.Fatal(err) + } + t.Logf("serialized: %p", p) + t.Logf("tree: %s\n%x\n", verkle.ToDot(root), root.Commitment().Bytes()) + + t.Logf("%d", len(proof.ExtStatus)) + if len(proof.PoaStems) != 0 { + t.Fatal("a proof-of-absence stem was declared, when there was no need") + } +} + +func TestEmptyKeySetInProveAndSerialize(t *testing.T) { + tree := verkle.New() + verkle.MakeVerkleMultiProof(tree, [][]byte{}) +} + +func TestGetTreeKeys(t *testing.T) { + addr := common.Hex2Bytes("71562b71999873DB5b286dF957af199Ec94617f7") + target := common.Hex2Bytes("274cde18dd9dbb04caf16ad5ee969c19fe6ca764d5688b5e1d419f4ac6cd1600") + key := utils.GetTreeKeyVersion(addr) + t.Logf("key=%x", key) + t.Logf("actualKey=%x", target) + if !bytes.Equal(key, target) { + t.Fatalf("differing output %x != %x", key, target) + } +} From 045f53e67a07c849806835817a36e87e77b21436 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 4 Aug 2023 11:16:14 +0200 Subject: [PATCH 03/99] move UpdateContractCode to updateStateObject --- core/state/statedb.go | 41 ++--------------------------------------- trie/verkle.go | 31 ++++++++++++++++++++++++++++++- 2 files changed, 32 insertions(+), 40 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 2a61ea7f1174..f65dbb21c6f9 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,7 +18,6 @@ package state import ( - "encoding/binary" "errors" "fmt" "math/big" @@ -37,9 +36,6 @@ import ( "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/triestate" - "github.com/ethereum/go-ethereum/trie/utils" - "github.com/gballet/go-verkle" - "github.com/holiman/uint256" ) type revision struct { @@ -591,40 +587,8 @@ func (s *StateDB) updateStateObject(obj *stateObject) { if err := s.trie.UpdateAccount(addr, &obj.data); err != nil { s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err)) } - if s.trie.IsVerkle() && obj.dirtyCode { - var ( - chunks = trie.ChunkifyCode(obj.code) - values [][]byte - key []byte - err error - ) - for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+32, chunknr+1 { - groupOffset := (chunknr + 128) % 256 - if groupOffset == 0 /* start of new group */ || chunknr == 0 /* first chunk in header group */ { - values = make([][]byte, verkle.NodeWidth) - key = utils.GetTreeKeyCodeChunkWithEvaluatedAddress(obj.db.db.(*cachingDB).GetTreeKeyHeader(obj.address[:]), uint256.NewInt(chunknr)) - } - values[groupOffset] = chunks[i : i+32] - - // Reuse the calculated key to also update the code size. 
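The utils.CodeSizeLeafKey write in the surrounding hunk relies on the fixed layout of an account's header group in the verkle tree. For reference only, the leaf sub-indices assumed by this code (mirroring the trie/utils constants of the EIP-6800 layout, not new definitions) are:

// Leaf sub-indices within an account's header group, as assumed here.
const (
	VersionLeafKey    = 0 // account version
	BalanceLeafKey    = 1 // balance, little-endian
	NonceLeafKey      = 2 // nonce, little-endian
	CodeKeccakLeafKey = 3 // keccak256 hash of the code
	CodeSizeLeafKey   = 4 // code length, little-endian
)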
- if i == 0 { - cs := make([]byte, 32) - binary.LittleEndian.PutUint64(cs, uint64(len(obj.code))) - values[utils.CodeSizeLeafKey] = cs - } - - if groupOffset == 255 || len(chunks)-i <= 32 { - switch t := s.trie.(type) { - case *trie.VerkleTrie: - err = t.UpdateStem(key[:31], values) - case *trie.TransitionTrie: - err = t.UpdateStem(key[:31], values) - } - if err != nil { - s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err)) - } - } - } + if obj.dirtyCode { + s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code) } // Cache the data until commit. Note, this update mechanism is not symmetric // to the deletion, because whereas it is enough to track account updates @@ -1293,7 +1257,6 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er // Write any contract code associated with the state object if obj.code != nil && obj.dirtyCode { rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) - s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code) obj.dirtyCode = false } // Write any storage changes in the state object to its storage trie diff --git a/trie/verkle.go b/trie/verkle.go index 4a87bc88ce4d..385af5287e16 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/utils" "github.com/gballet/go-verkle" + "github.com/holiman/uint256" ) // VerkleTrie is a wrapper around VerkleNode that implements the trie.Trie @@ -477,6 +478,34 @@ func (t *VerkleTrie) ClearStrorageRootConversion(addr common.Address) { } func (t *VerkleTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error { - // XXX a copier depuis statedb/state_object + var ( + chunks = ChunkifyCode(code) + values [][]byte + key []byte + err error + ) + for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+32, chunknr+1 { + groupOffset := (chunknr + 128) % 256 + if groupOffset == 0 /* start of new group */ || chunknr == 0 /* first chunk in header group */ { + values = make([][]byte, verkle.NodeWidth) + key = utils.GetTreeKeyCodeChunkWithEvaluatedAddress(t.pointCache.GetTreeKeyHeader(addr[:]), uint256.NewInt(chunknr)) + } + values[groupOffset] = chunks[i : i+32] + + // Reuse the calculated key to also update the code size. 
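The (chunknr + 128) % 256 arithmetic in UpdateContractCode places code chunks after the 128 header slots of a 256-wide verkle group, so chunk number n lands in group (n+128)/256 at sub-index (n+128)%256. A tiny helper expressing that mapping (illustrative only, not part of the patch):

// codeChunkPosition returns which 256-slot group and which offset within that
// group a given code chunk occupies, mirroring the groupOffset computation above.
func codeChunkPosition(chunknr uint64) (group, offset uint64) {
	return (chunknr + 128) / 256, (chunknr + 128) % 256
}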
+ if i == 0 { + cs := make([]byte, 32) + binary.LittleEndian.PutUint64(cs, uint64(len(code))) + values[utils.CodeSizeLeafKey] = cs + } + + if groupOffset == 255 || len(chunks)-i <= 32 { + err = t.UpdateStem(key[:31], values) + + if err != nil { + return fmt.Errorf("UpdateContractCode (addr=%x) error: %w", addr[:], err) + } + } + } return nil } From 9a0784d68aea63ed4ea76accec037e15063668c2 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 4 Aug 2023 12:16:12 +0200 Subject: [PATCH 04/99] fix: generated rlp decoders --- core/types/gen_account_rlp.go | 7 +-- core/types/gen_header_rlp.go | 113 ---------------------------------- core/types/gen_log_rlp.go | 7 +-- 3 files changed, 4 insertions(+), 123 deletions(-) diff --git a/core/types/gen_account_rlp.go b/core/types/gen_account_rlp.go index 9d07200e33b3..5181d884112f 100644 --- a/core/types/gen_account_rlp.go +++ b/core/types/gen_account_rlp.go @@ -5,11 +5,8 @@ package types -import ( - "io" - - "github.com/ethereum/go-ethereum/rlp" -) +import "github.com/ethereum/go-ethereum/rlp" +import "io" func (obj *StateAccount) EncodeRLP(_w io.Writer) error { w := rlp.NewEncoderBuffer(_w) diff --git a/core/types/gen_header_rlp.go b/core/types/gen_header_rlp.go index f322411bad9c..a5ed5cd15094 100644 --- a/core/types/gen_header_rlp.go +++ b/core/types/gen_header_rlp.go @@ -5,7 +5,6 @@ package types -import "github.com/ethereum/go-ethereum/common" import "github.com/ethereum/go-ethereum/rlp" import "io" @@ -79,115 +78,3 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.ListEnd(_tmp0) return w.Flush() } - -func (obj *Header) DecodeRLP(dec *rlp.Stream) error { - var _tmp0 Header - { - if _, err := dec.List(); err != nil { - return err - } - // ParentHash: - var _tmp1 common.Hash - if err := dec.ReadBytes(_tmp1[:]); err != nil { - return err - } - _tmp0.ParentHash = _tmp1 - // UncleHash: - var _tmp2 common.Hash - if err := dec.ReadBytes(_tmp2[:]); err != nil { - return err - } - _tmp0.UncleHash = _tmp2 - // Coinbase: - var _tmp3 common.Address - if err := dec.ReadBytes(_tmp3[:]); err != nil { - return err - } - _tmp0.Coinbase = _tmp3 - // Root: - var _tmp4 common.Hash - if err := dec.ReadBytes(_tmp4[:]); err != nil { - return err - } - _tmp0.Root = _tmp4 - // TxHash: - var _tmp5 common.Hash - if err := dec.ReadBytes(_tmp5[:]); err != nil { - return err - } - _tmp0.TxHash = _tmp5 - // ReceiptHash: - var _tmp6 common.Hash - if err := dec.ReadBytes(_tmp6[:]); err != nil { - return err - } - _tmp0.ReceiptHash = _tmp6 - // Bloom: - var _tmp7 Bloom - if err := dec.ReadBytes(_tmp7[:]); err != nil { - return err - } - _tmp0.Bloom = _tmp7 - // Difficulty: - _tmp8, err := dec.BigInt() - if err != nil { - return err - } - _tmp0.Difficulty = _tmp8 - // Number: - _tmp9, err := dec.BigInt() - if err != nil { - return err - } - _tmp0.Number = _tmp9 - // GasLimit: - _tmp10, err := dec.Uint64() - if err != nil { - return err - } - _tmp0.GasLimit = _tmp10 - // GasUsed: - _tmp11, err := dec.Uint64() - if err != nil { - return err - } - _tmp0.GasUsed = _tmp11 - // Time: - _tmp12, err := dec.Uint64() - if err != nil { - return err - } - _tmp0.Time = _tmp12 - // Extra: - _tmp13, err := dec.Bytes() - if err != nil { - return err - } - _tmp0.Extra = _tmp13 - // MixDigest: - var _tmp14 common.Hash - if err := dec.ReadBytes(_tmp14[:]); err != nil { - return err - } - _tmp0.MixDigest = _tmp14 - // Nonce: - var _tmp15 BlockNonce - if err := dec.ReadBytes(_tmp15[:]); err != nil { - return err - } - _tmp0.Nonce = _tmp15 - // 
BaseFee: - if dec.MoreDataInList() { - _tmp16, err := dec.BigInt() - if err != nil { - return err - } - _tmp0.BaseFee = _tmp16 - } - if err := dec.ListEnd(); err != nil { - return err - } - } - *obj = _tmp0 - return nil -} diff --git a/core/types/gen_log_rlp.go b/core/types/gen_log_rlp.go index 78fa783cee1f..4a6c6b0094f8 100644 --- a/core/types/gen_log_rlp.go +++ b/core/types/gen_log_rlp.go @@ -5,11 +5,8 @@ package types -import ( - "io" - - "github.com/ethereum/go-ethereum/rlp" -) +import "github.com/ethereum/go-ethereum/rlp" +import "io" func (obj *rlpLog) EncodeRLP(_w io.Writer) error { w := rlp.NewEncoderBuffer(_w) From e2ac75246e68fa2157f13fb2a8308ceefa6058cf Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 4 Aug 2023 12:16:53 +0200 Subject: [PATCH 05/99] fix: activate Shanghai in test --- core/genesis.go | 1 - core/state_processor_test.go | 6 ++++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/core/genesis.go b/core/genesis.go index 5c724342f5e2..2306598db58b 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -344,7 +344,6 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen // We have the genesis block in database(perhaps in ancient database) // but the corresponding state is missing. header := rawdb.ReadHeader(db, stored, 0) - if header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { if genesis == nil { genesis = DefaultGenesisBlock() diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 8c0a14aa31f8..57f17d4b5310 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -437,8 +437,9 @@ var ( func TestProcessVerkle(t *testing.T) { var ( - cancuntime uint64 = 0 - config = ¶ms.ChainConfig{ + cancuntime uint64 = 0 + shanghaiTime uint64 = 0 + config = ¶ms.ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), EIP150Block: big.NewInt(0), @@ -452,6 +453,7 @@ func TestProcessVerkle(t *testing.T) { BerlinBlock: big.NewInt(0), LondonBlock: big.NewInt(0), Ethash: new(params.EthashConfig), + ShanghaiTime: &shanghaiTime, CancunTime: &cancuntime, } signer = types.LatestSigner(config) From faf7fa052cb899db7dc37f8d3f22157d499328e1 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 4 Aug 2023 16:57:57 +0200 Subject: [PATCH 06/99] add withdrawals to witness --- consensus/beacon/consensus.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 1ad4358cfff8..d8a29f42fe89 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -352,6 +352,9 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types. amount := new(big.Int).SetUint64(w.Amount) amount = amount.Mul(amount, big.NewInt(params.GWei)) state.AddBalance(w.Address, amount) + + // The returned gas is not charged + state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:]) } // No block reward which is issued by consensus layer instead. 
} From ff71485e31e427714e182f98a6461aa2a745477b Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Wed, 9 Aug 2023 07:54:32 +0200 Subject: [PATCH 07/99] fix the first half of verkle test --- core/genesis.go | 11 ++++++----- core/genesis_test.go | 2 +- core/state/database.go | 13 +++++++++++-- core/state/statedb.go | 26 +++++++++++++------------- trie/database.go | 5 +++++ 5 files changed, 36 insertions(+), 21 deletions(-) diff --git a/core/genesis.go b/core/genesis.go index 2306598db58b..4dbc3ad96347 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -121,13 +121,13 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error { } // deriveHash computes the state root according to the genesis specification. -func (ga *GenesisAlloc) deriveHash(cfg *params.ChainConfig) (common.Hash, error) { +func (ga *GenesisAlloc) deriveHash(cfg *params.ChainConfig, timestamp uint64) (common.Hash, error) { // Create an ephemeral in-memory database for computing hash, // all the derived states will be discarded to not pollute disk. db := state.NewDatabase(rawdb.NewMemoryDatabase()) // XXX check this is the case // TODO remove the nil config check once we have rebased, it should never be nil - if cfg != nil && cfg.IsCancun(big.NewInt(int64(0)), 0 /* XXX */) { + if cfg != nil && cfg.IsCancun(big.NewInt(int64(0)), timestamp) { db.EndVerkleTransition() } statedb, err := state.New(types.EmptyRootHash, db, nil) @@ -155,7 +155,7 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhas } // End the verkle conversion at genesis if the fork block is 0 - if cfg != nil && cfg.IsCancun(big.NewInt(int64(0)), 0 /* XXX */) { + if triedb.IsVerkle() { statedb.Database().EndVerkleTransition() } @@ -456,7 +456,7 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig { // ToBlock returns the genesis block according to genesis specification. func (g *Genesis) ToBlock() *types.Block { - root, err := g.Alloc.deriveHash(g.Config) + root, err := g.Alloc.deriveHash(g.Config, g.Timestamp) if err != nil { panic(err) } @@ -547,7 +547,8 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block // Note the state changes will be committed in hash-based scheme, use Commit // if path-scheme is preferred. 
func (g *Genesis) MustCommit(db ethdb.Database) *types.Block { - block, err := g.Commit(db, trie.NewDatabase(db)) + triedb := trie.NewDatabaseWithConfig(db, &trie.Config{Verkle: g.Config != nil && g.Config.IsCancun(big.NewInt(int64(g.Number)), g.Timestamp)}) + block, err := g.Commit(db, triedb) if err != nil { panic(err) } diff --git a/core/genesis_test.go b/core/genesis_test.go index c6df6f59a3ef..77a214ebe785 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -219,7 +219,7 @@ func TestReadWriteGenesisAlloc(t *testing.T) { {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, {2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}}, } - hash, _ = alloc.deriveHash(¶ms.ChainConfig{}) + hash, _ = alloc.deriveHash(¶ms.ChainConfig{}, 0) ) blob, _ := json.Marshal(alloc) rawdb.WriteGenesisStateSpec(db, hash, blob) diff --git a/core/state/database.go b/core/state/database.go index 7d7b3b14572f..df3365dc253d 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -195,6 +195,7 @@ func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database { codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), triedb: triedb, addrToPoint: utils.NewPointCache(), + ended: triedb.IsVerkle(), } } @@ -313,8 +314,16 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { err error ) - if db.started { - vkt, err := db.openVKTrie(db.getTranslation(root)) + // TODO separate both cases when I can be certain that it won't + // find a Verkle trie where is expects a Transitoion trie. + if db.started || db.ended { + var r common.Hash + if db.ended { + r = root + } else { + r = db.getTranslation(root) + } + vkt, err := db.openVKTrie(r) if err != nil { return nil, err } diff --git a/core/state/statedb.go b/core/state/statedb.go index f65dbb21c6f9..a3d320e6f863 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -175,19 +175,19 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) } if tr.IsVerkle() { sdb.witness = NewAccessWitness(sdb) - if sdb.snaps == nil { - snapconfig := snapshot.Config{ - CacheSize: 256, - Recovery: false, - NoBuild: false, - AsyncBuild: false, - Verkle: true, - } - sdb.snaps, err = snapshot.New(snapconfig, db.DiskDB(), db.TrieDB(), root) - if err != nil { - return nil, err - } - } + // if sdb.snaps == nil { + // snapconfig := snapshot.Config{ + // CacheSize: 256, + // Recovery: false, + // NoBuild: false, + // AsyncBuild: false, + // Verkle: true, + // } + // sdb.snaps, err = snapshot.New(snapconfig, db.DiskDB(), db.TrieDB(), root) + // if err != nil { + // return nil, err + // } + // } } if sdb.snaps != nil { if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap == nil { diff --git a/trie/database.go b/trie/database.go index 5989e11481af..a6f7d98913a6 100644 --- a/trie/database.go +++ b/trie/database.go @@ -33,6 +33,7 @@ type Config struct { Cache int // Memory allowance (MB) to use for caching trie nodes in memory Preimages bool // Flag whether the preimage of trie key is recorded PathDB *pathdb.Config // Configs for experimental path-based scheme, not used yet. 
+ Verkle bool // Testing hooks OnCommit func(states *triestate.Set) // Hook invoked when commit is performed @@ -279,3 +280,7 @@ func (db *Database) ClearStorageRootConversion(addr common.Address) { defer db.addrToRootLock.Unlock() delete(db.addrToRoot, addr) } + +func (db *Database) IsVerkle() bool { + return db.config != nil && db.config.Verkle +} From f5a7321bef65d71921b2342704ab9ba6d7d0181d Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Thu, 10 Aug 2023 13:44:28 +0200 Subject: [PATCH 08/99] fix: open an empty storage trie after transition --- core/state/database.go | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/core/state/database.go b/core/state/database.go index df3365dc253d..c6ebbd1fbac3 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -362,8 +362,23 @@ func (db *cachingDB) openStorageMPTrie(stateRoot common.Hash, address common.Add // OpenStorageTrie opens the storage trie of an account func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) { - mpt, err := db.openStorageMPTrie(stateRoot, address, root, nil) - if db.started && err == nil { + if db.ended { + mpt, err := db.openStorageMPTrie(common.Hash{}, address, common.Hash{}, self) + if err != nil { + return nil, err + } + // Return a "storage trie" that is an adapter between the storge MPT + // and the unique verkle tree. + switch self := self.(type) { + case *trie.VerkleTrie: + return trie.NewTransitionTree(mpt.(*trie.StateTrie), self, true), nil + case *trie.TransitionTrie: + return trie.NewTransitionTree(mpt.(*trie.StateTrie), self.Overlay(), true), nil + default: + panic("unexpected trie type") + } + } + if db.started { // Return a "storage trie" that is an adapter between the storge MPT // and the unique verkle tree. 
switch self := self.(type) { @@ -375,6 +390,7 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre panic("unexpected trie type") } } + mpt, err := db.openStorageMPTrie(stateRoot, address, root, nil) return mpt, err } From c2cdd50666746f51b33d85de41024bd9a311d725 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Thu, 10 Aug 2023 13:49:13 +0200 Subject: [PATCH 09/99] activate verkle on IsVerkle, not IsCancun --- consensus/ethash/consensus.go | 5 +++-- core/blockchain.go | 4 ++-- core/chain_makers.go | 2 +- core/genesis.go | 4 ++-- core/state_processor_test.go | 36 +++++++++++++++++------------------ core/state_transition.go | 2 +- core/vm/contracts.go | 2 ++ core/vm/evm.go | 10 ++++++---- core/vm/gas_table.go | 14 +++++++------- core/vm/instructions.go | 14 +++++++------- core/vm/interpreter.go | 3 +++ core/vm/jump_table_export.go | 3 ++- core/vm/operations_acl.go | 4 ++-- 13 files changed, 56 insertions(+), 47 deletions(-) diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 81563f810705..00bc136872dc 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -566,7 +566,8 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header r.Mul(r, blockReward) r.Div(r, big8) - if config.IsCancun(header.Number, header.Time) { + // This should not happen, but it's useful for replay tests + if config.IsVerkle(header.Number, header.Time) { uncleCoinbase := utils.GetTreeKeyBalance(uncle.Coinbase.Bytes()) state.Witness().TouchAddressOnReadAndComputeGas(uncleCoinbase) } @@ -575,7 +576,7 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header r.Div(blockReward, big32) reward.Add(reward, r) } - if config.IsCancun(header.Number, header.Time) { + if config.IsVerkle(header.Number, header.Time) { coinbase := utils.GetTreeKeyBalance(header.Coinbase.Bytes()) state.Witness().TouchAddressOnReadAndComputeGas(coinbase) coinbase[31] = utils.VersionLeafKey // mark version diff --git a/core/blockchain.go b/core/blockchain.go index 1e1a10f9bb98..1c7dad957e98 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -311,7 +311,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis head := bc.CurrentBlock() // Declare the end of the verkle transition is need be - if bc.chainConfig.Rules(head.Number, false /* XXX */, head.Time).IsCancun { + if bc.chainConfig.Rules(head.Number, false /* XXX */, head.Time).IsVerkle { bc.stateCache.EndVerkleTransition() } @@ -411,7 +411,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis Recovery: recover, NoBuild: bc.cacheConfig.SnapshotNoBuild, AsyncBuild: !bc.cacheConfig.SnapshotWait, - Verkle: chainConfig.IsCancun(head.Number, head.Time), + Verkle: chainConfig.IsVerkle(head.Number, head.Time), } bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root) } diff --git a/core/chain_makers.go b/core/chain_makers.go index 87dff7b564d3..03f98bade68f 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -357,7 +357,7 @@ func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, if err != nil { panic(err) } - if genesis.Config != nil && genesis.Config.IsCancun(genesis.ToBlock().Number(), genesis.ToBlock().Time()) { + if genesis.Config != nil && genesis.Config.IsVerkle(genesis.ToBlock().Number(), genesis.ToBlock().Time()) { blocks, receipts, _, _ := GenerateVerkleChain(genesis.Config, genesis.ToBlock(), engine, db, 
n, gen) return db, blocks, receipts } diff --git a/core/genesis.go b/core/genesis.go index 4dbc3ad96347..acfd613fe55f 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -127,7 +127,7 @@ func (ga *GenesisAlloc) deriveHash(cfg *params.ChainConfig, timestamp uint64) (c db := state.NewDatabase(rawdb.NewMemoryDatabase()) // XXX check this is the case // TODO remove the nil config check once we have rebased, it should never be nil - if cfg != nil && cfg.IsCancun(big.NewInt(int64(0)), timestamp) { + if cfg != nil && cfg.IsVerkle(big.NewInt(int64(0)), timestamp) { db.EndVerkleTransition() } statedb, err := state.New(types.EmptyRootHash, db, nil) @@ -547,7 +547,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block // Note the state changes will be committed in hash-based scheme, use Commit // if path-scheme is preferred. func (g *Genesis) MustCommit(db ethdb.Database) *types.Block { - triedb := trie.NewDatabaseWithConfig(db, &trie.Config{Verkle: g.Config != nil && g.Config.IsCancun(big.NewInt(int64(g.Number)), g.Timestamp)}) + triedb := trie.NewDatabaseWithConfig(db, &trie.Config{Verkle: g.Config != nil && g.Config.IsVerkle(big.NewInt(int64(g.Number)), g.Timestamp)}) block, err := g.Commit(db, triedb) if err != nil { panic(err) diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 57f17d4b5310..4d56365d8b9f 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -437,24 +437,24 @@ var ( func TestProcessVerkle(t *testing.T) { var ( - cancuntime uint64 = 0 - shanghaiTime uint64 = 0 - config = ¶ms.ChainConfig{ - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - BerlinBlock: big.NewInt(0), - LondonBlock: big.NewInt(0), - Ethash: new(params.EthashConfig), - ShanghaiTime: &shanghaiTime, - CancunTime: &cancuntime, + config = ¶ms.ChainConfig{ + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + ShanghaiTime: u64(0), + VerkleTime: u64(0), + TerminalTotalDifficulty: common.Big0, + TerminalTotalDifficultyPassed: true, } signer = types.LatestSigner(config) testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") diff --git a/core/state_transition.go b/core/state_transition.go index a5fb83084a96..14dd86e4987d 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -403,7 +403,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } st.gasRemaining -= gas - if rules.IsCancun { + if rules.IsVerkle { targetAddr := msg.To originAddr := msg.From diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 6041be6c9f49..2942755f3fae 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -150,6 +150,8 @@ func init() { // ActivePrecompiles returns the precompiles enabled with the current configuration. 
func ActivePrecompiles(rules params.Rules) []common.Address { switch { + case rules.IsVerkle: + return PrecompiledAddressesBerlin case rules.IsCancun: return PrecompiledAddressesCancun case rules.IsBerlin: diff --git a/core/vm/evm.go b/core/vm/evm.go index a35f1094bacc..f8f67c4ef7e7 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -41,6 +41,8 @@ type ( func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) { var precompiles map[common.Address]PrecompiledContract switch { + case evm.chainRules.IsVerkle: + precompiles = PrecompiledContractsBerlin case evm.chainRules.IsCancun: precompiles = PrecompiledContractsCancun case evm.chainRules.IsBerlin: @@ -135,7 +137,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig chainConfig: chainConfig, chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time), } - if txCtx.Accesses == nil && chainConfig.IsCancun(blockCtx.BlockNumber, blockCtx.Time) { + if txCtx.Accesses == nil && chainConfig.IsVerkle(blockCtx.BlockNumber, blockCtx.Time) { txCtx.Accesses = state.NewAccessWitness(evm.StateDB.(*state.StateDB)) } evm.interpreter = NewEVMInterpreter(evm) @@ -145,7 +147,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig // Reset resets the EVM with a new transaction context.Reset // This is not threadsafe and should only be done very cautiously. func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) { - if txCtx.Accesses == nil && evm.chainRules.IsCancun { + if txCtx.Accesses == nil && evm.chainRules.IsVerkle { txCtx.Accesses = state.NewAccessWitness(evm.StateDB.(*state.StateDB)) } evm.TxContext = txCtx @@ -210,7 +212,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas var creation bool if !evm.StateDB.Exist(addr) { if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 { - if evm.chainRules.IsCancun { + if evm.chainRules.IsVerkle { // proof of absence tryConsumeGas(&gas, evm.Accesses.TouchAndChargeProofOfAbsence(caller.Address().Bytes())) } @@ -527,7 +529,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } } - if err == nil && evm.chainRules.IsCancun { + if err == nil && evm.chainRules.IsVerkle { if !contract.UseGas(evm.Accesses.TouchAndChargeContractCreateCompleted(address.Bytes()[:], value.Sign() != 0)) { evm.StateDB.RevertToSnapshot(snapshot) err = ErrOutOfGas diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 899551d186df..780b05182638 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -100,7 +100,7 @@ var ( func gasExtCodeSize(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { usedGas := uint64(0) slot := stack.Back(0) - if evm.chainRules.IsCancun { + if evm.chainRules.IsVerkle { index := trieUtils.GetTreeKeyCodeSize(slot.Bytes()) usedGas += evm.TxContext.Accesses.TouchAddressOnReadAndComputeGas(index) } @@ -111,7 +111,7 @@ func gasExtCodeSize(evm *EVM, contract *Contract, stack *Stack, mem *Memory, mem func gasSLoad(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { usedGas := uint64(0) - if evm.chainRules.IsCancun { + if evm.chainRules.IsVerkle { where := stack.Back(0) index := trieUtils.GetTreeKeyStorageSlotWithEvaluatedAddress(contract.AddressPoint(), where.Bytes()) usedGas += evm.Accesses.TouchAddressOnReadAndComputeGas(index) @@ -423,7 +423,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize if gas, 
overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } - if evm.chainRules.IsCancun { + if evm.chainRules.IsVerkle { if _, isPrecompile := evm.precompile(address); !isPrecompile { gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes()[:])) if overflow { @@ -463,7 +463,7 @@ func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memory if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } - if evm.chainRules.IsCancun { + if evm.chainRules.IsVerkle { address := common.Address(stack.Back(1).Bytes20()) if _, isPrecompile := evm.precompile(address); !isPrecompile { gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) @@ -488,7 +488,7 @@ func gasDelegateCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } - if evm.chainRules.IsCancun { + if evm.chainRules.IsVerkle { address := common.Address(stack.Back(1).Bytes20()) if _, isPrecompile := evm.precompile(address); !isPrecompile { gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) @@ -513,7 +513,7 @@ func gasStaticCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memo if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } - if evm.chainRules.IsCancun { + if evm.chainRules.IsVerkle { address := common.Address(stack.Back(1).Bytes20()) if _, isPrecompile := evm.precompile(address); !isPrecompile { gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) @@ -542,7 +542,7 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me } } - if evm.chainRules.IsCancun { + if evm.chainRules.IsVerkle { // TODO turn this into a panic (when we are sure this method // will never execute when verkle is enabled) log.Warn("verkle witness accumulation not supported for selfdestruct") diff --git a/core/vm/instructions.go b/core/vm/instructions.go index eeb9fc64921b..871922c1012b 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -346,7 +346,7 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.peek() cs := uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())) - if interpreter.evm.chainRules.IsCancun { + if interpreter.evm.chainRules.IsVerkle { index := trieUtils.GetTreeKeyCodeSize(slot.Bytes()) statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(index) scope.Contract.UseGas(statelessGas) @@ -374,7 +374,7 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ } paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(scope.Contract.Code, uint64CodeOffset, length.Uint64()) - if interpreter.evm.chainRules.IsCancun { + if interpreter.evm.chainRules.IsVerkle { scope.Contract.UseGas(touchEachChunksOnReadAndChargeGas(copyOffset, nonPaddedCopyLength, scope.Contract, scope.Contract.Code, interpreter.evm.Accesses, scope.Contract.IsDeployment)) } scope.Memory.Set(memOffset.Uint64(), uint64(len(paddedCodeCopy)), paddedCodeCopy) @@ -465,7 +465,7 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) uint64CodeOffset = 0xffffffffffffffff } addr := common.Address(a.Bytes20()) - if 
interpreter.evm.chainRules.IsCancun { + if interpreter.evm.chainRules.IsVerkle { code := interpreter.evm.StateDB.GetCode(addr) contract := &Contract{ Code: code, @@ -680,7 +680,7 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = scope.Contract.Gas ) - if interpreter.evm.chainRules.IsCancun { + if interpreter.evm.chainRules.IsVerkle { contractAddress := crypto.CreateAddress(scope.Contract.Address(), interpreter.evm.StateDB.GetNonce(scope.Contract.Address())) statelessGas := interpreter.evm.Accesses.TouchAndChargeContractCreateInit(contractAddress.Bytes()[:], value.Sign() != 0) if !tryConsumeGas(&gas, statelessGas) { @@ -734,7 +734,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = scope.Contract.Gas ) - if interpreter.evm.chainRules.IsCancun { + if interpreter.evm.chainRules.IsVerkle { codeAndHash := &codeAndHash{code: input} contractAddress := crypto.CreateAddress2(scope.Contract.Address(), salt.Bytes32(), codeAndHash.Hash().Bytes()) statelessGas := interpreter.evm.Accesses.TouchAndChargeContractCreateInit(contractAddress.Bytes()[:], endowment.Sign() != 0) @@ -998,7 +998,7 @@ func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by if *pc < codeLen { scope.Stack.push(integer.SetUint64(uint64(scope.Contract.Code[*pc]))) - if interpreter.evm.chainRules.IsCancun && *pc%31 == 0 { + if interpreter.evm.chainRules.IsVerkle && *pc%31 == 0 { // touch next chunk if PUSH1 is at the boundary. if so, *pc has // advanced past this boundary. statelessGas := touchEachChunksOnReadAndChargeGas(*pc+1, uint64(1), scope.Contract, scope.Contract.Code, interpreter.evm.Accesses, scope.Contract.IsDeployment) @@ -1025,7 +1025,7 @@ func makePush(size uint64, pushByteSize int) executionFunc { endMin = startMin + pushByteSize } - if interpreter.evm.chainRules.IsCancun { + if interpreter.evm.chainRules.IsVerkle { statelessGas := touchEachChunksOnReadAndChargeGas(uint64(startMin), uint64(pushByteSize), scope.Contract, scope.Contract.Code, interpreter.evm.Accesses, scope.Contract.IsDeployment) scope.Contract.UseGas(statelessGas) } diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 9050addbcaec..6563b17bd9a1 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -60,6 +60,9 @@ func NewEVMInterpreter(evm *EVM) *EVMInterpreter { // If jump table was not initialised we set the default one. 
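The opCreate and opCreate2 hunks above charge stateless (witness) gas through tryConsumeGas before continuing with the usual creation flow. For readers following along, a plausible shape of that helper is sketched below; this is an assumption about its behaviour, not code taken from this patch series:

// tryConsumeGas deducts the wanted amount from the remaining gas budget,
// zeroing the budget and reporting failure when it does not fit.
func tryConsumeGas(gasRemaining *uint64, wanted uint64) bool {
	if *gasRemaining < wanted {
		*gasRemaining = 0
		return false
	}
	*gasRemaining -= wanted
	return true
}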
var table *JumpTable switch { + case evm.chainRules.IsVerkle: + // TODO replace with prooper instruction set when fork is specified + table = &shanghaiInstructionSet case evm.chainRules.IsCancun: table = &cancunInstructionSet case evm.chainRules.IsShanghai: diff --git a/core/vm/jump_table_export.go b/core/vm/jump_table_export.go index 6ea47d63a281..75bcb8d5bf9e 100644 --- a/core/vm/jump_table_export.go +++ b/core/vm/jump_table_export.go @@ -27,7 +27,8 @@ import ( func LookupInstructionSet(rules params.Rules) (JumpTable, error) { switch { case rules.IsVerkle: - return newCancunInstructionSet(), errors.New("verkle-fork not defined yet") + // TODO set to newCancunInstructionSet() when verkle-fork is defined + return newShanghaiInstructionSet(), errors.New("verkle-fork not defined yet") case rules.IsPrague: return newCancunInstructionSet(), errors.New("prague-fork not defined yet") case rules.IsCancun: diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 114769abda89..fe8446be3b08 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -52,7 +52,7 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc { } value := common.Hash(y.Bytes32()) - if evm.chainRules.IsCancun { + if evm.chainRules.IsVerkle { index := trieUtils.GetTreeKeyStorageSlotWithEvaluatedAddress(contract.AddressPoint(), x.Bytes()) cost += evm.Accesses.TouchAddressOnWriteAndComputeGas(index) } @@ -111,7 +111,7 @@ func gasSLoadEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me slot := common.Hash(loc.Bytes32()) var gasUsed uint64 - if evm.chainRules.IsCancun { + if evm.chainRules.IsVerkle { where := stack.Back(0) addr := contract.Address() index := trieUtils.GetTreeKeyStorageSlot(addr[:], where) From 4b54c4b57fabaf1fb1bc11a0bfd65dd7c5fb9aa0 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Thu, 10 Aug 2023 18:26:03 +0200 Subject: [PATCH 10/99] deactivate snapshot for tests --- core/state/snapshot/snapshot.go | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index ed01170941c1..7880a8799753 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -23,7 +23,6 @@ import ( "fmt" "sync" - "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" @@ -216,15 +215,17 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root log.Warn("Failed to load snapshot", "err", err) if !config.NoBuild { if config.Verkle { - snap.layers = map[common.Hash]snapshot{ - root: &diskLayer{ - diskdb: diskdb, - triedb: triedb, - root: root, - cache: fastcache.New(config.CacheSize * 1024 * 1024), - }, - } - return snap, nil + // TODO update the Rebuild function + // snap.layers = map[common.Hash]snapshot{ + // root: &diskLayer{ + // diskdb: diskdb, + // triedb: triedb, + // root: root, + // cache: fastcache.New(config.CacheSize * 1024 * 1024), + // }, + // } + // return snap, nil + return nil, nil } log.Warn("Failed to load snapshot, regenerating", "err", err) snap.Rebuild(root) From e237c1ecf7cc5cdef2d0886abb969b13cff545f8 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Thu, 10 Aug 2023 18:28:07 +0200 Subject: [PATCH 11/99] enable proof of stake in verkle test --- core/state_processor_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 
deletions(-) diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 4d56365d8b9f..bbbda80157a1 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -474,7 +474,7 @@ func TestProcessVerkle(t *testing.T) { // Verkle trees use the snapshot, which must be enabled before the // data is saved into the tree+database. genesis := gspec.MustCommit(bcdb) - blockchain, _ := NewBlockChain(bcdb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(bcdb, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil) defer blockchain.Stop() // Commit the genesis block to the block-generation database as it @@ -489,7 +489,9 @@ func TestProcessVerkle(t *testing.T) { txCost1*2 + txCost2, txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, } - chain, _, proofs, keyvals := GenerateVerkleChain(gspec.Config, genesis, ethash.NewFaker(), gendb, 2, func(i int, gen *BlockGen) { + // TODO utiliser GenerateChainWithGenesis pour le rendre plus pratique + chain, _, proofs, keyvals := GenerateVerkleChain(gspec.Config, genesis, beacon.New(ethash.NewFaker()), gendb, 2, func(i int, gen *BlockGen) { + gen.SetPoS() // TODO need to check that the tx cost provided is the exact amount used (no remaining left-over) tx, _ := types.SignTx(types.NewTransaction(uint64(i)*3, common.Address{byte(i), 2, 3}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) gen.AddTx(tx) From 29c8f983293538eb485c055b53f574c959a97ea3 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Thu, 10 Aug 2023 21:38:07 +0200 Subject: [PATCH 12/99] save last MPT root for transition --- core/state/database.go | 11 +++++++++++ core/state/statedb.go | 9 ++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/core/state/database.go b/core/state/database.go index c6ebbd1fbac3..7ee8489d3f62 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -92,6 +92,8 @@ type Database interface { SetCurrentPreimageOffset(int64) AddRootTranslation(originalRoot, translatedRoot common.Hash) + + SetLastMerkleRoot(root common.Hash) } // Trie is a Ethereum Merkle Patricia trie. @@ -272,6 +274,7 @@ type cachingDB struct { origRoots [32]common.Hash translationIndex int translatedRootsLock sync.RWMutex + LastMerkleRoot common.Hash // root hash of the read-only base tree addrToPoint *utils.PointCache @@ -379,6 +382,10 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre } } if db.started { + mpt, err := db.openStorageMPTrie(db.LastMerkleRoot, address, root, nil) + if err != nil { + return nil, err + } // Return a "storage trie" that is an adapter between the storge MPT // and the unique verkle tree. 
switch self := self.(type) { @@ -506,3 +513,7 @@ func (db *cachingDB) GetStorageProcessed() bool { func (db *cachingDB) AddRootTranslation(originalRoot, translatedRoot common.Hash) { db.AddTranslation(originalRoot, translatedRoot) } + +func (db *cachingDB) SetLastMerkleRoot(root common.Hash) { + db.LastMerkleRoot = root +} diff --git a/core/state/statedb.go b/core/state/statedb.go index a3d320e6f863..a3616e7a58db 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1051,7 +1051,14 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { if metrics.EnabledExpensive { defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now()) } - return s.trie.Hash() + root := s.trie.Hash() + + // Save the root of the MPT so that it can be used during the transition + if !s.Database().InTransition() && !s.Database().Transitioned() { + s.Database().SetLastMerkleRoot(root) + } + + return root } // SetTxContext sets the current transaction hash and index which are From bea2b7ea8caffe08cbf2de7606c3cfe53c977739 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 11 Aug 2023 07:42:58 +0200 Subject: [PATCH 13/99] remove unnecessary snapshot Cap in flush --- core/genesis.go | 2 +- core/state/statedb.go | 10 ---------- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/core/genesis.go b/core/genesis.go index acfd613fe55f..1a3da935f50a 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -184,7 +184,7 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhas } rawdb.WriteGenesisStateSpec(db, blockhash, blob) - return statedb.Cap(root) // XXX check this is still necessary + return nil } // CommitGenesisState loads the stored genesis state with the given block diff --git a/core/state/statedb.go b/core/state/statedb.go index a3616e7a58db..f0ec65d1d2a8 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1215,16 +1215,6 @@ func (s *StateDB) GetTrie() Trie { return s.trie } -// XXX check it's still needed -func (s *StateDB) Cap(root common.Hash) error { - if s.snaps != nil { - return s.snaps.Cap(root, 0) - } - // pre-verkle path: noop if s.snaps hasn't been - // initialized. - return nil -} - // Commit writes the state to the underlying in-memory trie database. // Once the state is committed, tries cached in stateDB (including account // trie, storage tries) will no longer be functional. 
A new state instance From 0decbd7ca329e02a5d51add021cb7a4a76fad100 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 11 Aug 2023 14:00:55 +0200 Subject: [PATCH 14/99] fix test: include EIP-3860 --- core/state_processor_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/state_processor_test.go b/core/state_processor_test.go index bbbda80157a1..cc2a93405407 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -484,7 +484,7 @@ func TestProcessVerkle(t *testing.T) { txCost1 := params.WitnessBranchWriteCost*2 + params.WitnessBranchReadCost*2 + params.WitnessChunkWriteCost*3 + params.WitnessChunkReadCost*10 + params.TxGas txCost2 := params.WitnessBranchWriteCost + params.WitnessBranchReadCost*2 + params.WitnessChunkWriteCost*2 + params.WitnessChunkReadCost*10 + params.TxGas contractCreationCost := intrinsicContractCreationGas + uint64(6900 /* from */ +7700 /* creation */ +2939 /* execution costs */) - codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(6900 /* from */ +7000 /* creation */ +315894 /* execution costs */) + codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(6900 /* from */ +7000 /* creation */ +315944 /* execution costs */) blockGasUsagesExpected := []uint64{ txCost1*2 + txCost2, txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, From 66dd866e897f4b5ae288049c6428ad2743b9d65f Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 11 Aug 2023 16:44:08 +0200 Subject: [PATCH 15/99] implement missing odrDatabase function --- light/trie.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/light/trie.go b/light/trie.go index 46f073d66a6e..e0a283fdc1d7 100644 --- a/light/trie.go +++ b/light/trie.go @@ -157,6 +157,10 @@ func (db *odrDatabase) AddRootTranslation(originalRoot common.Hash, translatedRo panic("not implemented") // TODO: Implement } +func (db *odrDatabase) SetLastMerkleRoot(root common.Hash) { + panic("not implemented") // TODO: Implement +} + type odrTrie struct { db *odrDatabase id *TrieID From 54e060f0943181a7aec67e154f3bf7f88cae111d Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 11 Aug 2023 21:01:00 +0200 Subject: [PATCH 16/99] fix incorrect equality condition is HasAccount --- core/types/state_account.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/types/state_account.go b/core/types/state_account.go index 75c188fea259..0cb751685afe 100644 --- a/core/types/state_account.go +++ b/core/types/state_account.go @@ -60,7 +60,7 @@ func (acct *StateAccount) Copy() *StateAccount { // HasStorage returns true if the account has a non-empty storage tree. 
func (acc *StateAccount) HasStorage() bool { - return len(acc.Root) == 32 && acc.Root == EmptyRootHash + return len(acc.Root) == 32 && acc.Root != EmptyRootHash } // SlimAccount is a modified version of an Account, where the root is replaced From a80c2aa8b5829c2e762250091344375aa3c92ce0 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 14 Aug 2023 16:56:43 +0200 Subject: [PATCH 17/99] fixes to replay ~500 blocks --- core/blockchain.go | 1 + core/state/statedb.go | 4 ++++ trie/transition.go | 28 ++++++++++------------------ 3 files changed, 15 insertions(+), 18 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 1c7dad957e98..b3513ff4cd72 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1746,6 +1746,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) if parent.Number.Uint64() == conversionBlock { bc.StartVerkleTransition(parent.Root, emptyVerkleRoot, bc.Config(), &parent.Time) + bc.stateCache.SetLastMerkleRoot(parent.Root) } statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps) if err != nil { diff --git a/core/state/statedb.go b/core/state/statedb.go index f0ec65d1d2a8..1a64ba0bb5d7 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1080,6 +1080,10 @@ func (s *StateDB) clearJournalAndRefund() { // deleteStorage iterates the storage trie belongs to the account and mark all // slots inside as deleted. func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) { + // verkle: a deletion is akin to overwriting with 0s + if s.GetTrie().IsVerkle() { + return false, nil, nil, nil + } start := time.Now() tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie) // XXX NOTE: it might just be possible to use an empty trie here, as verkle will not diff --git a/trie/transition.go b/trie/transition.go index 514d3e99825b..ad5a7dc70152 100644 --- a/trie/transition.go +++ b/trie/transition.go @@ -20,7 +20,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/gballet/go-verkle" ) @@ -67,22 +66,7 @@ func (t *TransitionTrie) GetStorage(addr common.Address, key []byte) ([]byte, er return val, nil } // TODO also insert value into overlay - rlpval, err := t.base.GetStorage(addr, key) - if err != nil { - return nil, err - } - if len(rlpval) == 0 { - return nil, nil - } - // the value will come as RLP, decode it so that the - // interface is consistent. - _, content, _, err := rlp.Split(rlpval) - if err != nil || len(content) == 0 { - return nil, err - } - var v [32]byte - copy(v[32-len(content):], content) - return v[:], nil + return t.base.GetStorage(addr, key) } // GetAccount abstract an account read from the trie. @@ -111,7 +95,15 @@ func (t *TransitionTrie) GetAccount(address common.Address) (*types.StateAccount // by the caller while they are stored in the trie. If a node was not found in the // database, a trie.MissingNodeError is returned. 
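The TransitionTrie methods in this hunk implement a read-through overlay: reads consult the verkle overlay first and fall back to the frozen MPT base, while writes only ever land in the overlay. Stripped of the trie-specific types, the pattern reduces to the following toy sketch (for illustration only, not the actual geth types):

// overlayKV is a minimal read-through overlay: Get prefers the overlay and
// falls back to the base, Put writes to the overlay only.
type overlayKV struct {
	base    map[string][]byte
	overlay map[string][]byte
}

func (o *overlayKV) Get(key []byte) ([]byte, bool) {
	if v, ok := o.overlay[string(key)]; ok {
		return v, true
	}
	v, ok := o.base[string(key)]
	return v, ok
}

func (o *overlayKV) Put(key, val []byte) {
	o.overlay[string(key)] = val
}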
func (t *TransitionTrie) UpdateStorage(address common.Address, key []byte, value []byte) error { - return t.overlay.UpdateStorage(address, key, value) + var v []byte + if len(value) >= 32 { + v = value[:32] + } else { + var val [32]byte + copy(val[32-len(value):], value[:]) + v = val[:] + } + return t.overlay.UpdateStorage(address, key, v) } // UpdateAccount abstract an account write to the trie. From a72e7d94a237b44e65c84a3d3dc6481180d4b901 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 14 Aug 2023 17:10:35 +0200 Subject: [PATCH 18/99] fix to replay more blocks --- core/state/statedb.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 1a64ba0bb5d7..d500ef3862a7 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1082,7 +1082,7 @@ func (s *StateDB) clearJournalAndRefund() { func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) { // verkle: a deletion is akin to overwriting with 0s if s.GetTrie().IsVerkle() { - return false, nil, nil, nil + return false, nil, trienode.NewNodeSet(addrHash), nil } start := time.Now() tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie) From 5ba9fb78559bbdce526b7f2cfa40d695b924a1ba Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 15 Aug 2023 10:19:37 +0200 Subject: [PATCH 19/99] fix preimage issue in conversion --- core/state_processor.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/core/state_processor.go b/core/state_processor.go index 744538d03e1a..4ccb06ecfb4f 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -216,23 +216,25 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg } copy(safeValue[32-len(value):], value) - var slotnr [32]byte + var slotnr []byte if hasPreimagesBin { - if _, err := io.ReadFull(fpreimages, slotnr[:]); err != nil { + var s [32]byte + slotnr = s[:] + if _, err := io.ReadFull(fpreimages, slotnr); err != nil { return nil, nil, 0, fmt.Errorf("reading preimage file: %s", err) } } else { - slotnr := rawdb.ReadPreimage(migrdb.DiskDB(), stIt.Hash()) + slotnr = rawdb.ReadPreimage(migrdb.DiskDB(), stIt.Hash()) if len(slotnr) != 32 { return nil, nil, 0, fmt.Errorf("slotnr len is zero is not 32: %d", len(slotnr)) } } if crypto.Keccak256Hash(slotnr[:]) != stIt.Hash() { - return nil, nil, 0, fmt.Errorf("preimage file does not match storage hash: %s!=%s", crypto.Keccak256Hash(slotnr[:]), stIt.Hash()) + return nil, nil, 0, fmt.Errorf("preimage file does not match storage hash: %s!=%s", crypto.Keccak256Hash(slotnr), stIt.Hash()) } preimageSeek += int64(len(slotnr)) - mkv.addStorageSlot(migrdb.GetCurrentAccountAddress().Bytes(), slotnr[:], safeValue[:]) + mkv.addStorageSlot(migrdb.GetCurrentAccountAddress().Bytes(), slotnr, safeValue[:]) // advance the storage iterator migrdb.SetStorageProcessed(!stIt.Next()) From 9bb7f41c2d012de67f677302620391b7b17e9423 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 15 Aug 2023 16:17:32 +0200 Subject: [PATCH 20/99] code cleanup: remove a lot of TODOs & friends --- core/genesis.go | 4 +--- core/state/iterator.go | 9 --------- core/state_processor_test.go | 1 - miner/worker.go | 5 ----- 4 files changed, 1 insertion(+), 18 deletions(-) diff --git a/core/genesis.go b/core/genesis.go 
index 1a3da935f50a..6b521369bcbe 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -125,9 +125,7 @@ func (ga *GenesisAlloc) deriveHash(cfg *params.ChainConfig, timestamp uint64) (c // Create an ephemeral in-memory database for computing hash, // all the derived states will be discarded to not pollute disk. db := state.NewDatabase(rawdb.NewMemoryDatabase()) - // XXX check this is the case - // TODO remove the nil config check once we have rebased, it should never be nil - if cfg != nil && cfg.IsVerkle(big.NewInt(int64(0)), timestamp) { + if cfg.IsVerkle(big.NewInt(int64(0)), timestamp) { db.EndVerkleTransition() } statedb, err := state.New(types.EmptyRootHash, db, nil) diff --git a/core/state/iterator.go b/core/state/iterator.go index 26846730d10e..bf00fc0e7e1e 100644 --- a/core/state/iterator.go +++ b/core/state/iterator.go @@ -82,15 +82,6 @@ func (it *nodeIterator) step() error { if err != nil { return err } - - // If the trie is a verkle trie, then the data and state - // are the same tree, and as a result both iterators are - // the same. This is a hack meant for both tree types to - // work. - // XXX check if this is still needed - if _, ok := it.state.trie.(*trie.VerkleTrie); ok { - it.dataIt = it.stateIt - } } // If we had data nodes previously, we surely have at least state nodes if it.dataIt != nil { diff --git a/core/state_processor_test.go b/core/state_processor_test.go index cc2a93405407..ffb3285b8f91 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -432,7 +432,6 @@ var ( intrinsicContractCreationGas, _ = IntrinsicGas(code, nil, true, true, true, true) codeWithExtCodeCopy = common.FromHex(`0x60806040526040516100109061017b565b604051809103906000f08015801561002c573d6000803e3d6000fd5b506000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555034801561007857600080fd5b5060008067ffffffffffffffff8111156100955761009461024a565b5b6040519080825280601f01601f1916602001820160405280156100c75781602001600182028036833780820191505090505b50905060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690506020600083833c81610101906101e3565b60405161010d90610187565b61011791906101a3565b604051809103906000f080158015610133573d6000803e3d6000fd5b50600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505061029b565b60d58061046783390190565b6102068061053c83390190565b61019d816101d9565b82525050565b60006020820190506101b86000830184610194565b92915050565b6000819050602082019050919050565b600081519050919050565b6000819050919050565b60006101ee826101ce565b826101f8846101be565b905061020381610279565b925060208210156102435761023e7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8360200360080261028e565b831692505b5050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600061028582516101d9565b80915050919050565b600082821b905092915050565b6101bd806102aa6000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100f0919061010a565b905090565b60008151905061010481610170565b9291
5050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220a6a0e11af79f176f9c421b7b12f441356b25f6489b83d38cc828a701720b41f164736f6c63430008070033608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea26469706673582212203a14eb0d5cd07c277d3e24912f110ddda3e553245a99afc4eeefb2fbae5327aa64736f6c63430008070033608060405234801561001057600080fd5b5060405161020638038061020683398181016040528101906100329190610063565b60018160001c6100429190610090565b60008190555050610145565b60008151905061005d8161012e565b92915050565b60006020828403121561007957610078610129565b5b60006100878482850161004e565b91505092915050565b600061009b826100f0565b91506100a6836100f0565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff038211156100db576100da6100fa565b5b828201905092915050565b6000819050919050565b6000819050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600080fd5b610137816100e6565b811461014257600080fd5b50565b60b3806101536000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea26469706673582212209bff7098a2f526de1ad499866f27d6d0d6f17b74a413036d6063ca6a0998ca4264736f6c63430008070033`) intrinsicCodeWithExtCodeCopyGas, _ = IntrinsicGas(codeWithExtCodeCopy, nil, true, true, true, true) - // XXX if the last true in IntringsicGas makes for an invalid gas, try false ) func TestProcessVerkle(t *testing.T) { diff --git a/miner/worker.go b/miner/worker.go index b23fbdeaff33..81aeb1d81388 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -89,9 +89,6 @@ type environment struct { header *types.Header txs []*types.Transaction receipts []*types.Receipt - - // XXX check if this is still necessary - preRoot common.Hash } // copy creates a deep copy of environment. 
@@ -103,7 +100,6 @@ func (env *environment) copy() *environment { coinbase: env.coinbase, header: types.CopyHeader(env.header), receipts: copyReceipts(env.receipts), - preRoot: env.preRoot, } if env.gasPool != nil { gasPool := *env.gasPool @@ -716,7 +712,6 @@ func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase co state: state, coinbase: coinbase, header: header, - preRoot: parent.Root, } // Keep track of transactions which return errors so they can be removed env.tcount = 0 From 12c65cb520461e8d115b4f6f2a8e5092e0c08091 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 15 Aug 2023 16:19:42 +0200 Subject: [PATCH 21/99] more code cleanup --- cmd/utils/cmd.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index 24da4911bc14..de25fd1a146d 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -176,17 +176,6 @@ func ImportChain(chain *core.BlockChain, fn string) error { return err } } - // cpuProfile, err := os.Create("cpu.out") - // if err != nil { - // return fmt.Errorf("Error creating CPU profile: %v", err) - // } - // defer cpuProfile.Close() - // err = pprof.StartCPUProfile(cpuProfile) - // if err != nil { - // return fmt.Errorf("Error starting CPU profile: %v", err) - // } - // defer pprof.StopCPUProfile() - // params.ClearVerkleWitnessCosts() stream := rlp.NewStream(reader, 0) From a926f606998b3007f3183a25b81fbbdd986b8b44 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Thu, 17 Aug 2023 15:58:41 +0200 Subject: [PATCH 22/99] fix: alignment of values whose len is < 32 --- trie/verkle.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/trie/verkle.go b/trie/verkle.go index 385af5287e16..fcf9e114cfc0 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -205,7 +205,11 @@ func (trie *VerkleTrie) UpdateStem(key []byte, values [][]byte) error { func (trie *VerkleTrie) UpdateStorage(address common.Address, key, value []byte) error { k := utils.GetTreeKeyStorageSlotWithEvaluatedAddress(trie.pointCache.GetTreeKeyHeader(address[:]), key) var v [32]byte - copy(v[:], value[:]) + if len(value) >= 32 { + copy(v[:], value[:32]) + } else { + copy(v[32-len(value):], value[:]) + } return trie.root.Insert(k, v[:], trie.flatdbNodeResolver) } From a0e1995bc41d19d2f65683bf3618f42968f75d5c Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Tue, 29 Aug 2023 15:59:06 -0300 Subject: [PATCH 23/99] New access-witness module (#235) Use plain addresses/slot numbers instead of hashing them in the witness --- consensus/beacon/consensus.go | 4 +- consensus/ethash/consensus.go | 16 +- core/state/access_witness.go | 466 +++++++++++----------------------- core/state/statedb.go | 8 +- core/state_processor.go | 2 +- core/vm/contract.go | 13 - core/vm/evm.go | 6 +- core/vm/gas_table.go | 8 +- core/vm/instructions.go | 93 ++----- core/vm/interpreter.go | 26 +- core/vm/operations_acl.go | 10 +- trie/utils/verkle.go | 13 +- trie/verkle_test.go | 6 - 13 files changed, 223 insertions(+), 448 deletions(-) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index d8a29f42fe89..64be7b0005d3 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -30,6 +30,8 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/holiman/uint256" ) // Proof-of-stake protocol 
constants. @@ -354,7 +356,7 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types. state.AddBalance(w.Address, amount) // The returned gas is not charged - state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:]) + state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.BalanceLeafKey) } // No block reward which is issued by consensus layer instead. } diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 00bc136872dc..92f8100f6e63 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/utils" + "github.com/holiman/uint256" "golang.org/x/crypto/sha3" ) @@ -568,8 +569,7 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header // This should not happen, but it's useful for replay tests if config.IsVerkle(header.Number, header.Time) { - uncleCoinbase := utils.GetTreeKeyBalance(uncle.Coinbase.Bytes()) - state.Witness().TouchAddressOnReadAndComputeGas(uncleCoinbase) + state.Witness().TouchAddressOnReadAndComputeGas(uncle.Coinbase.Bytes(), uint256.Int{}, utils.BalanceLeafKey) } state.AddBalance(uncle.Coinbase, r) @@ -577,14 +577,10 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header reward.Add(reward, r) } if config.IsVerkle(header.Number, header.Time) { - coinbase := utils.GetTreeKeyBalance(header.Coinbase.Bytes()) - state.Witness().TouchAddressOnReadAndComputeGas(coinbase) - coinbase[31] = utils.VersionLeafKey // mark version - state.Witness().TouchAddressOnReadAndComputeGas(coinbase) - coinbase[31] = utils.NonceLeafKey // mark nonce - state.Witness().TouchAddressOnReadAndComputeGas(coinbase) - coinbase[31] = utils.CodeKeccakLeafKey // mark code keccak - state.Witness().TouchAddressOnReadAndComputeGas(coinbase) + state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.BalanceLeafKey) + state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.VersionLeafKey) + state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.NonceLeafKey) + state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.CodeKeccakLeafKey) } state.AddBalance(header.Coinbase, reward) } diff --git a/core/state/access_witness.go b/core/state/access_witness.go index 522b5f308096..8b03cf371a60 100644 --- a/core/state/access_witness.go +++ b/core/state/access_witness.go @@ -20,391 +20,235 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie/utils" + "github.com/holiman/uint256" ) -type VerkleStem [31]byte - -// Mode specifies how a tree location has been accessed +// mode specifies how a tree location has been accessed // for the byte value: // * the first bit is set if the branch has been edited // * the second bit is set if the branch has been read -type Mode byte +type mode byte const ( - AccessWitnessReadFlag = Mode(1) - AccessWitnessWriteFlag = Mode(2) + AccessWitnessReadFlag = mode(1) + AccessWitnessWriteFlag = mode(2) ) +var zeroTreeIndex uint256.Int + // AccessWitness lists the locations of the state that are being accessed // during the production of a block. 
type AccessWitness struct { - // Branches flags if a given branch has been loaded - Branches map[VerkleStem]Mode - - // Chunks contains the initial value of each address - Chunks map[common.Hash]Mode - - // InitialValue contains either `nil` if the location - // didn't exist before it was accessed, or the value - // that a location had before the execution of this - // block. - InitialValue map[string][]byte - - // Caches which code chunks have been accessed, in order - // to reduce the number of times that GetTreeKeyCodeChunk - // is called. - CodeLocations map[string]map[uint64]struct{} + branches map[branchAccessKey]mode + chunks map[chunkAccessKey]mode - statedb *StateDB + pointCache *utils.PointCache } -func NewAccessWitness(statedb *StateDB) *AccessWitness { +func NewAccessWitness(pointCache *utils.PointCache) *AccessWitness { return &AccessWitness{ - Branches: make(map[VerkleStem]Mode), - Chunks: make(map[common.Hash]Mode), - InitialValue: make(map[string][]byte), - CodeLocations: make(map[string]map[uint64]struct{}), - statedb: statedb, + branches: make(map[branchAccessKey]mode), + chunks: make(map[chunkAccessKey]mode), + pointCache: pointCache, } } -func (aw *AccessWitness) HasCodeChunk(addr []byte, chunknr uint64) bool { - if locs, ok := aw.CodeLocations[string(addr)]; ok { - if _, ok = locs[chunknr]; ok { - return true - } - } - - return false -} - -// SetCodeLeafValue does the same thing as SetLeafValue, but for code chunks. It -// maintains a cache of which (address, chunk) were calculated, in order to avoid -// calling GetTreeKey more than once per chunk. -func (aw *AccessWitness) SetCachedCodeChunk(addr []byte, chunknr uint64) { - if locs, ok := aw.CodeLocations[string(addr)]; ok { - if _, ok = locs[chunknr]; ok { - return - } - } else { - aw.CodeLocations[string(addr)] = map[uint64]struct{}{} - } - - aw.CodeLocations[string(addr)][chunknr] = struct{}{} -} - -func (aw *AccessWitness) touchAddressOnWrite(addr []byte) (bool, bool, bool) { - var stem VerkleStem - var stemWrite, chunkWrite, chunkFill bool - copy(stem[:], addr[:31]) - - // NOTE: stem, selector access flags already exist in their - // respective maps because this function is called at the end of - // processing a read access event - - if (aw.Branches[stem] & AccessWitnessWriteFlag) == 0 { - stemWrite = true - aw.Branches[stem] |= AccessWitnessWriteFlag - } - - chunkValue := aw.Chunks[common.BytesToHash(addr)] - // if chunkValue.mode XOR AccessWitnessWriteFlag - if ((chunkValue & AccessWitnessWriteFlag) == 0) && ((chunkValue | AccessWitnessWriteFlag) != 0) { - chunkWrite = true - chunkValue |= AccessWitnessWriteFlag - aw.Chunks[common.BytesToHash(addr)] = chunkValue - } - - // TODO charge chunk filling costs if the leaf was previously empty in the state - /* - if chunkWrite { - if _, err := verkleDb.TryGet(addr); err != nil { - chunkFill = true - } - } - */ - - return stemWrite, chunkWrite, chunkFill -} - -// TouchAddress adds any missing addr to the witness and returns respectively -// true if the stem or the stub weren't arleady present. 
-func (aw *AccessWitness) touchAddress(addr []byte, isWrite bool) (bool, bool, bool, bool, bool) { - var ( - stem [31]byte - stemRead, selectorRead bool - stemWrite, selectorWrite, chunkFill bool - ) - copy(stem[:], addr[:31]) - - // Check for the presence of the stem - if _, hasStem := aw.Branches[stem]; !hasStem { - stemRead = true - aw.Branches[stem] = AccessWitnessReadFlag - } - - // Check for the presence of the leaf selector - if _, hasSelector := aw.Chunks[common.BytesToHash(addr)]; !hasSelector { - selectorRead = true - aw.Chunks[common.BytesToHash(addr)] = AccessWitnessReadFlag - } - - if isWrite { - stemWrite, selectorWrite, chunkFill = aw.touchAddressOnWrite(addr) - } - - return stemRead, selectorRead, stemWrite, selectorWrite, chunkFill -} - -func (aw *AccessWitness) touchAddressAndChargeGas(addr []byte, isWrite bool) uint64 { - var gas uint64 - - stemRead, selectorRead, stemWrite, selectorWrite, selectorFill := aw.touchAddress(addr, isWrite) - - if stemRead { - gas += params.WitnessBranchReadCost - } - if selectorRead { - gas += params.WitnessChunkReadCost - } - if stemWrite { - gas += params.WitnessBranchWriteCost - } - if selectorWrite { - gas += params.WitnessChunkWriteCost - } - if selectorFill { - gas += params.WitnessChunkFillCost - } - - return gas -} - -func (aw *AccessWitness) TouchAddressOnWriteAndComputeGas(addr []byte) uint64 { - return aw.touchAddressAndChargeGas(addr, true) -} - -func (aw *AccessWitness) TouchAddressOnReadAndComputeGas(addr []byte) uint64 { - return aw.touchAddressAndChargeGas(addr, false) -} - // Merge is used to merge the witness that got generated during the execution // of a tx, with the accumulation of witnesses that were generated during the // execution of all the txs preceding this one in a given block. func (aw *AccessWitness) Merge(other *AccessWitness) { - for k := range other.Branches { - if _, ok := aw.Branches[k]; !ok { - aw.Branches[k] = other.Branches[k] - } - } - - for k, chunk := range other.Chunks { - if _, ok := aw.Chunks[k]; !ok { - aw.Chunks[k] = chunk - } + for k := range other.branches { + aw.branches[k] |= other.branches[k] } - - for k, v := range other.InitialValue { - if _, ok := aw.InitialValue[k]; !ok { - aw.InitialValue[k] = v - } + for k, chunk := range other.chunks { + aw.chunks[k] |= chunk } - - // TODO see if merging improves performance - //for k, v := range other.addrToPoint { - //if _, ok := aw.addrToPoint[k]; !ok { - //aw.addrToPoint[k] = v - //} - //} } // Key returns, predictably, the list of keys that were touched during the // buildup of the access witness. func (aw *AccessWitness) Keys() [][]byte { - keys := make([][]byte, 0, len(aw.Chunks)) - for key := range aw.Chunks { - var k [32]byte - copy(k[:], key[:]) - keys = append(keys, k[:]) + // TODO: consider if parallelizing this is worth it, probably depending on len(aw.chunks). 
+ keys := make([][]byte, 0, len(aw.chunks)) + for chunk := range aw.chunks { + basePoint := aw.pointCache.GetTreeKeyHeader(chunk.addr[:]) + key := utils.GetTreeKeyWithEvaluatedAddess(basePoint, &chunk.treeIndex, chunk.leafKey) + keys = append(keys, key) } return keys } -func (aw *AccessWitness) KeyVals() map[string][]byte { - result := make(map[string][]byte) - for k, v := range aw.InitialValue { - result[k] = v - } - return result -} - func (aw *AccessWitness) Copy() *AccessWitness { naw := &AccessWitness{ - Branches: make(map[VerkleStem]Mode), - Chunks: make(map[common.Hash]Mode), - InitialValue: make(map[string][]byte), + branches: make(map[branchAccessKey]mode), + chunks: make(map[chunkAccessKey]mode), + pointCache: aw.pointCache, } - naw.Merge(aw) - return naw } -func (aw *AccessWitness) GetTreeKeyVersionCached(addr []byte) []byte { - return aw.statedb.db.(*cachingDB).addrToPoint.GetTreeKeyVersionCached(addr) -} - func (aw *AccessWitness) TouchAndChargeProofOfAbsence(addr []byte) uint64 { - var ( - balancekey, cskey, ckkey, noncekey [32]byte - gas uint64 - ) - - // Only evaluate the polynomial once - versionkey := aw.GetTreeKeyVersionCached(addr[:]) - copy(balancekey[:], versionkey) - balancekey[31] = utils.BalanceLeafKey - copy(noncekey[:], versionkey) - noncekey[31] = utils.NonceLeafKey - copy(cskey[:], versionkey) - cskey[31] = utils.CodeSizeLeafKey - copy(ckkey[:], versionkey) - ckkey[31] = utils.CodeKeccakLeafKey - - gas += aw.TouchAddressOnReadAndComputeGas(versionkey) - gas += aw.TouchAddressOnReadAndComputeGas(balancekey[:]) - gas += aw.TouchAddressOnReadAndComputeGas(cskey[:]) - gas += aw.TouchAddressOnReadAndComputeGas(ckkey[:]) - gas += aw.TouchAddressOnReadAndComputeGas(noncekey[:]) + var gas uint64 + gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.VersionLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.BalanceLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.NonceLeafKey) return gas } func (aw *AccessWitness) TouchAndChargeMessageCall(addr []byte) uint64 { - var ( - gas uint64 - cskey [32]byte - ) - // Only evaluate the polynomial once - versionkey := aw.GetTreeKeyVersionCached(addr[:]) - copy(cskey[:], versionkey) - cskey[31] = utils.CodeSizeLeafKey - gas += aw.TouchAddressOnReadAndComputeGas(versionkey) - gas += aw.TouchAddressOnReadAndComputeGas(cskey[:]) + var gas uint64 + gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.VersionLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey) return gas } func (aw *AccessWitness) TouchAndChargeValueTransfer(callerAddr, targetAddr []byte) uint64 { var gas uint64 - gas += aw.TouchAddressOnWriteAndComputeGas(utils.GetTreeKeyBalance(callerAddr[:])) - gas += aw.TouchAddressOnWriteAndComputeGas(utils.GetTreeKeyBalance(targetAddr[:])) + gas += aw.TouchAddressOnWriteAndComputeGas(callerAddr, zeroTreeIndex, utils.BalanceLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) return gas } // TouchAndChargeContractCreateInit charges access costs to initiate // a contract creation func (aw *AccessWitness) TouchAndChargeContractCreateInit(addr []byte, createSendsValue bool) uint64 { - var ( - balancekey, ckkey, noncekey [32]byte - gas uint64 - ) - - // Only evaluate the 
polynomial once - versionkey := aw.GetTreeKeyVersionCached(addr[:]) - copy(balancekey[:], versionkey) - balancekey[31] = utils.BalanceLeafKey - copy(noncekey[:], versionkey) - noncekey[31] = utils.NonceLeafKey - copy(ckkey[:], versionkey) - ckkey[31] = utils.CodeKeccakLeafKey - - gas += aw.TouchAddressOnWriteAndComputeGas(versionkey) - gas += aw.TouchAddressOnWriteAndComputeGas(noncekey[:]) + var gas uint64 + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.VersionLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.NonceLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey) if createSendsValue { - gas += aw.TouchAddressOnWriteAndComputeGas(balancekey[:]) + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.BalanceLeafKey) } - gas += aw.TouchAddressOnWriteAndComputeGas(ckkey[:]) return gas } // TouchAndChargeContractCreateCompleted charges access access costs after // the completion of a contract creation to populate the created account in // the tree -func (aw *AccessWitness) TouchAndChargeContractCreateCompleted(addr []byte, withValue bool) uint64 { - var ( - balancekey, cskey, ckkey, noncekey [32]byte - gas uint64 - ) - - // Only evaluate the polynomial once - versionkey := aw.GetTreeKeyVersionCached(addr[:]) - copy(balancekey[:], versionkey) - balancekey[31] = utils.BalanceLeafKey - copy(noncekey[:], versionkey) - noncekey[31] = utils.NonceLeafKey - copy(cskey[:], versionkey) - cskey[31] = utils.CodeSizeLeafKey - copy(ckkey[:], versionkey) - ckkey[31] = utils.CodeKeccakLeafKey - - gas += aw.TouchAddressOnWriteAndComputeGas(versionkey) - gas += aw.TouchAddressOnWriteAndComputeGas(balancekey[:]) - gas += aw.TouchAddressOnWriteAndComputeGas(cskey[:]) - gas += aw.TouchAddressOnWriteAndComputeGas(ckkey[:]) - gas += aw.TouchAddressOnWriteAndComputeGas(noncekey[:]) +func (aw *AccessWitness) TouchAndChargeContractCreateCompleted(addr []byte) uint64 { + var gas uint64 + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.VersionLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.BalanceLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.NonceLeafKey) return gas } func (aw *AccessWitness) TouchTxOriginAndComputeGas(originAddr []byte) uint64 { - var ( - balancekey, cskey, ckkey, noncekey [32]byte - gas uint64 - ) - - // Only evaluate the polynomial once - versionkey := aw.GetTreeKeyVersionCached(originAddr[:]) - copy(balancekey[:], versionkey) - balancekey[31] = utils.BalanceLeafKey - copy(noncekey[:], versionkey) - noncekey[31] = utils.NonceLeafKey - copy(cskey[:], versionkey) - cskey[31] = utils.CodeSizeLeafKey - copy(ckkey[:], versionkey) - ckkey[31] = utils.CodeKeccakLeafKey - - gas += aw.TouchAddressOnReadAndComputeGas(versionkey) - gas += aw.TouchAddressOnReadAndComputeGas(cskey[:]) - gas += aw.TouchAddressOnReadAndComputeGas(ckkey[:]) - gas += aw.TouchAddressOnWriteAndComputeGas(noncekey[:]) - gas += aw.TouchAddressOnWriteAndComputeGas(balancekey[:]) - + var gas uint64 + gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.VersionLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.CodeSizeLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, 
utils.CodeKeccakLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(originAddr, zeroTreeIndex, utils.NonceLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(originAddr, zeroTreeIndex, utils.BalanceLeafKey) return gas } func (aw *AccessWitness) TouchTxExistingAndComputeGas(targetAddr []byte, sendsValue bool) uint64 { - var ( - balancekey, cskey, ckkey, noncekey [32]byte - gas uint64 - ) - - // Only evaluate the polynomial once - versionkey := aw.GetTreeKeyVersionCached(targetAddr[:]) - copy(balancekey[:], versionkey) - balancekey[31] = utils.BalanceLeafKey - copy(noncekey[:], versionkey) - noncekey[31] = utils.NonceLeafKey - copy(cskey[:], versionkey) - cskey[31] = utils.CodeSizeLeafKey - copy(ckkey[:], versionkey) - ckkey[31] = utils.CodeKeccakLeafKey - - gas += aw.TouchAddressOnReadAndComputeGas(versionkey) - gas += aw.TouchAddressOnReadAndComputeGas(cskey[:]) - gas += aw.TouchAddressOnReadAndComputeGas(ckkey[:]) - gas += aw.TouchAddressOnReadAndComputeGas(noncekey[:]) - gas += aw.TouchAddressOnReadAndComputeGas(balancekey[:]) - + var gas uint64 + gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.VersionLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.CodeSizeLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.CodeKeccakLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.NonceLeafKey) if sendsValue { - gas += aw.TouchAddressOnWriteAndComputeGas(balancekey[:]) + gas += aw.TouchAddressOnWriteAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) + } else { + gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) } return gas } + +func (aw *AccessWitness) TouchAddressOnWriteAndComputeGas(addr []byte, treeIndex uint256.Int, subIndex byte) uint64 { + return aw.touchAddressAndChargeGas(addr, treeIndex, subIndex, true) +} + +func (aw *AccessWitness) TouchAddressOnReadAndComputeGas(addr []byte, treeIndex uint256.Int, subIndex byte) uint64 { + return aw.touchAddressAndChargeGas(addr, treeIndex, subIndex, false) +} + +func (aw *AccessWitness) touchAddressAndChargeGas(addr []byte, treeIndex uint256.Int, subIndex byte, isWrite bool) uint64 { + stemRead, selectorRead, stemWrite, selectorWrite, selectorFill := aw.touchAddress(addr, treeIndex, subIndex, isWrite) + + var gas uint64 + if stemRead { + gas += params.WitnessBranchReadCost + } + if selectorRead { + gas += params.WitnessChunkReadCost + } + if stemWrite { + gas += params.WitnessBranchWriteCost + } + if selectorWrite { + gas += params.WitnessChunkWriteCost + } + if selectorFill { + gas += params.WitnessChunkFillCost + } + + return gas +} + +// touchAddress adds any missing access event to the witness. +func (aw *AccessWitness) touchAddress(addr []byte, treeIndex uint256.Int, subIndex byte, isWrite bool) (bool, bool, bool, bool, bool) { + branchKey := newBranchAccessKey(addr, treeIndex) + chunkKey := newChunkAccessKey(branchKey, subIndex) + + // Read access. + var branchRead, chunkRead bool + if _, hasStem := aw.branches[branchKey]; !hasStem { + branchRead = true + aw.branches[branchKey] = AccessWitnessReadFlag + } + if _, hasSelector := aw.chunks[chunkKey]; !hasSelector { + chunkRead = true + aw.chunks[chunkKey] = AccessWitnessReadFlag + } + + // Write access. 
+ var branchWrite, chunkWrite, chunkFill bool + if isWrite { + if (aw.branches[branchKey] & AccessWitnessWriteFlag) == 0 { + branchWrite = true + aw.branches[branchKey] |= AccessWitnessWriteFlag + } + + chunkValue := aw.chunks[chunkKey] + if (chunkValue & AccessWitnessWriteFlag) == 0 { + chunkWrite = true + aw.chunks[chunkKey] |= AccessWitnessWriteFlag + } + + // TODO: charge chunk filling costs if the leaf was previously empty in the state + } + + return branchRead, chunkRead, branchWrite, chunkWrite, chunkFill +} + +type branchAccessKey struct { + addr common.Address + treeIndex uint256.Int +} + +func newBranchAccessKey(addr []byte, treeIndex uint256.Int) branchAccessKey { + var sk branchAccessKey + copy(sk.addr[:], addr) + sk.treeIndex = treeIndex + return sk +} + +type chunkAccessKey struct { + branchAccessKey + leafKey byte +} + +func newChunkAccessKey(branchKey branchAccessKey, leafKey byte) chunkAccessKey { + var lk chunkAccessKey + lk.branchAccessKey = branchKey + lk.leafKey = leafKey + return lk +} diff --git a/core/state/statedb.go b/core/state/statedb.go index d500ef3862a7..48d2a8e509db 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -174,7 +174,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) hasher: crypto.NewKeccakState(), } if tr.IsVerkle() { - sdb.witness = NewAccessWitness(sdb) + sdb.witness = sdb.NewAccessWitness() // if sdb.snaps == nil { // snapconfig := snapshot.Config{ // CacheSize: 256, @@ -206,9 +206,13 @@ func (s *StateDB) Snaps() *snapshot.Tree { return s.snaps } +func (s *StateDB) NewAccessWitness() *AccessWitness { + return NewAccessWitness(s.db.(*cachingDB).addrToPoint) +} + func (s *StateDB) Witness() *AccessWitness { if s.witness == nil { - s.witness = NewAccessWitness(s) + s.witness = s.NewAccessWitness() } return s.witness } diff --git a/core/state_processor.go b/core/state_processor.go index 4ccb06ecfb4f..c66c0049d59c 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -320,7 +320,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) { // Create a new context to be used in the EVM environment. txContext := NewEVMTxContext(msg) - txContext.Accesses = state.NewAccessWitness(statedb) + txContext.Accesses = statedb.NewAccessWitness() evm.Reset(txContext, statedb) // Apply the transaction to the current state (included in the env). diff --git a/core/vm/contract.go b/core/vm/contract.go index caaaa8e455f4..1aa650d7d4f4 100644 --- a/core/vm/contract.go +++ b/core/vm/contract.go @@ -20,9 +20,6 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/trie" - "github.com/ethereum/go-ethereum/trie/utils" - "github.com/gballet/go-verkle" "github.com/holiman/uint256" ) @@ -52,13 +49,11 @@ type Contract struct { CallerAddress common.Address caller ContractRef self ContractRef - addressPoint *verkle.Point jumpdests map[common.Hash]bitvec // Aggregated result of JUMPDEST analysis. 
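The access_witness.go rewrite above keys every access by a branch (address plus tree index) and by a chunk (branch plus leaf key), and bills witness gas only on the first read and the first write of each. Below is a minimal, self-contained sketch of that accounting; the cost constants and the string keys are placeholders standing in for the params.Witness* values and the branchAccessKey/chunkAccessKey structs used in the patch.

package main

import "fmt"

// Placeholder costs; the patch charges params.WitnessBranchReadCost,
// params.WitnessChunkReadCost, params.WitnessBranchWriteCost and
// params.WitnessChunkWriteCost instead. The numbers below exist only
// to make the example runnable.
const (
	branchReadCost  = 1900
	chunkReadCost   = 200
	branchWriteCost = 3000
	chunkWriteCost  = 500
)

type mode byte

const (
	readFlag  mode = 1
	writeFlag mode = 2
)

// witness tracks which branches and chunks have been read or written,
// mirroring the two maps in the rewritten AccessWitness.
type witness struct {
	branches map[string]mode
	chunks   map[string]mode
}

func newWitness() *witness {
	return &witness{branches: map[string]mode{}, chunks: map[string]mode{}}
}

// touch records one access and returns the gas it charges: a branch or
// chunk is billed once for its first read and once more for its first
// write, as in touchAddressAndChargeGas.
func (w *witness) touch(branch, chunk string, write bool) uint64 {
	var gas uint64
	if _, ok := w.branches[branch]; !ok {
		w.branches[branch] = readFlag
		gas += branchReadCost
	}
	if _, ok := w.chunks[chunk]; !ok {
		w.chunks[chunk] = readFlag
		gas += chunkReadCost
	}
	if write {
		if w.branches[branch]&writeFlag == 0 {
			w.branches[branch] |= writeFlag
			gas += branchWriteCost
		}
		if w.chunks[chunk]&writeFlag == 0 {
			w.chunks[chunk] |= writeFlag
			gas += chunkWriteCost
		}
	}
	return gas
}

func main() {
	w := newWitness()
	fmt.Println(w.touch("acct0/0", "balance", false)) // first read: branch + chunk read costs
	fmt.Println(w.touch("acct0/0", "balance", true))  // first write on a read entry: write costs only
	fmt.Println(w.touch("acct0/0", "balance", true))  // repeated write: 0
	fmt.Println(w.touch("acct1/0", "nonce", true))    // first-ever write: read + write costs
}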
analysis bitvec // Locally cached result of JUMPDEST analysis Code []byte - Chunks trie.ChunkedCode CodeHash common.Hash CodeAddr *common.Address Input []byte @@ -180,14 +175,6 @@ func (c *Contract) Address() common.Address { return c.self.Address() } -func (c *Contract) AddressPoint() *verkle.Point { - if c.addressPoint == nil { - c.addressPoint = utils.EvaluateAddressPoint(c.Address().Bytes()) - } - - return c.addressPoint -} - // Value returns the contract's value (sent to it from it's caller) func (c *Contract) Value() *big.Int { return c.value diff --git a/core/vm/evm.go b/core/vm/evm.go index f8f67c4ef7e7..73c3c2150617 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -138,7 +138,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time), } if txCtx.Accesses == nil && chainConfig.IsVerkle(blockCtx.BlockNumber, blockCtx.Time) { - txCtx.Accesses = state.NewAccessWitness(evm.StateDB.(*state.StateDB)) + txCtx.Accesses = evm.StateDB.(*state.StateDB).NewAccessWitness() } evm.interpreter = NewEVMInterpreter(evm) return evm @@ -148,7 +148,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig // This is not threadsafe and should only be done very cautiously. func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) { if txCtx.Accesses == nil && evm.chainRules.IsVerkle { - txCtx.Accesses = state.NewAccessWitness(evm.StateDB.(*state.StateDB)) + txCtx.Accesses = evm.StateDB.(*state.StateDB).NewAccessWitness() } evm.TxContext = txCtx evm.StateDB = statedb @@ -530,7 +530,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } if err == nil && evm.chainRules.IsVerkle { - if !contract.UseGas(evm.Accesses.TouchAndChargeContractCreateCompleted(address.Bytes()[:], value.Sign() != 0)) { + if !contract.UseGas(evm.Accesses.TouchAndChargeContractCreateCompleted(address.Bytes()[:])) { evm.StateDB.RevertToSnapshot(snapshot) err = ErrOutOfGas } diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 780b05182638..dc307b82904d 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" trieUtils "github.com/ethereum/go-ethereum/trie/utils" + "github.com/holiman/uint256" ) // memoryGasCost calculates the quadratic gas for memory expansion. 
It does so @@ -101,8 +102,7 @@ func gasExtCodeSize(evm *EVM, contract *Contract, stack *Stack, mem *Memory, mem usedGas := uint64(0) slot := stack.Back(0) if evm.chainRules.IsVerkle { - index := trieUtils.GetTreeKeyCodeSize(slot.Bytes()) - usedGas += evm.TxContext.Accesses.TouchAddressOnReadAndComputeGas(index) + usedGas += evm.TxContext.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, trieUtils.CodeSizeLeafKey) } return usedGas, nil @@ -113,8 +113,8 @@ func gasSLoad(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySiz if evm.chainRules.IsVerkle { where := stack.Back(0) - index := trieUtils.GetTreeKeyStorageSlotWithEvaluatedAddress(contract.AddressPoint(), where.Bytes()) - usedGas += evm.Accesses.TouchAddressOnReadAndComputeGas(index) + treeIndex, subIndex := trieUtils.GetTreeKeyStorageSlotTreeIndexes(where.Bytes()) + usedGas += evm.Accesses.TouchAddressOnReadAndComputeGas(contract.Address().Bytes(), *treeIndex, subIndex) } return usedGas, nil diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 871922c1012b..13252eda9df9 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -23,7 +23,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" trieUtils "github.com/ethereum/go-ethereum/trie/utils" "github.com/holiman/uint256" ) @@ -347,8 +346,7 @@ func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) slot := scope.Stack.peek() cs := uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())) if interpreter.evm.chainRules.IsVerkle { - index := trieUtils.GetTreeKeyCodeSize(slot.Bytes()) - statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(index) + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, trieUtils.CodeSizeLeafKey) scope.Contract.UseGas(statelessGas) } slot.SetUint64(cs) @@ -373,79 +371,42 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ uint64CodeOffset = 0xffffffffffffffff } + contractAddr := scope.Contract.Address() paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(scope.Contract.Code, uint64CodeOffset, length.Uint64()) if interpreter.evm.chainRules.IsVerkle { - scope.Contract.UseGas(touchEachChunksOnReadAndChargeGas(copyOffset, nonPaddedCopyLength, scope.Contract, scope.Contract.Code, interpreter.evm.Accesses, scope.Contract.IsDeployment)) + scope.Contract.UseGas(touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], copyOffset, nonPaddedCopyLength, uint64(len(scope.Contract.Code)), interpreter.evm.Accesses)) } scope.Memory.Set(memOffset.Uint64(), uint64(len(paddedCodeCopy)), paddedCodeCopy) return nil, nil } -// touchChunkOnReadAndChargeGas is a helper function to touch every chunk in a code range and charge witness gas costs -func touchChunkOnReadAndChargeGas(chunks trie.ChunkedCode, offset uint64, evals [][]byte, code []byte, accesses *state.AccessWitness, deployment bool) uint64 { - // note that in the case where the executed code is outside the range of - // the contract code but touches the last leaf with contract code in it, - // we don't include the last leaf of code in the AccessWitness. The - // reason that we do not need the last leaf is the account's code size - // is already in the AccessWitness so a stateless verifier can see that - // the code from the last leaf is not needed. 
- if code != nil && offset > uint64(len(code)) { - return 0 - } - var ( - chunknr = offset / 31 - statelessGasCharged uint64 - ) - - // Build the chunk address from the evaluated address of its whole group - var index [32]byte - copy(index[:], evals[chunknr/256]) - index[31] = byte((128 + chunknr) % 256) - - var overflow bool - statelessGasCharged, overflow = math.SafeAdd(statelessGasCharged, accesses.TouchAddressOnReadAndComputeGas(index[:])) - if overflow { - panic("overflow when adding gas") - } - - return statelessGasCharged -} - -// touchEachChunksOnReadAndChargeGas is a helper function to touch every chunk in a code range and charge witness gas costs -func touchEachChunksOnReadAndChargeGas(offset, size uint64, contract *Contract, code []byte, accesses *state.AccessWitness, deployment bool) uint64 { +// touchCodeChunksRangeOnReadAndChargeGas is a helper function to touch every chunk in a code range and charge witness gas costs +func touchCodeChunksRangeOnReadAndChargeGas(contractAddr []byte, startPC, size uint64, codeLen uint64, accesses *state.AccessWitness) uint64 { // note that in the case where the copied code is outside the range of the // contract code but touches the last leaf with contract code in it, // we don't include the last leaf of code in the AccessWitness. The // reason that we do not need the last leaf is the account's code size // is already in the AccessWitness so a stateless verifier can see that // the code from the last leaf is not needed. - if len(code) == 0 && size == 0 || offset > uint64(len(code)) { + if (codeLen == 0 && size == 0) || startPC > codeLen { return 0 } - var ( - statelessGasCharged uint64 - endOffset uint64 - ) - if code != nil && offset+size > uint64(len(code)) { - endOffset = uint64(len(code)) - } else { - endOffset = offset + size - } - // endOffset - 1 since if the end offset is aligned on a chunk boundary, - // the last chunk should not be included. - for i := offset / 31; i <= (endOffset-1)/31; i++ { - // only charge for+cache the chunk if it isn't already present - if !accesses.HasCodeChunk(contract.Address().Bytes(), i) { - index := trieUtils.GetTreeKeyCodeChunkWithEvaluatedAddress(contract.AddressPoint(), uint256.NewInt(i)) - - var overflow bool - statelessGasCharged, overflow = math.SafeAdd(statelessGasCharged, accesses.TouchAddressOnReadAndComputeGas(index)) - if overflow { - panic("overflow when adding gas") - } + // endPC is the last PC that must be touched. 
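The loop that resumes just below maps every touched 31-byte code chunk to a tree position via (chunkNumber+128)/256 and (chunkNumber+128)%256, i.e. chunk 0 lands on leaf 128 of the account's first subtree and every subtree holds 256 leaves. A small worked example of that mapping, reusing the patch's formula (illustrative only):

package main

import "fmt"

// chunkPosition mirrors the arithmetic in
// touchCodeChunksRangeOnReadAndChargeGas: code is split into 31-byte
// chunks, chunk 0 sits at leaf 128, and each subtree holds 256 leaves.
func chunkPosition(chunkNumber uint64) (treeIndex uint64, subIndex byte) {
	return (chunkNumber + 128) / 256, byte((chunkNumber + 128) % 256)
}

func main() {
	for _, pc := range []uint64{0, 30, 31, 3968, 7905} {
		chunk := pc / 31 // the chunk a program counter falls into
		treeIndex, subIndex := chunkPosition(chunk)
		fmt.Printf("pc=%4d chunk=%3d treeIndex=%d subIndex=%d\n", pc, chunk, treeIndex, subIndex)
	}
}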
+ endPC := startPC + size - 1 + if startPC+size > codeLen { + endPC = codeLen + } - accesses.SetCachedCodeChunk(contract.Address().Bytes(), i) + var statelessGasCharged uint64 + for chunkNumber := startPC / 31; chunkNumber <= endPC/31; chunkNumber++ { + treeIndex := *uint256.NewInt((chunkNumber + 128) / 256) + subIndex := byte((chunkNumber + 128) % 256) + gas := accesses.TouchAddressOnReadAndComputeGas(contractAddr, treeIndex, subIndex) + var overflow bool + statelessGasCharged, overflow = math.SafeAdd(statelessGasCharged, gas) + if overflow { + panic("overflow when adding gas") } } @@ -468,12 +429,12 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) if interpreter.evm.chainRules.IsVerkle { code := interpreter.evm.StateDB.GetCode(addr) contract := &Contract{ - Code: code, - Chunks: trie.ChunkedCode(code), - self: AccountRef(addr), + Code: code, + self: AccountRef(addr), } paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(code, uint64CodeOffset, length.Uint64()) - touchEachChunksOnReadAndChargeGas(copyOffset, nonPaddedCopyLength, contract, code, interpreter.evm.Accesses, false) + gas := touchCodeChunksRangeOnReadAndChargeGas(addr[:], copyOffset, nonPaddedCopyLength, uint64(len(contract.Code)), interpreter.evm.Accesses) + scope.Contract.UseGas(gas) scope.Memory.Set(memOffset.Uint64(), length.Uint64(), paddedCodeCopy) } else { codeCopy := getData(interpreter.evm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64()) @@ -1001,7 +962,8 @@ func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by if interpreter.evm.chainRules.IsVerkle && *pc%31 == 0 { // touch next chunk if PUSH1 is at the boundary. if so, *pc has // advanced past this boundary. - statelessGas := touchEachChunksOnReadAndChargeGas(*pc+1, uint64(1), scope.Contract, scope.Contract.Code, interpreter.evm.Accesses, scope.Contract.IsDeployment) + contractAddr := scope.Contract.Address() + statelessGas := touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], *pc+1, uint64(1), uint64(len(scope.Contract.Code)), interpreter.evm.Accesses) scope.Contract.UseGas(statelessGas) } } else { @@ -1026,7 +988,8 @@ func makePush(size uint64, pushByteSize int) executionFunc { } if interpreter.evm.chainRules.IsVerkle { - statelessGas := touchEachChunksOnReadAndChargeGas(uint64(startMin), uint64(pushByteSize), scope.Contract, scope.Contract.Code, interpreter.evm.Accesses, scope.Contract.IsDeployment) + contractAddr := scope.Contract.Address() + statelessGas := touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], uint64(startMin), uint64(pushByteSize), uint64(len(scope.Contract.Code)), interpreter.evm.Accesses) scope.Contract.UseGas(statelessGas) } diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 6563b17bd9a1..ad4222b447b7 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -21,10 +21,6 @@ import ( "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/trie" - "github.com/ethereum/go-ethereum/trie/utils" - "github.com/gballet/go-verkle" - "github.com/holiman/uint256" ) // Config are the configuration options for the Interpreter @@ -152,8 +148,6 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( logged bool // deferred EVMLogger should ignore already logged steps res []byte // result of the opcode execution function debug = in.evm.Config.Tracer != nil - - chunkEvals [][]byte ) // Don't move this 
deferred function, it's placed before the capturestate-deferred method, // so that it get's executed _after_: the capturestate needs the stacks before @@ -175,21 +169,6 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( }() } - // Evaluate one address per group of 256, 31-byte chunks - if in.evm.chainRules.IsCancun && !contract.IsDeployment { - contract.Chunks = trie.ChunkifyCode(contract.Code) - - // number of extra stems to evaluate after the header stem - extraEvals := (len(contract.Chunks) + 127) / verkle.NodeWidth - - chunkEvals = make([][]byte, extraEvals+1) - for i := 1; i < extraEvals+1; i++ { - chunkEvals[i] = utils.GetTreeKeyCodeChunkWithEvaluatedAddress(contract.AddressPoint(), uint256.NewInt(uint64(i)*256)) - } - // Header account is already known, it's the header account - chunkEvals[0] = utils.GetTreeKeyVersionWithEvaluatedAddress(contract.AddressPoint()) - } - // The Interpreter main run loop (contextual). This loop runs until either an // explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during // the execution of one of the operations or until the done flag is set by the @@ -200,10 +179,11 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( logged, pcCopy, gasCopy = false, pc, contract.Gas } - if contract.Chunks != nil { + if in.evm.chainRules.IsCancun && !contract.IsDeployment { // if the PC ends up in a new "chunk" of verkleized code, charge the // associated costs. - contract.Gas -= touchChunkOnReadAndChargeGas(contract.Chunks, pc, chunkEvals, contract.Code, in.evm.TxContext.Accesses, contract.IsDeployment) + contractAddr := contract.Address() + contract.Gas -= touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], pc, 1, uint64(len(contract.Code)), in.evm.TxContext.Accesses) } // Get the operation from the jump table and validate the stack to ensure there are diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index fe8446be3b08..7d2296c4dc01 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -22,7 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/params" - trieUtils "github.com/ethereum/go-ethereum/trie/utils" + "github.com/ethereum/go-ethereum/trie/utils" ) func makeGasSStoreFunc(clearingRefund uint64) gasFunc { @@ -53,8 +53,8 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc { value := common.Hash(y.Bytes32()) if evm.chainRules.IsVerkle { - index := trieUtils.GetTreeKeyStorageSlotWithEvaluatedAddress(contract.AddressPoint(), x.Bytes()) - cost += evm.Accesses.TouchAddressOnWriteAndComputeGas(index) + treeIndex, subIndex := utils.GetTreeKeyStorageSlotTreeIndexes(x.Bytes()) + cost += evm.Accesses.TouchAddressOnWriteAndComputeGas(contract.Address().Bytes(), *treeIndex, subIndex) } if current == value { // noop (1) @@ -113,9 +113,9 @@ func gasSLoadEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me if evm.chainRules.IsVerkle { where := stack.Back(0) + treeIndex, subIndex := utils.GetTreeKeyStorageSlotTreeIndexes(where.Bytes()) addr := contract.Address() - index := trieUtils.GetTreeKeyStorageSlot(addr[:], where) - gasUsed += evm.Accesses.TouchAddressOnReadAndComputeGas(index) + gasUsed += evm.Accesses.TouchAddressOnReadAndComputeGas(addr.Bytes(), *treeIndex, subIndex) } // Check slot presence in the access list diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index c06c189b99b2..85e479b641e9 100644 --- a/trie/utils/verkle.go +++ 
b/trie/utils/verkle.go @@ -144,7 +144,7 @@ func GetTreeKeyVersion(address []byte) []byte { } func GetTreeKeyVersionWithEvaluatedAddress(addrp *verkle.Point) []byte { - return getTreeKeyWithEvaluatedAddess(addrp, zero, VersionLeafKey) + return GetTreeKeyWithEvaluatedAddess(addrp, zero, VersionLeafKey) } func GetTreeKeyBalance(address []byte) []byte { @@ -182,7 +182,7 @@ func GetTreeKeyCodeChunkWithEvaluatedAddress(addressPoint *verkle.Point, chunk * if len(subIndexMod) != 0 { subIndex = byte(subIndexMod[0]) } - return getTreeKeyWithEvaluatedAddess(addressPoint, treeIndex, subIndex) + return GetTreeKeyWithEvaluatedAddess(addressPoint, treeIndex, subIndex) } func GetTreeKeyStorageSlot(address []byte, storageKey *uint256.Int) []byte { @@ -221,7 +221,7 @@ func PointToHash(evaluated *verkle.Point, suffix byte) []byte { return retb[:] } -func getTreeKeyWithEvaluatedAddess(evaluated *verkle.Point, treeIndex *uint256.Int, subIndex byte) []byte { +func GetTreeKeyWithEvaluatedAddess(evaluated *verkle.Point, treeIndex *uint256.Int, subIndex byte) []byte { var poly [5]fr.Element poly[0].SetZero() @@ -269,6 +269,11 @@ func EvaluateAddressPoint(address []byte) *verkle.Point { } func GetTreeKeyStorageSlotWithEvaluatedAddress(evaluated *verkle.Point, storageKey []byte) []byte { + treeIndex, subIndex := GetTreeKeyStorageSlotTreeIndexes(storageKey) + return GetTreeKeyWithEvaluatedAddess(evaluated, treeIndex, subIndex) +} + +func GetTreeKeyStorageSlotTreeIndexes(storageKey []byte) (*uint256.Int, byte) { // Note that `pos` must be a big.Int and not a uint256.Int, because the subsequent // arithmetics operations could overflow. (e.g: imagine if storageKey is 2^256-1) pos := new(big.Int).SetBytes(storageKey) @@ -286,5 +291,5 @@ func GetTreeKeyStorageSlotWithEvaluatedAddress(evaluated *verkle.Point, storageK posBytes := pos.Bytes() subIndex := posBytes[len(posBytes)-1] - return getTreeKeyWithEvaluatedAddess(evaluated, treeIndex, subIndex) + return treeIndex, subIndex } diff --git a/trie/verkle_test.go b/trie/verkle_test.go index 5c9e1f03330d..4e21ee501a21 100644 --- a/trie/verkle_test.go +++ b/trie/verkle_test.go @@ -65,11 +65,9 @@ func TestReproduceTree(t *testing.T) { } root := verkle.New() - kv := make(map[string][]byte) for i, key := range presentKeys { root.Insert(key, values[i], nil) - kv[string(key)] = values[i] } proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, append(presentKeys, absentKeys...)) @@ -286,11 +284,9 @@ func TestReproduceCondrieuStemAggregationInProofOfAbsence(t *testing.T) { } root := verkle.New() - kv := make(map[string][]byte) for i, key := range presentKeys { root.Insert(key, values[i], nil) - kv[string(key)] = values[i] } proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, append(presentKeys, absentKeys...)) @@ -333,11 +329,9 @@ func TestReproduceCondrieuPoAStemConflictWithAnotherStem(t *testing.T) { } root := verkle.New() - kv := make(map[string][]byte) for i, key := range presentKeys { root.Insert(key, values[i], nil) - kv[string(key)] = values[i] } proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, append(presentKeys, absentKeys...)) From f090ddbe9d15287a43b8c1c9e8d01e6914b0ea83 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Wed, 30 Aug 2023 17:49:09 +0200 Subject: [PATCH 24/99] remove unused map param in MakeVerkleMultiProof (#261) --- core/chain_makers.go | 2 +- trie/verkle.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/chain_makers.go b/core/chain_makers.go index 
03f98bade68f..d2aaef260971 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -442,7 +442,7 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine } vtr.Hash() - p, k, err := preStateTrie.ProveAndSerialize(statedb.Witness().Keys(), kvs) + p, k, err := preStateTrie.ProveAndSerialize(statedb.Witness().Keys()) if err != nil { panic(err) } diff --git a/trie/verkle.go b/trie/verkle.go index fcf9e114cfc0..baf9fde541ac 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -320,7 +320,7 @@ func (trie *VerkleTrie) IsVerkle() bool { return true } -func (trie *VerkleTrie) ProveAndSerialize(keys [][]byte, kv map[string][]byte) (*verkle.VerkleProof, verkle.StateDiff, error) { +func (trie *VerkleTrie) ProveAndSerialize(keys [][]byte) (*verkle.VerkleProof, verkle.StateDiff, error) { proof, _, _, _, err := verkle.MakeVerkleMultiProof(trie.root, keys) if err != nil { return nil, nil, err From ed36c23480adeed2fc3738c1c6f27a6b0ce924a1 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Thu, 31 Aug 2023 12:29:58 -0300 Subject: [PATCH 25/99] core/state: rewrite a new optimized keyValueMigrator (#256) * trie/utils: add helper to calculate code tree indices * core/state: rewrite optimized version of keyValueMigrator Signed-off-by: Ignacio Hagopian * trie/verkle: remove uint256 allocs (#257) Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --- core/state_processor.go | 177 ++++++++++++++++++++++++++++------------ trie/utils/verkle.go | 70 ++++++++-------- 2 files changed, 163 insertions(+), 84 deletions(-) diff --git a/core/state_processor.go b/core/state_processor.go index c66c0049d59c..8dd4de5436ba 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -25,6 +25,8 @@ import ( "io" "math/big" "os" + "runtime" + "sync" "time" "github.com/ethereum/go-ethereum/common" @@ -170,7 +172,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg // mkv will be assiting in the collection of up to maxMovedCount key values to be migrated to the VKT. // It has internal caches to do efficient MPT->VKT key calculations, which will be discarded after // this function. - mkv := &keyValueMigrator{vktLeafData: make(map[string]*verkle.BatchNewLeafNodeData)} + mkv := newKeyValueMigrator() // move maxCount accounts into the verkle tree, starting with the // slots from the previous account. count := 0 @@ -297,8 +299,17 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg } migrdb.SetCurrentPreimageOffset(preimageSeek) - log.Info("Collected and prepared key values from base tree", "count", count, "duration", time.Since(now), "last account", statedb.Database().GetCurrentAccountHash()) - + log.Info("Collected key values from base tree", "count", count, "duration", time.Since(now), "last account", statedb.Database().GetCurrentAccountHash()) + + // Take all the collected key-values and prepare the new leaf values. + // This fires a background routine that will start doing the work that + // migrateCollectedKeyValues() will use to insert into the tree. + // + // TODO: Now both prepare() and migrateCollectedKeyValues() are next to each other, but + // after we fix an existing bug, we can call prepare() before the block execution and + // let it do the work in the background. After the block execution and finalization + // finish, we can call migrateCollectedKeyValues() which should already find everything ready. 
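The comment above describes the intended two-phase flow: prepare() starts the heavy work in the background and migrateCollectedKeyValues() later waits for it, which the patch implements by closing the processingReady channel. A stripped-down sketch of that synchronization pattern (the types and the simulated work are placeholders, not the patch's code):

package main

import (
	"fmt"
	"time"
)

// migrator is a stand-in for the patch's keyValueMigrator: prepare()
// launches the work in a goroutine and signals completion by closing
// ready; the consumer blocks on <-ready before using the results.
type migrator struct {
	ready  chan struct{}
	leaves []string
	err    error
}

func newMigrator() *migrator {
	return &migrator{ready: make(chan struct{})}
}

func (m *migrator) prepare(data []string) {
	go func() {
		defer close(m.ready)              // always signal, even on error
		time.Sleep(10 * time.Millisecond) // placeholder for the CPU-heavy stem computation
		m.leaves = append([]string(nil), data...)
	}()
}

func (m *migrator) migrate() error {
	<-m.ready // wait for the background preparation to finish
	if m.err != nil {
		return fmt.Errorf("failed to prepare key values: %w", m.err)
	}
	fmt.Println("inserting", len(m.leaves), "prepared leaves")
	return nil
}

func main() {
	m := newMigrator()
	m.prepare([]string{"leaf-a", "leaf-b"})
	// ... block execution would run here while preparation continues ...
	if err := m.migrate(); err != nil {
		fmt.Println("migration failed:", err)
	}
}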
+ mkv.prepare() now = time.Now() if err := mkv.migrateCollectedKeyValues(tt.Overlay()); err != nil { return nil, nil, 0, fmt.Errorf("could not migrate key values: %w", err) @@ -380,30 +391,60 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo return applyTransaction(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv) } -// keyValueMigrator is a helper struct that collects key-values from the base tree. -// The walk is done in account order, so **we assume** the APIs hold this invariant. This is -// useful to be smart about caching banderwagon.Points to make VKT key calculations faster. +var zeroTreeIndex uint256.Int + +// keyValueMigrator is a helper module that collects key-values from the overlay-tree migration for Verkle Trees. +// It assumes that the walk of the base tree is done in address-order, so it exploit that fact to +// collect the key-values in a way that is efficient. type keyValueMigrator struct { - currAddr []byte - currAddrPoint *verkle.Point + // leafData contains the values for the future leaf for a particular VKT branch. + leafData []migratedKeyValue + + // When prepare() is called, it will start a background routine that will process the leafData + // saving the result in newLeaves to be used by migrateCollectedKeyValues(). The background + // routine signals that it is done by closing processingReady. + processingReady chan struct{} + newLeaves []verkle.LeafNode + prepareErr error +} - vktLeafData map[string]*verkle.BatchNewLeafNodeData +func newKeyValueMigrator() *keyValueMigrator { + // We do initialize the VKT config since prepare() might indirectly make multiple GetConfig() calls + // in different goroutines when we never called GetConfig() before, causing a race considering the way + // that `config` is designed in go-verkle. + // TODO: jsign as a fix for this in the PR where we move to a file-less precomp, since it allows safe + // concurrent calls to GetConfig(). When that gets merged, we can remove this line. 
+ _ = verkle.GetConfig() + return &keyValueMigrator{ + processingReady: make(chan struct{}), + leafData: make([]migratedKeyValue, 0, 10_000), + } } -func (kvm *keyValueMigrator) addStorageSlot(addr []byte, slotNumber []byte, slotValue []byte) { - addrPoint := kvm.getAddrPoint(addr) +type migratedKeyValue struct { + branchKey branchKey + leafNodeData verkle.BatchNewLeafNodeData +} +type branchKey struct { + addr common.Address + treeIndex uint256.Int +} - vktKey := tutils.GetTreeKeyStorageSlotWithEvaluatedAddress(addrPoint, slotNumber) - leafNodeData := kvm.getOrInitLeafNodeData(vktKey) +func newBranchKey(addr []byte, treeIndex *uint256.Int) branchKey { + var sk branchKey + copy(sk.addr[:], addr) + sk.treeIndex = *treeIndex + return sk +} - leafNodeData.Values[vktKey[verkle.StemSize]] = slotValue +func (kvm *keyValueMigrator) addStorageSlot(addr []byte, slotNumber []byte, slotValue []byte) { + treeIndex, subIndex := tutils.GetTreeKeyStorageSlotTreeIndexes(slotNumber) + leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex)) + leafNodeData.Values[subIndex] = slotValue } func (kvm *keyValueMigrator) addAccount(addr []byte, acc *types.StateAccount) { - addrPoint := kvm.getAddrPoint(addr) - - vktKey := tutils.GetTreeKeyVersionWithEvaluatedAddress(addrPoint) - leafNodeData := kvm.getOrInitLeafNodeData(vktKey) + leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex)) var version [verkle.LeafValueSize]byte leafNodeData.Values[tutils.VersionLeafKey] = version[:] @@ -419,16 +460,10 @@ func (kvm *keyValueMigrator) addAccount(addr []byte, acc *types.StateAccount) { leafNodeData.Values[tutils.NonceLeafKey] = nonce[:] leafNodeData.Values[tutils.CodeKeccakLeafKey] = acc.CodeHash[:] - - // Code size is ignored here. If this isn't an EOA, the tree-walk will call - // addAccountCode with this information. } func (kvm *keyValueMigrator) addAccountCode(addr []byte, codeSize uint64, chunks []byte) { - addrPoint := kvm.getAddrPoint(addr) - - vktKey := tutils.GetTreeKeyVersionWithEvaluatedAddress(addrPoint) - leafNodeData := kvm.getOrInitLeafNodeData(vktKey) + leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex)) // Save the code size. var codeSizeBytes [verkle.LeafValueSize]byte @@ -442,8 +477,8 @@ func (kvm *keyValueMigrator) addAccountCode(addr []byte, codeSize uint64, chunks // Potential further chunks, have their own leaf nodes. for i := 128; i < len(chunks)/32; { - vktKey := tutils.GetTreeKeyCodeChunkWithEvaluatedAddress(addrPoint, uint256.NewInt(uint64(i))) - leafNodeData := kvm.getOrInitLeafNodeData(vktKey) + treeIndex, _ := tutils.GetTreeKeyCodeChunkIndices(uint256.NewInt(uint64(i))) + leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex)) j := i for ; (j-i) < 256 && j < len(chunks)/32; j++ { @@ -453,41 +488,79 @@ func (kvm *keyValueMigrator) addAccountCode(addr []byte, codeSize uint64, chunks } } -func (kvm *keyValueMigrator) getAddrPoint(addr []byte) *verkle.Point { - if bytes.Equal(addr, kvm.currAddr) { - return kvm.currAddrPoint +func (kvm *keyValueMigrator) getOrInitLeafNodeData(bk branchKey) *verkle.BatchNewLeafNodeData { + // Remember that keyValueMigration receives actions ordered by (address, subtreeIndex). + // This means that we can assume that the last element of leafData is the one that we + // are looking for, or that we need to create a new one. 
+ if len(kvm.leafData) == 0 || kvm.leafData[len(kvm.leafData)-1].branchKey != bk { + kvm.leafData = append(kvm.leafData, migratedKeyValue{ + branchKey: bk, + leafNodeData: verkle.BatchNewLeafNodeData{ + Stem: nil, // It will be calculated in the prepare() phase, since it's CPU heavy. + Values: make(map[byte][]byte), + }, + }) } - kvm.currAddr = addr - kvm.currAddrPoint = tutils.EvaluateAddressPoint(addr) - return kvm.currAddrPoint + return &kvm.leafData[len(kvm.leafData)-1].leafNodeData } -func (kvm *keyValueMigrator) getOrInitLeafNodeData(stem []byte) *verkle.BatchNewLeafNodeData { - stemStr := string(stem) - if _, ok := kvm.vktLeafData[stemStr]; !ok { - kvm.vktLeafData[stemStr] = &verkle.BatchNewLeafNodeData{ - Stem: stem[:verkle.StemSize], - Values: make(map[byte][]byte), +func (kvm *keyValueMigrator) prepare() { + // We fire a background routine to process the leafData and save the result in newLeaves. + // The background routine signals that it is done by closing processingReady. + go func() { + // Step 1: We split kvm.leafData in numBatches batches, and we process each batch in a separate goroutine. + // This fills each leafNodeData.Stem with the correct value. + var wg sync.WaitGroup + batchNum := runtime.NumCPU() + batchSize := (len(kvm.leafData) + batchNum - 1) / batchNum + for i := 0; i < len(kvm.leafData); i += batchSize { + start := i + end := i + batchSize + if end > len(kvm.leafData) { + end = len(kvm.leafData) + } + wg.Add(1) + + batch := kvm.leafData[start:end] + go func() { + defer wg.Done() + var currAddr common.Address + var currPoint *verkle.Point + for i := range batch { + if batch[i].branchKey.addr != currAddr { + currAddr = batch[i].branchKey.addr + currPoint = tutils.EvaluateAddressPoint(currAddr[:]) + } + stem := tutils.GetTreeKeyWithEvaluatedAddess(currPoint, &batch[i].branchKey.treeIndex, 0) + stem = stem[:verkle.StemSize] + batch[i].leafNodeData.Stem = stem + } + }() } - } - return kvm.vktLeafData[stemStr] + wg.Wait() + + // Step 2: Now that we have all stems (i.e: tree keys) calcualted, we can create the new leaves. + nodeValues := make([]verkle.BatchNewLeafNodeData, len(kvm.leafData)) + for i := range kvm.leafData { + nodeValues[i] = kvm.leafData[i].leafNodeData + } + + // Create all leaves in batch mode so we can optimize cryptography operations. + kvm.newLeaves, kvm.prepareErr = verkle.BatchNewLeafNode(nodeValues) + close(kvm.processingReady) + }() } func (kvm *keyValueMigrator) migrateCollectedKeyValues(tree *trie.VerkleTrie) error { - // Transform the map into a slice. - nodeValues := make([]verkle.BatchNewLeafNodeData, 0, len(kvm.vktLeafData)) - for _, vld := range kvm.vktLeafData { - nodeValues = append(nodeValues, *vld) - } - - // Create all leaves in batch mode so we can optimize cryptography operations. - newLeaves, err := verkle.BatchNewLeafNode(nodeValues) - if err != nil { - return fmt.Errorf("failed to batch-create new leaf nodes") + now := time.Now() + <-kvm.processingReady + if kvm.prepareErr != nil { + return fmt.Errorf("failed to prepare key values: %w", kvm.prepareErr) } + log.Info("Prepared key values from base tree", "duration", time.Since(now)) // Insert into the tree. 
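For reference, the fan-out in prepare()'s first step boils down to the generic pattern sketched here (stdlib only; the per-address caching of the evaluated address point is elided, and fillInParallel is a hypothetical name). Each goroutine owns a non-overlapping window of the slice and mutates it in place; the hunk continues below with the actual leaf insertion into the tree.

package overlay

import (
	"runtime"
	"sync"
)

// fillInParallel splits items into roughly NumCPU batches and lets each
// goroutine fill in its window in place before the sequential step resumes.
func fillInParallel[T any](items []T, process func(*T)) {
	var wg sync.WaitGroup
	batches := runtime.NumCPU()
	size := (len(items) + batches - 1) / batches
	for start := 0; start < len(items); start += size {
		end := start + size
		if end > len(items) {
			end = len(items)
		}
		wg.Add(1)
		go func(window []T) {
			defer wg.Done()
			for i := range window {
				process(&window[i]) // windows never overlap, so no locking is needed
			}
		}(items[start:end])
	}
	wg.Wait()
}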
- if err := tree.InsertMigratedLeaves(newLeaves); err != nil { + if err := tree.InsertMigratedLeaves(kvm.newLeaves); err != nil { return fmt.Errorf("failed to insert migrated leaves: %w", err) } diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index 85e479b641e9..07949ec65e98 100644 --- a/trie/utils/verkle.go +++ b/trie/utils/verkle.go @@ -17,7 +17,7 @@ package utils import ( - "math/big" + "encoding/binary" "sync" "github.com/crate-crypto/go-ipa/bandersnatch/fr" @@ -34,18 +34,14 @@ const ( ) var ( - zero = uint256.NewInt(0) - HeaderStorageOffset = uint256.NewInt(64) - CodeOffset = uint256.NewInt(128) - MainStorageOffset = new(uint256.Int).Lsh(uint256.NewInt(256), 31) - VerkleNodeWidth = uint256.NewInt(256) - codeStorageDelta = uint256.NewInt(0).Sub(CodeOffset, HeaderStorageOffset) - - // BigInt versions of the above. - headerStorageOffsetBig = HeaderStorageOffset.ToBig() - mainStorageOffsetBig = MainStorageOffset.ToBig() - verkleNodeWidthBig = VerkleNodeWidth.ToBig() - codeStorageDeltaBig = codeStorageDelta.ToBig() + zero = uint256.NewInt(0) + VerkleNodeWidthLog2 = 8 + HeaderStorageOffset = uint256.NewInt(64) + mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(256), 31-uint(VerkleNodeWidthLog2)) + CodeOffset = uint256.NewInt(128) + MainStorageOffset = new(uint256.Int).Lsh(uint256.NewInt(256), 31) + VerkleNodeWidth = uint256.NewInt(256) + codeStorageDelta = uint256.NewInt(0).Sub(CodeOffset, HeaderStorageOffset) getTreePolyIndex0Point *verkle.Point ) @@ -164,6 +160,11 @@ func GetTreeKeyCodeSize(address []byte) []byte { } func GetTreeKeyCodeChunk(address []byte, chunk *uint256.Int) []byte { + treeIndex, subIndex := GetTreeKeyCodeChunkIndices(chunk) + return GetTreeKey(address, treeIndex, subIndex) +} + +func GetTreeKeyCodeChunkIndices(chunk *uint256.Int) (*uint256.Int, byte) { chunkOffset := new(uint256.Int).Add(CodeOffset, chunk) treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth) subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth) @@ -171,7 +172,7 @@ func GetTreeKeyCodeChunk(address []byte, chunk *uint256.Int) []byte { if len(subIndexMod) != 0 { subIndex = byte(subIndexMod[0]) } - return GetTreeKey(address, treeIndex, subIndex) + return treeIndex, subIndex } func GetTreeKeyCodeChunkWithEvaluatedAddress(addressPoint *verkle.Point, chunk *uint256.Int) []byte { @@ -230,8 +231,8 @@ func GetTreeKeyWithEvaluatedAddess(evaluated *verkle.Point, treeIndex *uint256.I // little-endian, 32-byte aligned treeIndex var index [32]byte - for i, b := range treeIndex.Bytes() { - index[len(treeIndex.Bytes())-1-i] = b + for i := 0; i < len(treeIndex); i++ { + binary.LittleEndian.PutUint64(index[i*8:(i+1)*8], treeIndex[i]) } verkle.FromLEBytes(&poly[3], index[:16]) verkle.FromLEBytes(&poly[4], index[16:]) @@ -274,22 +275,27 @@ func GetTreeKeyStorageSlotWithEvaluatedAddress(evaluated *verkle.Point, storageK } func GetTreeKeyStorageSlotTreeIndexes(storageKey []byte) (*uint256.Int, byte) { - // Note that `pos` must be a big.Int and not a uint256.Int, because the subsequent - // arithmetics operations could overflow. (e.g: imagine if storageKey is 2^256-1) - pos := new(big.Int).SetBytes(storageKey) - if pos.Cmp(codeStorageDeltaBig) < 0 { - pos.Add(headerStorageOffsetBig, pos) - } else { - pos.Add(mainStorageOffsetBig, pos) - } - treeIndex, overflow := uint256.FromBig(big.NewInt(0).Div(pos, verkleNodeWidthBig)) - if overflow { // Must never happen considering the EIP definition. 
- panic("tree index overflow") + var pos uint256.Int + pos.SetBytes(storageKey) + + // If the storage slot is in the header, we need to add the header offset. + if pos.Cmp(codeStorageDelta) < 0 { + // This addition is always safe; it can't ever overflow since pos Date: Sat, 9 Sep 2023 21:39:29 +0200 Subject: [PATCH 26/99] core: move overlay conversion code to its own file (#266) --- core/overlay_transition.go | 245 +++++++++++++++++++++++++++++++++++++ core/state_processor.go | 213 +------------------------------- 2 files changed, 248 insertions(+), 210 deletions(-) create mode 100644 core/overlay_transition.go diff --git a/core/overlay_transition.go b/core/overlay_transition.go new file mode 100644 index 000000000000..35c09d22d938 --- /dev/null +++ b/core/overlay_transition.go @@ -0,0 +1,245 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package core + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" +) + +// OverlayVerkleTransition contains the overlay conversion logic +func OverlayVerkleTransition(statedb *state.StateDB) error { + migrdb := statedb.Database() + + // verkle transition: if the conversion process is in progress, move + // N values from the MPT into the verkle tree. + if migrdb.InTransition() { + var ( + now = time.Now() + tt = statedb.GetTrie().(*trie.TransitionTrie) + mpt = tt.Base() + vkt = tt.Overlay() + hasPreimagesBin = false + preimageSeek = migrdb.GetCurrentPreimageOffset() + fpreimages *bufio.Reader + ) + + // TODO: avoid opening the preimages file here and make it part of, potentially, statedb.Database(). + filePreimages, err := os.Open("preimages.bin") + if err != nil { + // fallback on reading the db + log.Warn("opening preimage file", "error", err) + } else { + defer filePreimages.Close() + if _, err := filePreimages.Seek(preimageSeek, io.SeekStart); err != nil { + return fmt.Errorf("seeking preimage file: %s", err) + } + fpreimages = bufio.NewReader(filePreimages) + hasPreimagesBin = true + } + + accIt, err := statedb.Snaps().AccountIterator(mpt.Hash(), migrdb.GetCurrentAccountHash()) + if err != nil { + return err + } + defer accIt.Release() + accIt.Next() + + // If we're about to start with the migration process, we have to read the first account hash preimage. 
+ if migrdb.GetCurrentAccountAddress() == nil { + var addr common.Address + if hasPreimagesBin { + if _, err := io.ReadFull(fpreimages, addr[:]); err != nil { + return fmt.Errorf("reading preimage file: %s", err) + } + } else { + addr = common.BytesToAddress(rawdb.ReadPreimage(migrdb.DiskDB(), accIt.Hash())) + if len(addr) != 20 { + return fmt.Errorf("addr len is zero is not 32: %d", len(addr)) + } + } + migrdb.SetCurrentAccountAddress(addr) + if migrdb.GetCurrentAccountHash() != accIt.Hash() { + return fmt.Errorf("preimage file does not match account hash: %s != %s", crypto.Keccak256Hash(addr[:]), accIt.Hash()) + } + preimageSeek += int64(len(addr)) + } + + const maxMovedCount = 10000 + // mkv will be assiting in the collection of up to maxMovedCount key values to be migrated to the VKT. + // It has internal caches to do efficient MPT->VKT key calculations, which will be discarded after + // this function. + mkv := newKeyValueMigrator() + // move maxCount accounts into the verkle tree, starting with the + // slots from the previous account. + count := 0 + + // if less than maxCount slots were moved, move to the next account + for count < maxMovedCount { + acc, err := types.FullAccount(accIt.Account()) + if err != nil { + log.Error("Invalid account encountered during traversal", "error", err) + return err + } + vkt.SetStorageRootConversion(*migrdb.GetCurrentAccountAddress(), acc.Root) + + // Start with processing the storage, because once the account is + // converted, the `stateRoot` field loses its meaning. Which means + // that it opens the door to a situation in which the storage isn't + // converted, but it can not be found since the account was and so + // there is no way to find the MPT storage from the information found + // in the verkle account. + // Note that this issue can still occur if the account gets written + // to during normal block execution. A mitigation strategy has been + // introduced with the `*StorageRootConversion` fields in VerkleDB. + if acc.HasStorage() { + stIt, err := statedb.Snaps().StorageIterator(mpt.Hash(), accIt.Hash(), migrdb.GetCurrentSlotHash()) + if err != nil { + return err + } + stIt.Next() + + // fdb.StorageProcessed will be initialized to `true` if the + // entire storage for an account was not entirely processed + // by the previous block. This is used as a signal to resume + // processing the storage for that account where we left off. + // If the entire storage was processed, then the iterator was + // created in vain, but it's ok as this will not happen often. 
+ for ; !migrdb.GetStorageProcessed() && count < maxMovedCount; count++ { + var ( + value []byte // slot value after RLP decoding + safeValue [32]byte // 32-byte aligned value + ) + if err := rlp.DecodeBytes(stIt.Slot(), &value); err != nil { + return fmt.Errorf("error decoding bytes %x: %w", stIt.Slot(), err) + } + copy(safeValue[32-len(value):], value) + + var slotnr []byte + if hasPreimagesBin { + var s [32]byte + slotnr = s[:] + if _, err := io.ReadFull(fpreimages, slotnr); err != nil { + return fmt.Errorf("reading preimage file: %s", err) + } + } else { + slotnr = rawdb.ReadPreimage(migrdb.DiskDB(), stIt.Hash()) + if len(slotnr) != 32 { + return fmt.Errorf("slotnr len is zero is not 32: %d", len(slotnr)) + } + } + if crypto.Keccak256Hash(slotnr[:]) != stIt.Hash() { + return fmt.Errorf("preimage file does not match storage hash: %s!=%s", crypto.Keccak256Hash(slotnr), stIt.Hash()) + } + preimageSeek += int64(len(slotnr)) + + mkv.addStorageSlot(migrdb.GetCurrentAccountAddress().Bytes(), slotnr, safeValue[:]) + + // advance the storage iterator + migrdb.SetStorageProcessed(!stIt.Next()) + if !migrdb.GetStorageProcessed() { + migrdb.SetCurrentSlotHash(stIt.Hash()) + } + } + stIt.Release() + } + + // If the maximum number of leaves hasn't been reached, then + // it means that the storage has finished processing (or none + // was available for this account) and that the account itself + // can be processed. + if count < maxMovedCount { + count++ // count increase for the account itself + + mkv.addAccount(migrdb.GetCurrentAccountAddress().Bytes(), acc) + vkt.ClearStrorageRootConversion(*migrdb.GetCurrentAccountAddress()) + + // Store the account code if present + if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) { + code := rawdb.ReadCode(statedb.Database().DiskDB(), common.BytesToHash(acc.CodeHash)) + chunks := trie.ChunkifyCode(code) + + mkv.addAccountCode(migrdb.GetCurrentAccountAddress().Bytes(), uint64(len(code)), chunks) + } + + // reset storage iterator marker for next account + migrdb.SetStorageProcessed(false) + migrdb.SetCurrentSlotHash(common.Hash{}) + + // Move to the next account, if available - or end + // the transition otherwise. + if accIt.Next() { + var addr common.Address + if hasPreimagesBin { + if _, err := io.ReadFull(fpreimages, addr[:]); err != nil { + return fmt.Errorf("reading preimage file: %s", err) + } + } else { + addr = common.BytesToAddress(rawdb.ReadPreimage(migrdb.DiskDB(), accIt.Hash())) + if len(addr) != 20 { + return fmt.Errorf("account address len is zero is not 20: %d", len(addr)) + } + } + // fmt.Printf("account switch: %s != %s\n", crypto.Keccak256Hash(addr[:]), accIt.Hash()) + if crypto.Keccak256Hash(addr[:]) != accIt.Hash() { + return fmt.Errorf("preimage file does not match account hash: %s != %s", crypto.Keccak256Hash(addr[:]), accIt.Hash()) + } + preimageSeek += int64(len(addr)) + migrdb.SetCurrentAccountAddress(addr) + } else { + // case when the account iterator has + // reached the end but count < maxCount + migrdb.EndVerkleTransition() + break + } + } + } + migrdb.SetCurrentPreimageOffset(preimageSeek) + + log.Info("Collected key values from base tree", "count", count, "duration", time.Since(now), "last account", statedb.Database().GetCurrentAccountHash()) + + // Take all the collected key-values and prepare the new leaf values. + // This fires a background routine that will start doing the work that + // migrateCollectedKeyValues() will use to insert into the tree. 
+ // + // TODO: Now both prepare() and migrateCollectedKeyValues() are next to each other, but + // after we fix an existing bug, we can call prepare() before the block execution and + // let it do the work in the background. After the block execution and finalization + // finish, we can call migrateCollectedKeyValues() which should already find everything ready. + mkv.prepare() + now = time.Now() + if err := mkv.migrateCollectedKeyValues(tt.Overlay()); err != nil { + return fmt.Errorf("could not migrate key values: %w", err) + } + log.Info("Inserted key values in overlay tree", "count", count, "duration", time.Since(now)) + } + + return nil +} diff --git a/core/state_processor.go b/core/state_processor.go index 8dd4de5436ba..6c08bc03f323 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -17,14 +17,10 @@ package core import ( - "bufio" - "bytes" "encoding/binary" "errors" "fmt" - "io" "math/big" - "os" "runtime" "sync" "time" @@ -32,14 +28,12 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" - "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" tutils "github.com/ethereum/go-ethereum/trie/utils" "github.com/gballet/go-verkle" @@ -111,210 +105,9 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg return nil, nil, 0, errors.New("withdrawals before shanghai") } - // Overlay tree migration logic - migrdb := statedb.Database() - - // verkle transition: if the conversion process is in progress, move - // N values from the MPT into the verkle tree. - if migrdb.InTransition() { - var ( - now = time.Now() - tt = statedb.GetTrie().(*trie.TransitionTrie) - mpt = tt.Base() - vkt = tt.Overlay() - hasPreimagesBin = false - preimageSeek = migrdb.GetCurrentPreimageOffset() - fpreimages *bufio.Reader - ) - - // TODO: avoid opening the preimages file here and make it part of, potentially, statedb.Database(). - filePreimages, err := os.Open("preimages.bin") - if err != nil { - // fallback on reading the db - log.Warn("opening preimage file", "error", err) - } else { - defer filePreimages.Close() - if _, err := filePreimages.Seek(preimageSeek, io.SeekStart); err != nil { - return nil, nil, 0, fmt.Errorf("seeking preimage file: %s", err) - } - fpreimages = bufio.NewReader(filePreimages) - hasPreimagesBin = true - } - - accIt, err := statedb.Snaps().AccountIterator(mpt.Hash(), migrdb.GetCurrentAccountHash()) - if err != nil { - return nil, nil, 0, err - } - defer accIt.Release() - accIt.Next() - - // If we're about to start with the migration process, we have to read the first account hash preimage. 
- if migrdb.GetCurrentAccountAddress() == nil { - var addr common.Address - if hasPreimagesBin { - if _, err := io.ReadFull(fpreimages, addr[:]); err != nil { - return nil, nil, 0, fmt.Errorf("reading preimage file: %s", err) - } - } else { - addr = common.BytesToAddress(rawdb.ReadPreimage(migrdb.DiskDB(), accIt.Hash())) - if len(addr) != 20 { - return nil, nil, 0, fmt.Errorf("addr len is zero is not 32: %d", len(addr)) - } - } - migrdb.SetCurrentAccountAddress(addr) - if migrdb.GetCurrentAccountHash() != accIt.Hash() { - return nil, nil, 0, fmt.Errorf("preimage file does not match account hash: %s != %s", crypto.Keccak256Hash(addr[:]), accIt.Hash()) - } - preimageSeek += int64(len(addr)) - } - - const maxMovedCount = 10000 - // mkv will be assiting in the collection of up to maxMovedCount key values to be migrated to the VKT. - // It has internal caches to do efficient MPT->VKT key calculations, which will be discarded after - // this function. - mkv := newKeyValueMigrator() - // move maxCount accounts into the verkle tree, starting with the - // slots from the previous account. - count := 0 - - // if less than maxCount slots were moved, move to the next account - for count < maxMovedCount { - acc, err := types.FullAccount(accIt.Account()) - if err != nil { - log.Error("Invalid account encountered during traversal", "error", err) - return nil, nil, 0, err - } - vkt.SetStorageRootConversion(*migrdb.GetCurrentAccountAddress(), acc.Root) - - // Start with processing the storage, because once the account is - // converted, the `stateRoot` field loses its meaning. Which means - // that it opens the door to a situation in which the storage isn't - // converted, but it can not be found since the account was and so - // there is no way to find the MPT storage from the information found - // in the verkle account. - // Note that this issue can still occur if the account gets written - // to during normal block execution. A mitigation strategy has been - // introduced with the `*StorageRootConversion` fields in VerkleDB. - if acc.HasStorage() { - stIt, err := statedb.Snaps().StorageIterator(mpt.Hash(), accIt.Hash(), migrdb.GetCurrentSlotHash()) - if err != nil { - return nil, nil, 0, err - } - stIt.Next() - - // fdb.StorageProcessed will be initialized to `true` if the - // entire storage for an account was not entirely processed - // by the previous block. This is used as a signal to resume - // processing the storage for that account where we left off. - // If the entire storage was processed, then the iterator was - // created in vain, but it's ok as this will not happen often. 
- for ; !migrdb.GetStorageProcessed() && count < maxMovedCount; count++ { - var ( - value []byte // slot value after RLP decoding - safeValue [32]byte // 32-byte aligned value - ) - if err := rlp.DecodeBytes(stIt.Slot(), &value); err != nil { - return nil, nil, 0, fmt.Errorf("error decoding bytes %x: %w", stIt.Slot(), err) - } - copy(safeValue[32-len(value):], value) - - var slotnr []byte - if hasPreimagesBin { - var s [32]byte - slotnr = s[:] - if _, err := io.ReadFull(fpreimages, slotnr); err != nil { - return nil, nil, 0, fmt.Errorf("reading preimage file: %s", err) - } - } else { - slotnr = rawdb.ReadPreimage(migrdb.DiskDB(), stIt.Hash()) - if len(slotnr) != 32 { - return nil, nil, 0, fmt.Errorf("slotnr len is zero is not 32: %d", len(slotnr)) - } - } - if crypto.Keccak256Hash(slotnr[:]) != stIt.Hash() { - return nil, nil, 0, fmt.Errorf("preimage file does not match storage hash: %s!=%s", crypto.Keccak256Hash(slotnr), stIt.Hash()) - } - preimageSeek += int64(len(slotnr)) - - mkv.addStorageSlot(migrdb.GetCurrentAccountAddress().Bytes(), slotnr, safeValue[:]) - - // advance the storage iterator - migrdb.SetStorageProcessed(!stIt.Next()) - if !migrdb.GetStorageProcessed() { - migrdb.SetCurrentSlotHash(stIt.Hash()) - } - } - stIt.Release() - } - - // If the maximum number of leaves hasn't been reached, then - // it means that the storage has finished processing (or none - // was available for this account) and that the account itself - // can be processed. - if count < maxMovedCount { - count++ // count increase for the account itself - - mkv.addAccount(migrdb.GetCurrentAccountAddress().Bytes(), acc) - vkt.ClearStrorageRootConversion(*migrdb.GetCurrentAccountAddress()) - - // Store the account code if present - if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) { - code := rawdb.ReadCode(statedb.Database().DiskDB(), common.BytesToHash(acc.CodeHash)) - chunks := trie.ChunkifyCode(code) - - mkv.addAccountCode(migrdb.GetCurrentAccountAddress().Bytes(), uint64(len(code)), chunks) - } - - // reset storage iterator marker for next account - migrdb.SetStorageProcessed(false) - migrdb.SetCurrentSlotHash(common.Hash{}) - - // Move to the next account, if available - or end - // the transition otherwise. - if accIt.Next() { - var addr common.Address - if hasPreimagesBin { - if _, err := io.ReadFull(fpreimages, addr[:]); err != nil { - return nil, nil, 0, fmt.Errorf("reading preimage file: %s", err) - } - } else { - addr = common.BytesToAddress(rawdb.ReadPreimage(migrdb.DiskDB(), accIt.Hash())) - if len(addr) != 20 { - return nil, nil, 0, fmt.Errorf("account address len is zero is not 20: %d", len(addr)) - } - } - // fmt.Printf("account switch: %s != %s\n", crypto.Keccak256Hash(addr[:]), accIt.Hash()) - if crypto.Keccak256Hash(addr[:]) != accIt.Hash() { - return nil, nil, 0, fmt.Errorf("preimage file does not match account hash: %s != %s", crypto.Keccak256Hash(addr[:]), accIt.Hash()) - } - preimageSeek += int64(len(addr)) - migrdb.SetCurrentAccountAddress(addr) - } else { - // case when the account iterator has - // reached the end but count < maxCount - migrdb.EndVerkleTransition() - break - } - } - } - migrdb.SetCurrentPreimageOffset(preimageSeek) - - log.Info("Collected key values from base tree", "count", count, "duration", time.Since(now), "last account", statedb.Database().GetCurrentAccountHash()) - - // Take all the collected key-values and prepare the new leaf values. 
- // This fires a background routine that will start doing the work that - // migrateCollectedKeyValues() will use to insert into the tree. - // - // TODO: Now both prepare() and migrateCollectedKeyValues() are next to each other, but - // after we fix an existing bug, we can call prepare() before the block execution and - // let it do the work in the background. After the block execution and finalization - // finish, we can call migrateCollectedKeyValues() which should already find everything ready. - mkv.prepare() - now = time.Now() - if err := mkv.migrateCollectedKeyValues(tt.Overlay()); err != nil { - return nil, nil, 0, fmt.Errorf("could not migrate key values: %w", err) - } - log.Info("Inserted key values in overlay tree", "count", count, "duration", time.Since(now)) + // Perform the overlay transition, if relevant + if err := OverlayVerkleTransition(statedb); err != nil { + return nil, nil, 0, fmt.Errorf("error performing verkle overlay transition: %w", err) } // Finalize the block, applying any consensus engine specific extras (e.g. block rewards) From b4c3b59a85f740a021bd81ec3dee79f174b2b5b8 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 25 Sep 2023 14:44:21 +0200 Subject: [PATCH 27/99] add pre-pbss rebase branches to CI (#270) * add post-pbss rebase branches to CI * fix go version in CI * fix linter issues * upgrade go version to 1.21.1 to avoid github deploy error --- .github/workflows/go.yml | 8 ++++---- core/state_processor.go | 2 +- trie/utils/verkle.go | 1 - 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 5ae526f1eedc..61542140b004 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -4,7 +4,7 @@ on: push: branches: [ master ] pull_request: - branches: [ master, verkle-trie-proof-in-block-rebased, verkle-trie-post-merge, beverly-hills-head, 'verkle/replay-change-with-tree-group-tryupdate' ] + branches: [ master, verkle-trie-proof-in-block-rebased, verkle-trie-post-merge, beverly-hills-head, 'verkle/replay-change-with-tree-group-tryupdate', beverly-hills-just-before-pbss, kaustinen-with-shapella ] workflow_dispatch: jobs: @@ -15,7 +15,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: 1.21.1 - name: Build run: go build -v ./... @@ -26,7 +26,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: 1.21.1 - name: Download golangci-lint run: wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s latest - name: Lint @@ -41,7 +41,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: 1.21.1 - name: Download precomputed points run: wget -nv https://github.com/gballet/go-verkle/releases/download/banderwagonv3/precomp -Otrie/utils/precomp - name: Test diff --git a/core/state_processor.go b/core/state_processor.go index 6c08bc03f323..5d10bceb1817 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -332,7 +332,7 @@ func (kvm *keyValueMigrator) prepare() { } wg.Wait() - // Step 2: Now that we have all stems (i.e: tree keys) calcualted, we can create the new leaves. + // Step 2: Now that we have all stems (i.e: tree keys) calculated, we can create the new leaves. 
nodeValues := make([]verkle.BatchNewLeafNodeData, len(kvm.leafData)) for i := range kvm.leafData { nodeValues[i] = kvm.leafData[i].leafNodeData diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index 07949ec65e98..17fdf1ade343 100644 --- a/trie/utils/verkle.go +++ b/trie/utils/verkle.go @@ -286,7 +286,6 @@ func GetTreeKeyStorageSlotTreeIndexes(storageKey []byte) (*uint256.Int, byte) { // In this branch, the tree-index is zero since we're in the account header, // and the sub-index is the LSB of the modified storage key. return zero, byte(pos[0] & 0xFF) - } // If the storage slot is in the main storage, we need to add the main storage offset. From b7648a503eb67eea8f15b2c4c05ca9e3ca34f9dc Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Wed, 16 Aug 2023 17:10:32 +0200 Subject: [PATCH 28/99] quell zero-tree message --- core/state/database.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/state/database.go b/core/state/database.go index 7ee8489d3f62..a7a7ef4dd78f 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -365,8 +365,9 @@ func (db *cachingDB) openStorageMPTrie(stateRoot common.Hash, address common.Add // OpenStorageTrie opens the storage trie of an account func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) { + // TODO this should only return a verkle tree if db.ended { - mpt, err := db.openStorageMPTrie(common.Hash{}, address, common.Hash{}, self) + mpt, err := db.openStorageMPTrie(types.EmptyRootHash, address, common.Hash{}, self) if err != nil { return nil, err } From 9db5d5c357051853fbe71710ebf331e0b18e10ba Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 29 Aug 2023 15:07:43 +0200 Subject: [PATCH 29/99] port kaustinen code on top of shapella-rebased branch activate proof generation on fork + remove code dups use go-verkle's post-state API to verify proofs (#262) use prague as the verkle activation fork (#263) upgrade to latest go-ipa activate verkle transition in "miner" (#265) fix: do not force cancunTime upon verkle activation workaround: do not use root translation in replay workaround: deactivate overlay transition for now fixes from trying to get the devnet to work (#267) this line was left out from the previous commit upgrade to go-verkle with fixed newvalue serialization fix: ensure point cache isn't nil in copy (#268) fix: dependency cycle in tests (#269) upgrade to latest go-verkle fix: write trie preimage data to db (#274) fix: zero-root in produced block + sync (#275) upgrade go-ipa fix build fix typo include review feedback add switch to add proofs to blocks (#278) add fee recipient to witness (#279) touch all fields in withdrawal account header (#277) --- beacon/engine/gen_ed.go | 74 ++++++++++++----------- beacon/engine/types.go | 74 ++++++++++++----------- cmd/geth/config.go | 6 +- cmd/geth/main.go | 2 +- cmd/utils/flags.go | 4 +- consensus/beacon/consensus.go | 76 +++++++++++++++++++++-- consensus/clique/clique.go | 2 +- consensus/consensus.go | 2 +- consensus/ethash/consensus.go | 6 +- core/blockchain.go | 14 +++-- core/chain_makers.go | 111 ++++++++++++++++++++-------------- core/genesis.go | 10 +-- core/state/database.go | 44 +++++--------- core/state/statedb.go | 6 -- core/state_processor_test.go | 3 +- core/state_transition.go | 2 +- core/types/block.go | 33 ++++++++++ core/vm/contracts.go | 2 +- core/vm/evm.go | 10 +-- 
core/vm/gas_table.go | 14 ++--- core/vm/instructions.go | 14 ++--- core/vm/interpreter.go | 2 +- core/vm/jump_table_export.go | 5 +- core/vm/operations_acl.go | 4 +- eth/backend.go | 4 +- eth/catalyst/api.go | 11 ++++ eth/ethconfig/config.go | 2 +- eth/ethconfig/gen_config.go | 10 +-- eth/tracers/api.go | 4 -- go.mod | 16 ++--- go.sum | 65 ++++++++++++-------- les/client.go | 4 +- light/trie.go | 4 ++ miner/worker.go | 30 ++++++--- params/config.go | 22 +------ trie/transition.go | 2 +- trie/utils/verkle_test.go | 2 +- trie/verkle.go | 75 ++++++++++------------- trie/verkle_test.go | 14 ++--- 39 files changed, 453 insertions(+), 332 deletions(-) diff --git a/beacon/engine/gen_ed.go b/beacon/engine/gen_ed.go index 6893d64a1626..2a92e7420407 100644 --- a/beacon/engine/gen_ed.go +++ b/beacon/engine/gen_ed.go @@ -17,23 +17,24 @@ var _ = (*executableDataMarshaling)(nil) // MarshalJSON marshals as JSON. func (e ExecutableData) MarshalJSON() ([]byte, error) { type ExecutableData struct { - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` - StateRoot common.Hash `json:"stateRoot" gencodec:"required"` - ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` - LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"` - Random common.Hash `json:"prevRandao" gencodec:"required"` - Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"` - GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` - ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"` - BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` - BlockHash common.Hash `json:"blockHash" gencodec:"required"` - Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` - Withdrawals []*types.Withdrawal `json:"withdrawals"` - BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` - ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` + ParentHash common.Hash `json:"parentHash" gencodec:"required"` + FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` + StateRoot common.Hash `json:"stateRoot" gencodec:"required"` + ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` + LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"` + Random common.Hash `json:"prevRandao" gencodec:"required"` + Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"` + GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` + ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"` + BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` + BlockHash common.Hash `json:"blockHash" gencodec:"required"` + Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` + Withdrawals []*types.Withdrawal `json:"withdrawals"` + BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` + ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` + ExecutionWitness *types.ExecutionWitness `json:"executionWitness"` } var enc ExecutableData enc.ParentHash = e.ParentHash @@ -58,29 +59,31 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) { enc.Withdrawals = e.Withdrawals enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed) enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas) + enc.ExecutionWitness = e.ExecutionWitness 
return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. func (e *ExecutableData) UnmarshalJSON(input []byte) error { type ExecutableData struct { - ParentHash *common.Hash `json:"parentHash" gencodec:"required"` - FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"` - StateRoot *common.Hash `json:"stateRoot" gencodec:"required"` - ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"` - LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"` - Random *common.Hash `json:"prevRandao" gencodec:"required"` - Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"` - GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"` - ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"` - BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` - BlockHash *common.Hash `json:"blockHash" gencodec:"required"` - Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` - Withdrawals []*types.Withdrawal `json:"withdrawals"` - BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` - ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` + ParentHash *common.Hash `json:"parentHash" gencodec:"required"` + FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"` + StateRoot *common.Hash `json:"stateRoot" gencodec:"required"` + ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"` + LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"` + Random *common.Hash `json:"prevRandao" gencodec:"required"` + Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"` + GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"` + ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"` + BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` + BlockHash *common.Hash `json:"blockHash" gencodec:"required"` + Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` + Withdrawals []*types.Withdrawal `json:"withdrawals"` + BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` + ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` + ExecutionWitness *types.ExecutionWitness `json:"executionWitness"` } var dec ExecutableData if err := json.Unmarshal(input, &dec); err != nil { @@ -154,5 +157,8 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error { if dec.ExcessBlobGas != nil { e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) } + if dec.ExecutionWitness != nil { + e.ExecutionWitness = dec.ExecutionWitness + } return nil } diff --git a/beacon/engine/types.go b/beacon/engine/types.go index f1801edd1a93..0066e9181756 100644 --- a/beacon/engine/types.go +++ b/beacon/engine/types.go @@ -64,6 +64,8 @@ type ExecutableData struct { Withdrawals []*types.Withdrawal `json:"withdrawals"` BlobGasUsed *uint64 `json:"blobGasUsed"` ExcessBlobGas *uint64 `json:"excessBlobGas"` + + ExecutionWitness *types.ExecutionWitness `json:"executionWitness"` } // JSON type overrides for executableData. 
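A minimal, standalone illustration (simplified types, not the geth originals) of how the generated codec above treats the new executionWitness field: the tag carries no omitempty, so a payload built without a witness still serializes the key with a null value, which existing consumers can ignore.

package main

import (
	"encoding/json"
	"fmt"
)

type witness struct {
	StateDiff json.RawMessage `json:"stateDiff"`
}

type payload struct {
	BlockHash        string   `json:"blockHash"`
	ExecutionWitness *witness `json:"executionWitness"`
}

func main() {
	out, _ := json.Marshal(payload{BlockHash: "0xabc"})
	fmt.Println(string(out)) // {"blockHash":"0xabc","executionWitness":null}
}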
@@ -208,24 +210,25 @@ func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash) withdrawalsRoot = &h } header := &types.Header{ - ParentHash: params.ParentHash, - UncleHash: types.EmptyUncleHash, - Coinbase: params.FeeRecipient, - Root: params.StateRoot, - TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)), - ReceiptHash: params.ReceiptsRoot, - Bloom: types.BytesToBloom(params.LogsBloom), - Difficulty: common.Big0, - Number: new(big.Int).SetUint64(params.Number), - GasLimit: params.GasLimit, - GasUsed: params.GasUsed, - Time: params.Timestamp, - BaseFee: params.BaseFeePerGas, - Extra: params.ExtraData, - MixDigest: params.Random, - WithdrawalsHash: withdrawalsRoot, - ExcessBlobGas: params.ExcessBlobGas, - BlobGasUsed: params.BlobGasUsed, + ParentHash: params.ParentHash, + UncleHash: types.EmptyUncleHash, + Coinbase: params.FeeRecipient, + Root: params.StateRoot, + TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)), + ReceiptHash: params.ReceiptsRoot, + Bloom: types.BytesToBloom(params.LogsBloom), + Difficulty: common.Big0, + Number: new(big.Int).SetUint64(params.Number), + GasLimit: params.GasLimit, + GasUsed: params.GasUsed, + Time: params.Timestamp, + BaseFee: params.BaseFeePerGas, + Extra: params.ExtraData, + MixDigest: params.Random, + WithdrawalsHash: withdrawalsRoot, + ExcessBlobGas: params.ExcessBlobGas, + BlobGasUsed: params.BlobGasUsed, + ExecutionWitness: params.ExecutionWitness, } block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */).WithWithdrawals(params.Withdrawals) if block.Hash() != params.BlockHash { @@ -238,23 +241,24 @@ func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash) // fields from the given block. It assumes the given block is post-merge block. 
func BlockToExecutableData(block *types.Block, fees *big.Int, blobs []kzg4844.Blob, commitments []kzg4844.Commitment, proofs []kzg4844.Proof) *ExecutionPayloadEnvelope { data := &ExecutableData{ - BlockHash: block.Hash(), - ParentHash: block.ParentHash(), - FeeRecipient: block.Coinbase(), - StateRoot: block.Root(), - Number: block.NumberU64(), - GasLimit: block.GasLimit(), - GasUsed: block.GasUsed(), - BaseFeePerGas: block.BaseFee(), - Timestamp: block.Time(), - ReceiptsRoot: block.ReceiptHash(), - LogsBloom: block.Bloom().Bytes(), - Transactions: encodeTransactions(block.Transactions()), - Random: block.MixDigest(), - ExtraData: block.Extra(), - Withdrawals: block.Withdrawals(), - BlobGasUsed: block.BlobGasUsed(), - ExcessBlobGas: block.ExcessBlobGas(), + BlockHash: block.Hash(), + ParentHash: block.ParentHash(), + FeeRecipient: block.Coinbase(), + StateRoot: block.Root(), + Number: block.NumberU64(), + GasLimit: block.GasLimit(), + GasUsed: block.GasUsed(), + BaseFeePerGas: block.BaseFee(), + Timestamp: block.Time(), + ReceiptsRoot: block.ReceiptHash(), + LogsBloom: block.Bloom().Bytes(), + Transactions: encodeTransactions(block.Transactions()), + Random: block.MixDigest(), + ExtraData: block.Extra(), + Withdrawals: block.Withdrawals(), + BlobGasUsed: block.BlobGasUsed(), + ExcessBlobGas: block.ExcessBlobGas(), + ExecutionWitness: block.ExecutionWitness(), } blobsBundle := BlobsBundleV1{ Commitments: make([]hexutil.Bytes, 0), diff --git a/cmd/geth/config.go b/cmd/geth/config.go index 1a3de04bd4d2..bf01c6f91857 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -171,9 +171,9 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { v := ctx.Uint64(utils.OverrideCancun.Name) cfg.Eth.OverrideCancun = &v } - if ctx.IsSet(utils.OverrideVerkle.Name) { - v := ctx.Uint64(utils.OverrideVerkle.Name) - cfg.Eth.OverrideVerkle = &v + if ctx.IsSet(utils.OverridePrague.Name) { + v := ctx.Uint64(utils.OverridePrague.Name) + cfg.Eth.OverridePrague = &v } backend, eth := utils.RegisterEthService(stack, &cfg.Eth) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index a239f88499a1..38fb755b4b5a 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -68,7 +68,7 @@ var ( utils.USBFlag, utils.SmartCardDaemonPathFlag, utils.OverrideCancun, - utils.OverrideVerkle, + utils.OverridePrague, utils.EnablePersonal, utils.TxPoolLocalsFlag, utils.TxPoolNoLocalsFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index c92f49d432b6..b927d0f94f83 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -268,8 +268,8 @@ var ( Usage: "Manually specify the Cancun fork timestamp, overriding the bundled setting", Category: flags.EthCategory, } - OverrideVerkle = &cli.Uint64Flag{ - Name: "override.verkle", + OverridePrague = &cli.Uint64Flag{ + Name: "override.prague", Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting", Category: flags.EthCategory, } diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 64be7b0005d3..94b316f19757 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" "github.com/holiman/uint256" ) @@ -329,14 +330,14 @@ func (beacon *Beacon) verifyHeaders(chain consensus.ChainHeaderReader, headers [ // Prepare implements consensus.Engine, initializing the difficulty field of a // header to conform to the beacon protocol. 
The changes are done inline. -func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { +func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB) error { // Transition isn't triggered yet, use the legacy rules for preparation. reached, err := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1) if err != nil { return err } if !reached { - return beacon.ethone.Prepare(chain, header) + return beacon.ethone.Prepare(chain, header, statedb) } header.Difficulty = beaconDifficulty return nil @@ -356,9 +357,17 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types. state.AddBalance(w.Address, amount) // The returned gas is not charged + state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.VersionLeafKey) state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.BalanceLeafKey) - } - // No block reward which is issued by consensus layer instead. + state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.NonceLeafKey) + state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.CodeKeccakLeafKey) + state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.CodeSizeLeafKey) + } + state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.VersionLeafKey) + state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.BalanceLeafKey) + state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.NonceLeafKey) + state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.CodeKeccakLeafKey) + state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.CodeSizeLeafKey) } // FinalizeAndAssemble implements consensus.Engine, setting the final state and @@ -384,8 +393,65 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea // Assign the final state root to header. header.Root = state.IntermediateRoot(true) + var ( + p *verkle.VerkleProof + k verkle.StateDiff + keys = state.Witness().Keys() + ) + if chain.Config().IsPrague(header.Number, header.Time) && chain.Config().ProofInBlock { + // Open the pre-tree to prove the pre-state against + parent := chain.GetHeaderByNumber(header.Number.Uint64() - 1) + if parent == nil { + return nil, fmt.Errorf("nil parent header for block %d", header.Number) + } + + preTrie, err := state.Database().OpenTrie(parent.Root) + if err != nil { + return nil, fmt.Errorf("error opening pre-state tree root: %w", err) + } + + var okpre, okpost bool + var vtrpre, vtrpost *trie.VerkleTrie + switch pre := preTrie.(type) { + case *trie.VerkleTrie: + vtrpre, okpre = preTrie.(*trie.VerkleTrie) + vtrpost, okpost = state.GetTrie().(*trie.VerkleTrie) + case *trie.TransitionTrie: + vtrpre = pre.Overlay() + okpre = true + post, _ := state.GetTrie().(*trie.TransitionTrie) + vtrpost = post.Overlay() + okpost = true + default: + panic("invalid tree type") + } + if okpre && okpost { + // Resolve values from the pre state, the post + // state should already have the values in memory. + // TODO: see if this can be captured at the witness + // level, like it used to. 
+ for _, key := range keys { + _, err := vtrpre.GetWithHashedKey(key) + if err != nil { + panic(err) + } + } + + if len(keys) > 0 { + p, k, err = trie.ProveAndSerialize(vtrpre, vtrpost, keys, vtrpre.FlatdbNodeResolver) + if err != nil { + return nil, fmt.Errorf("error generating verkle proof for block %d: %w", header.Number, err) + } + } + } + } + // Assemble and return the final block. - return types.NewBlockWithWithdrawals(header, txs, uncles, receipts, withdrawals, trie.NewStackTrie(nil)), nil + block := types.NewBlockWithWithdrawals(header, txs, uncles, receipts, withdrawals, trie.NewStackTrie(nil)) + if chain.Config().IsPrague(header.Number, header.Time) && chain.Config().ProofInBlock { + block.SetVerkleProof(p, k) + } + return block, nil } // Seal generates a new sealing request for the given input block and pushes diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index f708050abd13..23c7d32755b2 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -499,7 +499,7 @@ func (c *Clique) verifySeal(snap *Snapshot, header *types.Header, parents []*typ // Prepare implements consensus.Engine, preparing all the consensus fields of the // header for running the transactions on top. -func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { +func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header, _ *state.StateDB) error { // If the block isn't a checkpoint, cast a random vote (good enough for now) header.Coinbase = common.Address{} header.Nonce = types.BlockNonce{} diff --git a/consensus/consensus.go b/consensus/consensus.go index 3a2c2d222916..aa5ad43ede3d 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -81,7 +81,7 @@ type Engine interface { // Prepare initializes the consensus fields of a block header according to the // rules of a particular engine. The changes are executed inline. - Prepare(chain ChainHeaderReader, header *types.Header) error + Prepare(chain ChainHeaderReader, header *types.Header, state *state.StateDB) error // Finalize runs any post-transaction state modifications (e.g. block rewards // or process withdrawals) but does not assemble the block. diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 92f8100f6e63..d155a3521cbc 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -479,7 +479,7 @@ var DynamicDifficultyCalculator = makeDifficultyCalculator // Prepare implements consensus.Engine, initializing the difficulty field of a // header to conform to the ethash protocol. The changes are done inline. 
-func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { +func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header, _ *state.StateDB) error { parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) if parent == nil { return consensus.ErrUnknownAncestor @@ -568,7 +568,7 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header r.Div(r, big8) // This should not happen, but it's useful for replay tests - if config.IsVerkle(header.Number, header.Time) { + if config.IsPrague(header.Number, header.Time) { state.Witness().TouchAddressOnReadAndComputeGas(uncle.Coinbase.Bytes(), uint256.Int{}, utils.BalanceLeafKey) } state.AddBalance(uncle.Coinbase, r) @@ -576,7 +576,7 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header r.Div(blockReward, big32) reward.Add(reward, r) } - if config.IsVerkle(header.Number, header.Time) { + if config.IsPrague(header.Number, header.Time) { state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.BalanceLeafKey) state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.VersionLeafKey) state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.NonceLeafKey) diff --git a/core/blockchain.go b/core/blockchain.go index b3513ff4cd72..27a74de822bf 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -310,8 +310,8 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis // Make sure the state associated with the block is available head := bc.CurrentBlock() - // Declare the end of the verkle transition is need be - if bc.chainConfig.Rules(head.Number, false /* XXX */, head.Time).IsVerkle { + // Declare the end of the verkle transition if need be + if bc.chainConfig.Rules(head.Number, false /* XXX */, head.Time).IsPrague { bc.stateCache.EndVerkleTransition() } @@ -411,7 +411,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis Recovery: recover, NoBuild: bc.cacheConfig.SnapshotNoBuild, AsyncBuild: !bc.cacheConfig.SnapshotWait, - Verkle: chainConfig.IsVerkle(head.Number, head.Time), + Verkle: chainConfig.IsPrague(head.Number, head.Time), } bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root) } @@ -1347,6 +1347,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. if err := blockBatch.Write(); err != nil { log.Crit("Failed to write block into disk", "err", err) } + state.Database().TrieDB().WritePreimages() // Commit all cached state changes into underlying memory database. 
root, err := state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number())) if err != nil { @@ -2531,8 +2532,11 @@ func (bc *BlockChain) GetTrieFlushInterval() time.Duration { return time.Duration(bc.flushInterval.Load()) } -func (bc *BlockChain) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, cancunTime *uint64) { - bc.stateCache.StartVerkleTransition(originalRoot, translatedRoot, chainConfig, cancunTime) +func (bc *BlockChain) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, pragueTime *uint64) { + bc.stateCache.StartVerkleTransition(originalRoot, translatedRoot, chainConfig, pragueTime) +} +func (bc *BlockChain) ReorgThroughVerkleTransition() { + bc.stateCache.ReorgThroughVerkleTransition() } func (bc *BlockChain) EndVerkleTransition() { diff --git a/core/chain_makers.go b/core/chain_makers.go index d2aaef260971..5d8ade8a0fb0 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -357,7 +357,7 @@ func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, if err != nil { panic(err) } - if genesis.Config != nil && genesis.Config.IsVerkle(genesis.ToBlock().Number(), genesis.ToBlock().Time()) { + if genesis.Config != nil && genesis.Config.IsPrague(genesis.ToBlock().Number(), genesis.ToBlock().Time()) { blocks, receipts, _, _ := GenerateVerkleChain(genesis.Config, genesis.ToBlock(), engine, db, n, gen) return db, blocks, receipts } @@ -372,8 +372,13 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine proofs := make([]*verkle.VerkleProof, 0, n) keyvals := make([]verkle.StateDiff, 0, n) blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n) - chainreader := &fakeChainReader{config: config} - var preStateTrie *trie.VerkleTrie + chainreader := &generatedLinearChainReader{ + config: config, + // GenerateVerkleChain should only be called with the genesis block + // as parent. + genesis: parent, + chain: blocks, + } genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) { b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} b.header = makeHeader(chainreader, parent, statedb, b.engine) @@ -412,48 +417,8 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine panic(fmt.Sprintf("trie write error: %v", err)) } - // Generate an associated verkle proof - tr := preState.GetTrie() - if !tr.IsVerkle() { - panic("tree should be verkle") - } - - vtr := tr.(*trie.VerkleTrie) - // Make sure all keys are resolved before - // building the proof. Ultimately, node - // resolution can be done with a prefetcher - // or from GetCommitmentsAlongPath. - kvs := make(map[string][]byte) - keys := statedb.Witness().Keys() - for _, key := range keys { - v, err := vtr.GetWithHashedKey(key) - if err != nil { - panic(err) - } - kvs[string(key)] = v - } - - // Initialize the preStateTrie if it is nil, this should - // correspond to the genesis block. This is a workaround - // needed until the main verkle PR is rebased on top of - // PBSS. 
- if preStateTrie == nil { - preStateTrie = vtr - } - - vtr.Hash() - p, k, err := preStateTrie.ProveAndSerialize(statedb.Witness().Keys()) - if err != nil { - panic(err) - } - proofs = append(proofs, p) - keyvals = append(keyvals, k) - - // save the current state of the trie for producing the proof for the next block, - // since reading it from disk is broken with the intermediate PBSS-like system we - // have: it will read the post-state as this is the only state present on disk. - // This is a workaround needed until the main verkle PR is rebased on top of PBSS. - preStateTrie = statedb.GetTrie().(*trie.VerkleTrie) + proofs = append(proofs, block.ExecutionWitness().VerkleProof) + keyvals = append(keyvals, block.ExecutionWitness().StateDiff) return block, b.receipts } @@ -558,3 +523,59 @@ func (cr *fakeChainReader) GetHeaderByHash(hash common.Hash) *types.Header func (cr *fakeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { return nil } func (cr *fakeChainReader) GetBlock(hash common.Hash, number uint64) *types.Block { return nil } func (cr *fakeChainReader) GetTd(hash common.Hash, number uint64) *big.Int { return nil } + +type generatedLinearChainReader struct { + config *params.ChainConfig + genesis *types.Block + chain []*types.Block +} + +func (v *generatedLinearChainReader) Config() *params.ChainConfig { + return v.config +} + +func (v *generatedLinearChainReader) CurrentHeader() *types.Header { + return nil +} + +func (v *generatedLinearChainReader) GetHeader(_ common.Hash, number uint64) *types.Header { + if number == 0 { + return v.genesis.Header() + } + return v.chain[number-1].Header() +} + +func (v *generatedLinearChainReader) GetHeaderByNumber(number uint64) *types.Header { + if number == 0 { + return v.genesis.Header() + } + return v.chain[number-1].Header() +} + +func (v *generatedLinearChainReader) GetHeaderByHash(hash common.Hash) *types.Header { + if hash == v.genesis.Hash() { + return v.genesis.Header() + } + + for _, block := range v.chain { + if block.Hash() == hash { + return block.Header() + } + } + + return nil +} + +func (v *generatedLinearChainReader) GetBlock(_ common.Hash, number uint64) *types.Block { + if number == 0 { + return v.genesis + } + return v.chain[number-1] +} + +func (v *generatedLinearChainReader) GetTd(_ common.Hash, number uint64) *big.Int { + if number == 0 { + return v.genesis.Difficulty() + } + return v.chain[number-1].Difficulty() +} diff --git a/core/genesis.go b/core/genesis.go index 6b521369bcbe..6ea848508270 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -125,7 +125,7 @@ func (ga *GenesisAlloc) deriveHash(cfg *params.ChainConfig, timestamp uint64) (c // Create an ephemeral in-memory database for computing hash, // all the derived states will be discarded to not pollute disk. db := state.NewDatabase(rawdb.NewMemoryDatabase()) - if cfg.IsVerkle(big.NewInt(int64(0)), timestamp) { + if cfg.IsPrague(big.NewInt(int64(0)), timestamp) { db.EndVerkleTransition() } statedb, err := state.New(types.EmptyRootHash, db, nil) @@ -289,7 +289,7 @@ func (e *GenesisMismatchError) Error() string { // ChainOverrides contains the changes to chain config. type ChainOverrides struct { OverrideCancun *uint64 - OverrideVerkle *uint64 + OverridePrague *uint64 } // SetupGenesisBlock writes or updates the genesis block in db. 
@@ -318,8 +318,8 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen if overrides != nil && overrides.OverrideCancun != nil { config.CancunTime = overrides.OverrideCancun } - if overrides != nil && overrides.OverrideVerkle != nil { - config.VerkleTime = overrides.OverrideVerkle + if overrides != nil && overrides.OverridePrague != nil { + config.PragueTime = overrides.OverridePrague } } } @@ -545,7 +545,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block // Note the state changes will be committed in hash-based scheme, use Commit // if path-scheme is preferred. func (g *Genesis) MustCommit(db ethdb.Database) *types.Block { - triedb := trie.NewDatabaseWithConfig(db, &trie.Config{Verkle: g.Config != nil && g.Config.IsVerkle(big.NewInt(int64(g.Number)), g.Timestamp)}) + triedb := trie.NewDatabaseWithConfig(db, &trie.Config{Verkle: g.Config != nil && g.Config.IsPrague(big.NewInt(int64(g.Number)), g.Timestamp)}) block, err := g.Commit(db, triedb) if err != nil { panic(err) diff --git a/core/state/database.go b/core/state/database.go index a7a7ef4dd78f..3caee28c8f33 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -67,6 +67,8 @@ type Database interface { StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, cancunTime *uint64) + ReorgThroughVerkleTransition() + EndVerkleTransition() InTransition() bool @@ -210,7 +212,7 @@ func (db *cachingDB) Transitioned() bool { } // Fork implements the fork -func (db *cachingDB) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, cancunTime *uint64) { +func (db *cachingDB) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, pragueTime *uint64) { fmt.Println(` __________.__ .__ .__ __ .__ .__ ____ \__ ___| |__ ____ ____ | | ____ ______ | |__ _____ _____/ |_ | |__ _____ ______ __ _ _|__| ____ / ___\ ______ @@ -219,11 +221,18 @@ func (db *cachingDB) StartVerkleTransition(originalRoot, translatedRoot common.H |____| |___| /\___ \___ |____/\___ | __/|___| (____ |___| |__| |___| (____ /_____/ \/\_/ |__|___| /_____//_____/ |__|`) db.started = true - db.AddTranslation(originalRoot, translatedRoot) + db.ended = false + // db.AddTranslation(originalRoot, translatedRoot) db.baseRoot = originalRoot // initialize so that the first storage-less accounts are processed db.StorageProcessed = true - chainConfig.CancunTime = cancunTime + if pragueTime != nil { + chainConfig.PragueTime = pragueTime + } +} + +func (db *cachingDB) ReorgThroughVerkleTransition() { + db.ended, db.started = false, false } func (db *cachingDB) EndVerkleTransition() { @@ -241,25 +250,6 @@ func (db *cachingDB) EndVerkleTransition() { db.ended = true } -func (db *cachingDB) AddTranslation(orig, trans common.Hash) { - // TODO make this persistent - db.translatedRootsLock.Lock() - defer db.translatedRootsLock.Unlock() - db.translatedRoots[db.translationIndex] = trans - db.origRoots[db.translationIndex] = orig - db.translationIndex = (db.translationIndex + 1) % len(db.translatedRoots) -} - -func (db *cachingDB) getTranslation(orig common.Hash) common.Hash { - db.translatedRootsLock.RLock() - defer db.translatedRootsLock.RUnlock() - for i, o := range db.origRoots { - if o == orig { - return db.translatedRoots[i] - } - } - return common.Hash{} -} type cachingDB struct { disk ethdb.KeyValueStore @@ -320,13 +310,8 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { // TODO separate 
both cases when I can be certain that it won't // find a Verkle trie where is expects a Transitoion trie. if db.started || db.ended { - var r common.Hash - if db.ended { - r = root - } else { - r = db.getTranslation(root) - } - vkt, err := db.openVKTrie(r) + // NOTE this is a kaustinen-only change, it will break replay + vkt, err := db.openVKTrie(root) if err != nil { return nil, err } @@ -512,7 +497,6 @@ func (db *cachingDB) GetStorageProcessed() bool { } func (db *cachingDB) AddRootTranslation(originalRoot, translatedRoot common.Hash) { - db.AddTranslation(originalRoot, translatedRoot) } func (db *cachingDB) SetLastMerkleRoot(root common.Hash) { diff --git a/core/state/statedb.go b/core/state/statedb.go index 48d2a8e509db..e5c6669da751 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -191,12 +191,6 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) } if sdb.snaps != nil { if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap == nil { - if db, ok := db.(*cachingDB); ok { - trans := db.getTranslation(root) - if trans != (common.Hash{}) { - sdb.snap = sdb.snaps.Snapshot(trans) - } - } } } return sdb, nil diff --git a/core/state_processor_test.go b/core/state_processor_test.go index ffb3285b8f91..77088f86faa6 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -451,9 +451,10 @@ func TestProcessVerkle(t *testing.T) { LondonBlock: big.NewInt(0), Ethash: new(params.EthashConfig), ShanghaiTime: u64(0), - VerkleTime: u64(0), + PragueTime: u64(0), TerminalTotalDifficulty: common.Big0, TerminalTotalDifficultyPassed: true, + ProofInBlock: true, } signer = types.LatestSigner(config) testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") diff --git a/core/state_transition.go b/core/state_transition.go index 14dd86e4987d..2bdfef1c0552 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -403,7 +403,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } st.gasRemaining -= gas - if rules.IsVerkle { + if rules.IsPrague { targetAddr := msg.To originAddr := msg.From diff --git a/core/types/block.go b/core/types/block.go index 4452f4bb21f8..a704d23043ca 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rlp" + "github.com/gballet/go-verkle" ) // A BlockNonce is a 64-bit hash which proves (combined with the @@ -58,6 +59,18 @@ func (n *BlockNonce) UnmarshalText(input []byte) error { return hexutil.UnmarshalFixedText("BlockNonce", input, n[:]) } +type ExecutionWitness struct { + StateDiff verkle.StateDiff `json:"stateDiff"` + VerkleProof *verkle.VerkleProof `json:"verkleProof"` +} + +func (ew *ExecutionWitness) Copy() *ExecutionWitness { + return &ExecutionWitness{ + StateDiff: ew.StateDiff.Copy(), + VerkleProof: ew.VerkleProof.Copy(), + } +} + //go:generate go run github.com/fjl/gencodec -type Header -field-override headerMarshaling -out gen_header_json.go //go:generate go run ../../rlp/rlpgen -type Header -out gen_header_rlp.go @@ -90,6 +103,8 @@ type Header struct { // ExcessBlobGas was added by EIP-4844 and is ignored in legacy headers. 
ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"` + + ExecutionWitness *ExecutionWitness `json:"executionWitness" rlp:"-"` } // field type overrides for gencodec @@ -292,6 +307,10 @@ func CopyHeader(h *Header) *Header { cpy.BlobGasUsed = new(uint64) *cpy.BlobGasUsed = *h.BlobGasUsed } + if h.ExecutionWitness != nil { + cpy.ExecutionWitness = h.ExecutionWitness.Copy() + + } return &cpy } @@ -380,6 +399,8 @@ func (b *Block) BlobGasUsed() *uint64 { func (b *Block) Header() *Header { return CopyHeader(b.header) } +func (b *Block) ExecutionWitness() *ExecutionWitness { return b.header.ExecutionWitness } + // Body returns the non-header content of the block. func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles, b.withdrawals} } @@ -401,6 +422,18 @@ func (b *Block) SanityCheck() error { return b.header.SanityCheck() } +func (b *Block) SetVerkleProof(vp *verkle.VerkleProof, statediff verkle.StateDiff) { + b.header.ExecutionWitness = &ExecutionWitness{statediff, vp} + if statediff == nil { + b.header.ExecutionWitness.StateDiff = []verkle.StemStateDiff{} + } + if vp == nil { + b.header.ExecutionWitness.VerkleProof = &verkle.VerkleProof{ + IPAProof: &verkle.IPAProof{}, + } + } +} + type writeCounter uint64 func (c *writeCounter) Write(b []byte) (int, error) { diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 2942755f3fae..036d18a078fe 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -150,7 +150,7 @@ func init() { // ActivePrecompiles returns the precompiles enabled with the current configuration. func ActivePrecompiles(rules params.Rules) []common.Address { switch { - case rules.IsVerkle: + case rules.IsPrague: return PrecompiledAddressesBerlin case rules.IsCancun: return PrecompiledAddressesCancun diff --git a/core/vm/evm.go b/core/vm/evm.go index 73c3c2150617..36bd6f32f8f7 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -41,7 +41,7 @@ type ( func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) { var precompiles map[common.Address]PrecompiledContract switch { - case evm.chainRules.IsVerkle: + case evm.chainRules.IsPrague: precompiles = PrecompiledContractsBerlin case evm.chainRules.IsCancun: precompiles = PrecompiledContractsCancun @@ -137,7 +137,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig chainConfig: chainConfig, chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time), } - if txCtx.Accesses == nil && chainConfig.IsVerkle(blockCtx.BlockNumber, blockCtx.Time) { + if txCtx.Accesses == nil && chainConfig.IsPrague(blockCtx.BlockNumber, blockCtx.Time) { txCtx.Accesses = evm.StateDB.(*state.StateDB).NewAccessWitness() } evm.interpreter = NewEVMInterpreter(evm) @@ -147,7 +147,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig // Reset resets the EVM with a new transaction context.Reset // This is not threadsafe and should only be done very cautiously. 
func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) { - if txCtx.Accesses == nil && evm.chainRules.IsVerkle { + if txCtx.Accesses == nil && evm.chainRules.IsPrague { txCtx.Accesses = evm.StateDB.(*state.StateDB).NewAccessWitness() } evm.TxContext = txCtx @@ -212,7 +212,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas var creation bool if !evm.StateDB.Exist(addr) { if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 { - if evm.chainRules.IsVerkle { + if evm.chainRules.IsPrague { // proof of absence tryConsumeGas(&gas, evm.Accesses.TouchAndChargeProofOfAbsence(caller.Address().Bytes())) } @@ -529,7 +529,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } } - if err == nil && evm.chainRules.IsVerkle { + if err == nil && evm.chainRules.IsPrague { if !contract.UseGas(evm.Accesses.TouchAndChargeContractCreateCompleted(address.Bytes()[:])) { evm.StateDB.RevertToSnapshot(snapshot) err = ErrOutOfGas diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index dc307b82904d..f367c3c92978 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -101,7 +101,7 @@ var ( func gasExtCodeSize(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { usedGas := uint64(0) slot := stack.Back(0) - if evm.chainRules.IsVerkle { + if evm.chainRules.IsPrague { usedGas += evm.TxContext.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, trieUtils.CodeSizeLeafKey) } @@ -111,7 +111,7 @@ func gasExtCodeSize(evm *EVM, contract *Contract, stack *Stack, mem *Memory, mem func gasSLoad(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { usedGas := uint64(0) - if evm.chainRules.IsVerkle { + if evm.chainRules.IsPrague { where := stack.Back(0) treeIndex, subIndex := trieUtils.GetTreeKeyStorageSlotTreeIndexes(where.Bytes()) usedGas += evm.Accesses.TouchAddressOnReadAndComputeGas(contract.Address().Bytes(), *treeIndex, subIndex) @@ -423,7 +423,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } - if evm.chainRules.IsVerkle { + if evm.chainRules.IsPrague { if _, isPrecompile := evm.precompile(address); !isPrecompile { gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes()[:])) if overflow { @@ -463,7 +463,7 @@ func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memory if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } - if evm.chainRules.IsVerkle { + if evm.chainRules.IsPrague { address := common.Address(stack.Back(1).Bytes20()) if _, isPrecompile := evm.precompile(address); !isPrecompile { gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) @@ -488,7 +488,7 @@ func gasDelegateCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } - if evm.chainRules.IsVerkle { + if evm.chainRules.IsPrague { address := common.Address(stack.Back(1).Bytes20()) if _, isPrecompile := evm.precompile(address); !isPrecompile { gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) @@ -513,7 +513,7 @@ func gasStaticCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memo if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, 
ErrGasUintOverflow } - if evm.chainRules.IsVerkle { + if evm.chainRules.IsPrague { address := common.Address(stack.Back(1).Bytes20()) if _, isPrecompile := evm.precompile(address); !isPrecompile { gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) @@ -542,7 +542,7 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me } } - if evm.chainRules.IsVerkle { + if evm.chainRules.IsPrague { // TODO turn this into a panic (when we are sure this method // will never execute when verkle is enabled) log.Warn("verkle witness accumulation not supported for selfdestruct") diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 13252eda9df9..84edacb1b9bc 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -345,7 +345,7 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.peek() cs := uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())) - if interpreter.evm.chainRules.IsVerkle { + if interpreter.evm.chainRules.IsPrague { statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, trieUtils.CodeSizeLeafKey) scope.Contract.UseGas(statelessGas) } @@ -373,7 +373,7 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ contractAddr := scope.Contract.Address() paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(scope.Contract.Code, uint64CodeOffset, length.Uint64()) - if interpreter.evm.chainRules.IsVerkle { + if interpreter.evm.chainRules.IsPrague { scope.Contract.UseGas(touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], copyOffset, nonPaddedCopyLength, uint64(len(scope.Contract.Code)), interpreter.evm.Accesses)) } scope.Memory.Set(memOffset.Uint64(), uint64(len(paddedCodeCopy)), paddedCodeCopy) @@ -426,7 +426,7 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) uint64CodeOffset = 0xffffffffffffffff } addr := common.Address(a.Bytes20()) - if interpreter.evm.chainRules.IsVerkle { + if interpreter.evm.chainRules.IsPrague { code := interpreter.evm.StateDB.GetCode(addr) contract := &Contract{ Code: code, @@ -641,7 +641,7 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = scope.Contract.Gas ) - if interpreter.evm.chainRules.IsVerkle { + if interpreter.evm.chainRules.IsPrague { contractAddress := crypto.CreateAddress(scope.Contract.Address(), interpreter.evm.StateDB.GetNonce(scope.Contract.Address())) statelessGas := interpreter.evm.Accesses.TouchAndChargeContractCreateInit(contractAddress.Bytes()[:], value.Sign() != 0) if !tryConsumeGas(&gas, statelessGas) { @@ -695,7 +695,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = scope.Contract.Gas ) - if interpreter.evm.chainRules.IsVerkle { + if interpreter.evm.chainRules.IsPrague { codeAndHash := &codeAndHash{code: input} contractAddress := crypto.CreateAddress2(scope.Contract.Address(), salt.Bytes32(), codeAndHash.Hash().Bytes()) statelessGas := interpreter.evm.Accesses.TouchAndChargeContractCreateInit(contractAddress.Bytes()[:], endowment.Sign() != 0) @@ -959,7 +959,7 @@ func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by if *pc < 
codeLen { scope.Stack.push(integer.SetUint64(uint64(scope.Contract.Code[*pc]))) - if interpreter.evm.chainRules.IsVerkle && *pc%31 == 0 { + if interpreter.evm.chainRules.IsPrague && *pc%31 == 0 { // touch next chunk if PUSH1 is at the boundary. if so, *pc has // advanced past this boundary. contractAddr := scope.Contract.Address() @@ -987,7 +987,7 @@ func makePush(size uint64, pushByteSize int) executionFunc { endMin = startMin + pushByteSize } - if interpreter.evm.chainRules.IsVerkle { + if interpreter.evm.chainRules.IsPrague { contractAddr := scope.Contract.Address() statelessGas := touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], uint64(startMin), uint64(pushByteSize), uint64(len(scope.Contract.Code)), interpreter.evm.Accesses) scope.Contract.UseGas(statelessGas) diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index ad4222b447b7..1b980ccb131d 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -56,7 +56,7 @@ func NewEVMInterpreter(evm *EVM) *EVMInterpreter { // If jump table was not initialised we set the default one. var table *JumpTable switch { - case evm.chainRules.IsVerkle: + case evm.chainRules.IsPrague: // TODO replace with prooper instruction set when fork is specified table = &shanghaiInstructionSet case evm.chainRules.IsCancun: diff --git a/core/vm/jump_table_export.go b/core/vm/jump_table_export.go index 75bcb8d5bf9e..2ceb75e73273 100644 --- a/core/vm/jump_table_export.go +++ b/core/vm/jump_table_export.go @@ -26,11 +26,8 @@ import ( // the rules. func LookupInstructionSet(rules params.Rules) (JumpTable, error) { switch { - case rules.IsVerkle: - // TODO set to newCancunInstructionSet() when verkle-fork is defined - return newShanghaiInstructionSet(), errors.New("verkle-fork not defined yet") case rules.IsPrague: - return newCancunInstructionSet(), errors.New("prague-fork not defined yet") + return newShanghaiInstructionSet(), errors.New("prague-fork not defined yet") case rules.IsCancun: return newCancunInstructionSet(), nil case rules.IsShanghai: diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 7d2296c4dc01..4d4fe8aed3e6 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -52,7 +52,7 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc { } value := common.Hash(y.Bytes32()) - if evm.chainRules.IsVerkle { + if evm.chainRules.IsPrague { treeIndex, subIndex := utils.GetTreeKeyStorageSlotTreeIndexes(x.Bytes()) cost += evm.Accesses.TouchAddressOnWriteAndComputeGas(contract.Address().Bytes(), *treeIndex, subIndex) } @@ -111,7 +111,7 @@ func gasSLoadEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me slot := common.Hash(loc.Bytes32()) var gasUsed uint64 - if evm.chainRules.IsVerkle { + if evm.chainRules.IsPrague { where := stack.Back(0) treeIndex, subIndex := utils.GetTreeKeyStorageSlotTreeIndexes(where.Bytes()) addr := contract.Address() diff --git a/eth/backend.go b/eth/backend.go index 667200bcedda..a6c80159077d 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -198,8 +198,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if config.OverrideCancun != nil { overrides.OverrideCancun = config.OverrideCancun } - if config.OverrideVerkle != nil { - overrides.OverrideVerkle = config.OverrideVerkle + if config.OverridePrague != nil { + overrides.OverridePrague = config.OverridePrague } eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit) if err != nil { 
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 1a221941427a..63079415fc14 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -528,6 +528,17 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe log.Warn("Invalid timestamp", "parent", block.Time(), "block", block.Time()) return api.invalid(errors.New("invalid timestamp"), parent.Header()), nil } + // Trigger the start of the verkle conversion if we're at the right block + if api.eth.BlockChain().Config().IsPrague(block.Number(), block.Time()) && !api.eth.BlockChain().Config().IsPrague(parent.Number(), parent.Time()) { + parent := api.eth.BlockChain().GetHeaderByNumber(block.NumberU64() - 1) + if !api.eth.BlockChain().Config().IsPrague(parent.Number, parent.Time) { + api.eth.BlockChain().StartVerkleTransition(parent.Root, common.Hash{}, api.eth.BlockChain().Config(), nil) + } + } + // Reset db merge state in case of a reorg + if !api.eth.BlockChain().Config().IsPrague(block.Number(), block.Time()) { + api.eth.BlockChain().ReorgThroughVerkleTransition() + } // Another cornercase: if the node is in snap sync mode, but the CL client // tries to make it import a block. That should be denied as pushing something // into the database directly will conflict with the assumptions of snap sync diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 4bc8b8dc6c6e..4606b60408dd 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -157,7 +157,7 @@ type Config struct { OverrideCancun *uint64 `toml:",omitempty"` // OverrideVerkle (TODO: remove after the fork) - OverrideVerkle *uint64 `toml:",omitempty"` + OverridePrague *uint64 `toml:",omitempty"` } // CreateConsensusEngine creates a consensus engine for the given chain config. diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 324fbe380ea3..2ad499a53485 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -52,7 +52,7 @@ func (c Config) MarshalTOML() (interface{}, error) { RPCEVMTimeout time.Duration RPCTxFeeCap float64 OverrideCancun *uint64 `toml:",omitempty"` - OverrideVerkle *uint64 `toml:",omitempty"` + OverridePrague *uint64 `toml:",omitempty"` } var enc Config enc.Genesis = c.Genesis @@ -90,7 +90,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.RPCEVMTimeout = c.RPCEVMTimeout enc.RPCTxFeeCap = c.RPCTxFeeCap enc.OverrideCancun = c.OverrideCancun - enc.OverrideVerkle = c.OverrideVerkle + enc.OverridePrague = c.OverridePrague return &enc, nil } @@ -132,7 +132,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { RPCEVMTimeout *time.Duration RPCTxFeeCap *float64 OverrideCancun *uint64 `toml:",omitempty"` - OverrideVerkle *uint64 `toml:",omitempty"` + OverridePrague *uint64 `toml:",omitempty"` } var dec Config if err := unmarshal(&dec); err != nil { @@ -243,8 +243,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.OverrideCancun != nil { c.OverrideCancun = dec.OverrideCancun } - if dec.OverrideVerkle != nil { - c.OverrideVerkle = dec.OverrideVerkle + if dec.OverridePrague != nil { + c.OverridePrague = dec.OverridePrague } return nil } diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 740a38ab9fbf..5e90180df8d5 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -1020,10 +1020,6 @@ func overrideConfig(original *params.ChainConfig, override *params.ChainConfig) copy.PragueTime = timestamp canon = false } - if timestamp := override.VerkleTime; timestamp != nil { - copy.VerkleTime = 
timestamp - canon = false - } return copy, canon } diff --git a/go.mod b/go.mod index c133d4bd5ba4..77651a84e427 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,8 @@ require ( github.com/cespare/cp v0.1.0 github.com/cloudflare/cloudflare-go v0.14.0 github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 - github.com/consensys/gnark-crypto v0.10.0 + github.com/consensys/gnark-crypto v0.11.3-0.20230906172141-49815a21349a + github.com/crate-crypto/go-ipa v0.0.0-20230914135612-d1b03fcb8e58 github.com/crate-crypto/go-kzg-4844 v0.3.0 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set/v2 v2.1.0 @@ -25,7 +26,7 @@ require ( github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.0.0-20230725193842-b2d852dc666b + github.com/gballet/go-verkle v0.1.1-0.20230921123936-6a6b1f7a751c github.com/go-stack/stack v1.8.1 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.3.0 @@ -56,17 +57,17 @@ require ( github.com/rs/cors v1.7.0 github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible github.com/status-im/keycard-go v0.2.0 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/tyler-smith/go-bip39 v1.1.0 github.com/urfave/cli/v2 v2.24.1 go.uber.org/automaxprocs v1.5.2 - golang.org/x/crypto v0.9.0 + golang.org/x/crypto v0.10.0 golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc golang.org/x/sync v0.3.0 - golang.org/x/sys v0.10.0 - golang.org/x/text v0.9.0 + golang.org/x/sys v0.12.0 + golang.org/x/text v0.10.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.9.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0 @@ -85,14 +86,13 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.1.1 // indirect github.com/aws/smithy-go v1.1.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.7.0 // indirect + github.com/bits-and-blooms/bitset v1.8.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/redact v1.1.3 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/crate-crypto/go-ipa v0.0.0-20230710183535-d5eb1c4661bd // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect diff --git a/go.sum b/go.sum index 47603d0dbc18..fb217e376795 100644 --- a/go.sum +++ b/go.sum @@ -43,8 +43,8 @@ github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= -github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c= +github.com/bits-and-blooms/bitset v1.8.0/go.mod 
h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= @@ -75,8 +75,10 @@ github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/gnark-crypto v0.10.0 h1:zRh22SR7o4K35SoNqouS9J/TKHTyU2QWaj5ldehyXtA= -github.com/consensys/gnark-crypto v0.10.0/go.mod h1:Iq/P3HHl0ElSjsg2E1gsMwhAyxnxoKK5nVyZKd+/KhU= +github.com/consensys/gnark-crypto v0.11.2 h1:GJjjtWJ+db1xGao7vTsOgAOGgjfPe7eRGPL+xxMX0qE= +github.com/consensys/gnark-crypto v0.11.2/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= +github.com/consensys/gnark-crypto v0.11.3-0.20230906172141-49815a21349a h1:Rc86uLASrW3xpeWRH8V9W23v5QYegI/wjgbZzwPiC44= +github.com/consensys/gnark-crypto v0.11.3-0.20230906172141-49815a21349a/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -84,10 +86,14 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 h1:DuBDHVjgGMPki7bAyh91+3cF1Vh34sAEdH8JQgbc2R0= -github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI= -github.com/crate-crypto/go-ipa v0.0.0-20230710183535-d5eb1c4661bd h1:jgf65Q4+jHFuLlhVApaVfTUwcU7dAdXK+GESow2UlaI= -github.com/crate-crypto/go-ipa v0.0.0-20230710183535-d5eb1c4661bd/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI= +github.com/crate-crypto/go-ipa v0.0.0-20230904185759-9f7637e8ddd0 h1:MztzKYOxMeC8HlWGXvq2wizas+QT0FgITjGThfmbh/0= +github.com/crate-crypto/go-ipa v0.0.0-20230904185759-9f7637e8ddd0/go.mod h1:7fZtshzGQ3dxVpDpF51K9mX8oziq8Xd5AoM/UT9fF5o= +github.com/crate-crypto/go-ipa v0.0.0-20230905211650-63ccabc1a949 h1:m73KBJvYRMuaUth425v6nKeEu6GSq9Zij01+jc2r2Y0= +github.com/crate-crypto/go-ipa v0.0.0-20230905211650-63ccabc1a949/go.mod h1:7fZtshzGQ3dxVpDpF51K9mX8oziq8Xd5AoM/UT9fF5o= +github.com/crate-crypto/go-ipa v0.0.0-20230911163631-de5e505e95bf h1:DwDzUJSm6lD9geUNtNQmMdfuNMo9ucHEVzY2aLkYUI8= +github.com/crate-crypto/go-ipa v0.0.0-20230911163631-de5e505e95bf/go.mod h1:J+gsi6D4peY0kyhaklyXFRVHOQWI2I5uU0c2+/90HYc= +github.com/crate-crypto/go-ipa v0.0.0-20230914135612-d1b03fcb8e58 h1:PwUlswsGOrLB677lW4XrlWLeszY3BaDGbvZ6dYk28tQ= +github.com/crate-crypto/go-ipa v0.0.0-20230914135612-d1b03fcb8e58/go.mod h1:J+gsi6D4peY0kyhaklyXFRVHOQWI2I5uU0c2+/90HYc= github.com/crate-crypto/go-kzg-4844 v0.3.0 
h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -146,10 +152,24 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILD github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b h1:vMT47RYsrftsHSTQhqXwC3BYflo38OLC3Y4LtXtLyU0= -github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b/go.mod h1:CDncRYVRSDqwakm282WEkjfaAj1hxU/v5RXxk5nXOiI= -github.com/gballet/go-verkle v0.0.0-20230725193842-b2d852dc666b h1:2lDzSxjCii8FxrbuxtlFtFiw6c4nTPl9mhaZ6lgpwws= -github.com/gballet/go-verkle v0.0.0-20230725193842-b2d852dc666b/go.mod h1:+k9fzNguudDonU5q4/TUaTdmiHw3h3oGOIVmqyhaA3E= +github.com/gballet/go-verkle v0.0.0-20230905121642-6764a0cc51cf h1:nEiCdFdoQz4rDn5URrMOVira10+rLvJ82PfEbkMF3jo= +github.com/gballet/go-verkle v0.0.0-20230905121642-6764a0cc51cf/go.mod h1:vYx+8/EoJeRLJ3R5sCVhmAdpsZIqzxF6Tr5p+8kbJrg= +github.com/gballet/go-verkle v0.0.0-20230905122518-d220d72630e6 h1:o1G+rbcG/jWZWv+kWwwvE+TmFwXamUDEbvMUA6xR5fk= +github.com/gballet/go-verkle v0.0.0-20230905122518-d220d72630e6/go.mod h1:TPmzzGQJd4ZZxR3+hIn6SGnm9aYauFHkuYCOcTvzI6A= +github.com/gballet/go-verkle v0.0.0-20230906092655-319e750ea891 h1:nsdB5gaCl3J98ZRGHCDy3LSomfpY4fA5BvGa3Ux1e4A= +github.com/gballet/go-verkle v0.0.0-20230906092655-319e750ea891/go.mod h1:TPmzzGQJd4ZZxR3+hIn6SGnm9aYauFHkuYCOcTvzI6A= +github.com/gballet/go-verkle v0.0.0-20230906110906-5ce291aceda2 h1:qxP6c7XUjMScf/IzBJc3LdJ7+94UsVJJlYmowCqGkFQ= +github.com/gballet/go-verkle v0.0.0-20230906110906-5ce291aceda2/go.mod h1:g5tqVx8nLwDrC6Gki3pTRO4+VgusEMBJnDaQvi3A15g= +github.com/gballet/go-verkle v0.0.0-20230911184846-b1cb716e965e h1:Lw+ErC384jjxEqPekDeMbsQUuFHe9U9P/j2/wa11d1w= +github.com/gballet/go-verkle v0.0.0-20230911184846-b1cb716e965e/go.mod h1:g5tqVx8nLwDrC6Gki3pTRO4+VgusEMBJnDaQvi3A15g= +github.com/gballet/go-verkle v0.0.0-20230912081326-5a9b0c7bda0d h1:lJ+5o6tMVteFMFSaYw5P8T+s8jt2DMyBV7GvUZduozo= +github.com/gballet/go-verkle v0.0.0-20230912081326-5a9b0c7bda0d/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= +github.com/gballet/go-verkle v0.1.0 h1:DNQjU+M3fgbZR/rbiPban4oLl5T3bfijejmRHwwT6n0= +github.com/gballet/go-verkle v0.1.0/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= +github.com/gballet/go-verkle v0.1.1-0.20230921123058-fb04943e860f h1:v/wHViCd+qLWSoEB0fXhVds68lB/iFJc3vglb05fOCw= +github.com/gballet/go-verkle v0.1.1-0.20230921123058-fb04943e860f/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= +github.com/gballet/go-verkle v0.1.1-0.20230921123936-6a6b1f7a751c h1:sa+wcZ/O1bvCd4Zr5OJvKlvDSdwtNSXrgKxw48t3GPs= +github.com/gballet/go-verkle v0.1.1-0.20230921123936-6a6b1f7a751c/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= @@ -306,7 +326,6 @@ github.com/labstack/echo/v4 v4.2.1/go.mod 
h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4F github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= -github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -428,8 +447,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b h1:u49mjRnygnB34h8OKbnNJFVUtWSKIKb1KukdV8bILUM= github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -480,8 +499,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU= golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= @@ -534,7 +553,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -569,17 +587,14 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -591,8 +606,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= diff --git a/les/client.go b/les/client.go index 132c857aa529..691635be0c59 100644 --- a/les/client.go +++ b/les/client.go @@ -95,8 +95,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { if config.OverrideCancun != nil { overrides.OverrideCancun = config.OverrideCancun } - if config.OverrideVerkle != nil { - overrides.OverrideVerkle = config.OverrideVerkle + if config.OverridePrague != nil { + overrides.OverridePrague = config.OverridePrague } chainConfig, genesisHash, genesisErr := 
core.SetupGenesisBlockWithOverride(chainDb, trie.NewDatabase(chainDb), config.Genesis, &overrides) if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat { diff --git a/light/trie.go b/light/trie.go index e0a283fdc1d7..53d54615d909 100644 --- a/light/trie.go +++ b/light/trie.go @@ -105,6 +105,10 @@ func (db *odrDatabase) StartVerkleTransition(originalRoot common.Hash, translate panic("not implemented") // TODO: Implement } +func (db *odrDatabase) ReorgThroughVerkleTransition() { + panic("not implemented") // TODO: Implement +} + func (db *odrDatabase) EndVerkleTransition() { panic("not implemented") // TODO: Implement } diff --git a/miner/worker.go b/miner/worker.go index 81aeb1d81388..8568abf37691 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -697,13 +697,7 @@ func (w *worker) resultLoop() { } // makeEnv creates a new environment for the sealing block. -func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address) (*environment, error) { - // Retrieve the parent state to execute on top and start a prefetcher for - // the miner to speed block sealing up a bit. - state, err := w.chain.StateAt(parent.Root) - if err != nil { - return nil, err - } +func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address, state *state.StateDB) (*environment, error) { state.StartPrefetcher("miner") // Note the passed coinbase may be different with header.Coinbase. @@ -895,15 +889,33 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) { header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil) } } + + // Trigger the start of the verkle conversion if we're at the right block + if w.chain.Config().IsPrague(header.Number, header.Time) { + parent := w.chain.GetHeaderByNumber(header.Number.Uint64() - 1) + if !w.chain.Config().IsPrague(parent.Number, parent.Time) { + w.chain.StartVerkleTransition(parent.Root, common.Hash{}, w.chain.Config(), nil) + } + } + + // Retrieve the parent state to execute on top and start a prefetcher for + // the miner to speed block sealing up a bit. + state, err := w.chain.StateAt(parent.Root) + if err != nil { + return nil, err + } + if w.chain.Config().IsPrague(header.Number, header.Time) { + core.OverlayVerkleTransition(state) + } // Run the consensus preparation with the default or customized consensus engine. - if err := w.engine.Prepare(w.chain, header); err != nil { + if err := w.engine.Prepare(w.chain, header, state); err != nil { log.Error("Failed to prepare header for sealing", "err", err) return nil, err } // Could potentially happen if starting to mine in an odd state. // Note genParams.coinbase can be different with header.Coinbase // since clique algorithm can modify the coinbase field in header. 
- env, err := w.makeEnv(parent, header, genParams.coinbase) + env, err := w.makeEnv(parent, header, genParams.coinbase, state) if err != nil { log.Error("Failed to create sealing context", "err", err) return nil, err diff --git a/params/config.go b/params/config.go index 75c8fd89d09f..0eb882a56ae6 100644 --- a/params/config.go +++ b/params/config.go @@ -128,7 +128,6 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, - VerkleTime: nil, TerminalTotalDifficulty: nil, TerminalTotalDifficultyPassed: true, Ethash: new(EthashConfig), @@ -179,7 +178,6 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, - VerkleTime: nil, TerminalTotalDifficulty: nil, TerminalTotalDifficultyPassed: false, Ethash: nil, @@ -209,7 +207,6 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, - VerkleTime: nil, TerminalTotalDifficulty: nil, TerminalTotalDifficultyPassed: false, Ethash: new(EthashConfig), @@ -239,7 +236,6 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, - VerkleTime: nil, TerminalTotalDifficulty: nil, TerminalTotalDifficultyPassed: false, Ethash: new(EthashConfig), @@ -289,7 +285,6 @@ type ChainConfig struct { ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai) CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun) PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague) - VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle) // TerminalTotalDifficulty is the amount of total difficulty reached by // the network that triggers the consensus upgrade. @@ -304,6 +299,9 @@ type ChainConfig struct { Ethash *EthashConfig `json:"ethash,omitempty"` Clique *CliqueConfig `json:"clique,omitempty"` IsDevMode bool `json:"isDev,omitempty"` + + // Proof in block + ProofInBlock bool `json:"proofInBlock,omitempty"` } // EthashConfig is the consensus engine configs for proof-of-work based sealing. @@ -411,9 +409,6 @@ func (c *ChainConfig) Description() string { if c.PragueTime != nil { banner += fmt.Sprintf(" - Prague: @%-10v\n", *c.PragueTime) } - if c.VerkleTime != nil { - banner += fmt.Sprintf(" - Verkle: @%-10v\n", *c.VerkleTime) - } return banner } @@ -512,11 +507,6 @@ func (c *ChainConfig) IsPrague(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.PragueTime, time) } -// IsVerkle returns whether num is either equal to the Verkle fork time or greater. -func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool { - return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time) -} - // CheckCompatible checks whether scheduled fork transitions have been imported // with a mismatching chain configuration. 
func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, time uint64) *ConfigCompatError { @@ -571,7 +561,6 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "shanghaiTime", timestamp: c.ShanghaiTime}, {name: "cancunTime", timestamp: c.CancunTime, optional: true}, {name: "pragueTime", timestamp: c.PragueTime, optional: true}, - {name: "verkleTime", timestamp: c.VerkleTime, optional: true}, } { if lastFork.name != "" { switch { @@ -675,9 +664,6 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, headNumber *big.Int, if isForkTimestampIncompatible(c.PragueTime, newcfg.PragueTime, headTimestamp) { return newTimestampCompatError("Prague fork timestamp", c.PragueTime, newcfg.PragueTime) } - if isForkTimestampIncompatible(c.VerkleTime, newcfg.VerkleTime, headTimestamp) { - return newTimestampCompatError("Verkle fork timestamp", c.VerkleTime, newcfg.VerkleTime) - } return nil } @@ -823,7 +809,6 @@ type Rules struct { IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool IsBerlin, IsLondon bool IsMerge, IsShanghai, IsCancun, IsPrague bool - IsVerkle bool } // Rules ensures c's ChainID is not nil. @@ -848,6 +833,5 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules IsShanghai: c.IsShanghai(num, timestamp), IsCancun: c.IsCancun(num, timestamp), IsPrague: c.IsPrague(num, timestamp), - IsVerkle: c.IsVerkle(num, timestamp), } } diff --git a/trie/transition.go b/trie/transition.go index ad5a7dc70152..083aa57db1ac 100644 --- a/trie/transition.go +++ b/trie/transition.go @@ -174,7 +174,7 @@ func (t *TransitionTrie) UpdateStem(key []byte, values [][]byte) error { trie := t.overlay switch root := trie.root.(type) { case *verkle.InternalNode: - return root.InsertStem(key, values, t.overlay.flatdbNodeResolver) + return root.InsertStem(key, values, t.overlay.FlatdbNodeResolver) default: panic("invalid root type") } diff --git a/trie/utils/verkle_test.go b/trie/utils/verkle_test.go index 744df9df26ac..f0a0ed7d2894 100644 --- a/trie/utils/verkle_test.go +++ b/trie/utils/verkle_test.go @@ -51,7 +51,7 @@ func TestConstantPoint(t *testing.T) { verkle.FromLEBytes(&expectedPoly[0], []byte{2, 64}) expected := cfg.CommitToPoly(expectedPoly[:], 1) - if !verkle.Equal(expected, getTreePolyIndex0Point) { + if !expected.Equal(getTreePolyIndex0Point) { t.Fatalf("Marshalled constant value is incorrect: %x != %x", expected.Bytes(), getTreePolyIndex0Point.Bytes()) } } diff --git a/trie/verkle.go b/trie/verkle.go index baf9fde541ac..174ebfce2306 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -54,12 +54,12 @@ func NewVerkleTrie(root verkle.VerkleNode, db *Database, pointCache *utils.Point } } -func (trie *VerkleTrie) flatdbNodeResolver(path []byte) ([]byte, error) { +func (trie *VerkleTrie) FlatdbNodeResolver(path []byte) ([]byte, error) { return trie.db.diskdb.Get(append(FlatDBVerkleNodeKeyPrefix, path...)) } func (trie *VerkleTrie) InsertMigratedLeaves(leaves []verkle.LeafNode) error { - return trie.root.(*verkle.InternalNode).InsertMigratedLeaves(leaves, trie.flatdbNodeResolver) + return trie.root.(*verkle.InternalNode).InsertMigratedLeaves(leaves, trie.FlatdbNodeResolver) } var ( @@ -90,13 +90,13 @@ func (trie *VerkleTrie) GetKey(key []byte) []byte { func (trie *VerkleTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) { pointEval := trie.pointCache.GetTreeKeyHeader(addr[:]) k := utils.GetTreeKeyStorageSlotWithEvaluatedAddress(pointEval, key) - return trie.root.Get(k, trie.flatdbNodeResolver) + return trie.root.Get(k, 
trie.FlatdbNodeResolver) } // GetWithHashedKey returns the value, assuming that the key has already // been hashed. func (trie *VerkleTrie) GetWithHashedKey(key []byte) ([]byte, error) { - return trie.root.Get(key, trie.flatdbNodeResolver) + return trie.root.Get(key, trie.FlatdbNodeResolver) } func (t *VerkleTrie) GetAccount(addr common.Address) (*types.StateAccount, error) { @@ -108,7 +108,7 @@ func (t *VerkleTrie) GetAccount(addr common.Address) (*types.StateAccount, error ) switch t.root.(type) { case *verkle.InternalNode: - values, err = t.root.(*verkle.InternalNode).GetStem(versionkey[:31], t.flatdbNodeResolver) + values, err = t.root.(*verkle.InternalNode).GetStem(versionkey[:31], t.FlatdbNodeResolver) default: return nil, errInvalidRootType } @@ -177,7 +177,7 @@ func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount) switch root := t.root.(type) { case *verkle.InternalNode: - err = root.InsertStem(stem, values, t.flatdbNodeResolver) + err = root.InsertStem(stem, values, t.FlatdbNodeResolver) default: return errInvalidRootType } @@ -192,7 +192,7 @@ func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount) func (trie *VerkleTrie) UpdateStem(key []byte, values [][]byte) error { switch root := trie.root.(type) { case *verkle.InternalNode: - return root.InsertStem(key, values, trie.flatdbNodeResolver) + return root.InsertStem(key, values, trie.FlatdbNodeResolver) default: panic("invalid root type") } @@ -210,7 +210,7 @@ func (trie *VerkleTrie) UpdateStorage(address common.Address, key, value []byte) } else { copy(v[32-len(value):], value[:]) } - return trie.root.Insert(k, v[:], trie.flatdbNodeResolver) + return trie.root.Insert(k, v[:], trie.FlatdbNodeResolver) } func (t *VerkleTrie) DeleteAccount(addr common.Address) error { @@ -226,7 +226,7 @@ func (t *VerkleTrie) DeleteAccount(addr common.Address) error { switch root := t.root.(type) { case *verkle.InternalNode: - err = root.InsertStem(stem, values, t.flatdbNodeResolver) + err = root.InsertStem(stem, values, t.FlatdbNodeResolver) default: return errInvalidRootType } @@ -244,7 +244,7 @@ func (trie *VerkleTrie) DeleteStorage(addr common.Address, key []byte) error { pointEval := trie.pointCache.GetTreeKeyHeader(addr[:]) k := utils.GetTreeKeyStorageSlotWithEvaluatedAddress(pointEval, key) var zero [32]byte - return trie.root.Insert(k, zero[:], trie.flatdbNodeResolver) + return trie.root.Insert(k, zero[:], trie.FlatdbNodeResolver) } // Hash returns the root hash of the trie. 
It does not write to the database and @@ -311,8 +311,9 @@ func (trie *VerkleTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { func (trie *VerkleTrie) Copy() *VerkleTrie { return &VerkleTrie{ - root: trie.root.Copy(), - db: trie.db, + root: trie.root.Copy(), + db: trie.db, + pointCache: trie.pointCache, } } @@ -320,8 +321,12 @@ func (trie *VerkleTrie) IsVerkle() bool { return true } -func (trie *VerkleTrie) ProveAndSerialize(keys [][]byte) (*verkle.VerkleProof, verkle.StateDiff, error) { - proof, _, _, _, err := verkle.MakeVerkleMultiProof(trie.root, keys) +func ProveAndSerialize(pretrie, posttrie *VerkleTrie, keys [][]byte, resolver verkle.NodeResolverFn) (*verkle.VerkleProof, verkle.StateDiff, error) { + var postroot verkle.VerkleNode + if posttrie != nil { + postroot = posttrie.root + } + proof, _, _, _, err := verkle.MakeVerkleMultiProof(pretrie.root, postroot, keys, resolver) if err != nil { return nil, nil, err } @@ -342,69 +347,53 @@ func addKey(s set, key []byte) { func DeserializeAndVerifyVerkleProof(vp *verkle.VerkleProof, root []byte, statediff verkle.StateDiff) error { rootC := new(verkle.Point) - rootC.SetBytesTrusted(root) - proof, cis, indices, yis, err := deserializeVerkleProof(vp, rootC, statediff) - if err != nil { - return fmt.Errorf("could not deserialize proof: %w", err) - } - cfg := verkle.GetConfig() - if !verkle.VerifyVerkleProof(proof, cis, indices, yis, cfg) { - return errInvalidProof - } + rootC.SetBytes(root) - return nil -} - -func deserializeVerkleProof(vp *verkle.VerkleProof, rootC *verkle.Point, statediff verkle.StateDiff) (*verkle.Proof, []*verkle.Point, []byte, []*verkle.Fr, error) { var others set = set{} // Mark when an "other" stem has been seen proof, err := verkle.DeserializeProof(vp, statediff) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("verkle proof deserialization error: %w", err) + return fmt.Errorf("verkle proof deserialization error: %w", err) } for _, stem := range proof.PoaStems { addKey(others, stem) } - if len(proof.Keys) != len(proof.Values) { - return nil, nil, nil, nil, fmt.Errorf("keys and values are of different length %d != %d", len(proof.Keys), len(proof.Values)) - } - - tree, err := verkle.TreeFromProof(proof, rootC) + pretree, err := verkle.PreStateTreeFromProof(proof, rootC) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("error rebuilding the tree from proof: %w", err) + return fmt.Errorf("error rebuilding the pre-tree from proof: %w", err) } + // TODO this should not be necessary, remove it + // after the new proof generation code has stabilized. 
for _, stemdiff := range statediff { for _, suffixdiff := range stemdiff.SuffixDiffs { var key [32]byte copy(key[:31], stemdiff.Stem[:]) key[31] = suffixdiff.Suffix - val, err := tree.Get(key[:], nil) + val, err := pretree.Get(key[:], nil) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("could not find key %x in tree rebuilt from proof: %w", key, err) + return fmt.Errorf("could not find key %x in tree rebuilt from proof: %w", key, err) } if len(val) > 0 { if !bytes.Equal(val, suffixdiff.CurrentValue[:]) { - return nil, nil, nil, nil, fmt.Errorf("could not find correct value at %x in tree rebuilt from proof: %x != %x", key, val, *suffixdiff.CurrentValue) + return fmt.Errorf("could not find correct value at %x in tree rebuilt from proof: %x != %x", key, val, *suffixdiff.CurrentValue) } } else { if suffixdiff.CurrentValue != nil && len(suffixdiff.CurrentValue) != 0 { - return nil, nil, nil, nil, fmt.Errorf("could not find correct value at %x in tree rebuilt from proof: %x != %x", key, val, *suffixdiff.CurrentValue) + return fmt.Errorf("could not find correct value at %x in tree rebuilt from proof: %x != %x", key, val, *suffixdiff.CurrentValue) } } } } - // no need to resolve as the tree has been reconstructed from the proof - // and must not contain any unresolved nodes. - pe, _, _, err := tree.GetProofItems(proof.Keys) + posttree, err := verkle.PostStateTreeFromStateDiff(pretree, statediff) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("could not get proof items from tree rebuilt from proof: %w", err) + return fmt.Errorf("error rebuilding the post-tree from proof: %w", err) } - return proof, pe.Cis, pe.Zis, pe.Yis, err + return verkle.VerifyVerkleProofWithPreAndPostTrie(proof, pretree, posttree) } // ChunkedCode represents a sequence of 32-bytes chunks of code (31 bytes of which diff --git a/trie/verkle_test.go b/trie/verkle_test.go index 4e21ee501a21..d92b1758d33b 100644 --- a/trie/verkle_test.go +++ b/trie/verkle_test.go @@ -70,9 +70,9 @@ func TestReproduceTree(t *testing.T) { root.Insert(key, values[i], nil) } - proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, append(presentKeys, absentKeys...)) + proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, nil, append(presentKeys, absentKeys...), nil) cfg := verkle.GetConfig() - if !verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg) { + if ok, err := verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg); !ok || err != nil { t.Fatal("could not verify proof") } @@ -289,9 +289,9 @@ func TestReproduceCondrieuStemAggregationInProofOfAbsence(t *testing.T) { root.Insert(key, values[i], nil) } - proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, append(presentKeys, absentKeys...)) + proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, nil, append(presentKeys, absentKeys...), nil) cfg := verkle.GetConfig() - if !verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg) { + if ok, err := verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg); !ok || err != nil { t.Fatal("could not verify proof") } @@ -334,9 +334,9 @@ func TestReproduceCondrieuPoAStemConflictWithAnotherStem(t *testing.T) { root.Insert(key, values[i], nil) } - proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, append(presentKeys, absentKeys...)) + proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, nil, append(presentKeys, absentKeys...), nil) cfg := verkle.GetConfig() - if !verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg) { + if ok, err := verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg); !ok || err != nil { t.Fatal("could not 
verify proof") } @@ -360,7 +360,7 @@ func TestReproduceCondrieuPoAStemConflictWithAnotherStem(t *testing.T) { func TestEmptyKeySetInProveAndSerialize(t *testing.T) { tree := verkle.New() - verkle.MakeVerkleMultiProof(tree, [][]byte{}) + verkle.MakeVerkleMultiProof(tree, nil, [][]byte{}, nil) } func TestGetTreeKeys(t *testing.T) { From 713db1bbbcebca181f0e549999da9ccac56ca9c3 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 25 Sep 2023 12:02:03 +0200 Subject: [PATCH 30/99] fix: add ProofInBlocks to chain config (#280) --- consensus/beacon/consensus.go | 4 ++-- core/state_processor_test.go | 2 +- params/config.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 94b316f19757..7cea637ba1bc 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -398,7 +398,7 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea k verkle.StateDiff keys = state.Witness().Keys() ) - if chain.Config().IsPrague(header.Number, header.Time) && chain.Config().ProofInBlock { + if chain.Config().IsPrague(header.Number, header.Time) && chain.Config().ProofInBlocks { // Open the pre-tree to prove the pre-state against parent := chain.GetHeaderByNumber(header.Number.Uint64() - 1) if parent == nil { @@ -448,7 +448,7 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea // Assemble and return the final block. block := types.NewBlockWithWithdrawals(header, txs, uncles, receipts, withdrawals, trie.NewStackTrie(nil)) - if chain.Config().IsPrague(header.Number, header.Time) && chain.Config().ProofInBlock { + if chain.Config().IsPrague(header.Number, header.Time) && chain.Config().ProofInBlocks { block.SetVerkleProof(p, k) } return block, nil diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 77088f86faa6..6cdb2e9a4f33 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -454,7 +454,7 @@ func TestProcessVerkle(t *testing.T) { PragueTime: u64(0), TerminalTotalDifficulty: common.Big0, TerminalTotalDifficultyPassed: true, - ProofInBlock: true, + ProofInBlocks: true, } signer = types.LatestSigner(config) testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") diff --git a/params/config.go b/params/config.go index 0eb882a56ae6..19e633a71def 100644 --- a/params/config.go +++ b/params/config.go @@ -301,7 +301,7 @@ type ChainConfig struct { IsDevMode bool `json:"isDev,omitempty"` // Proof in block - ProofInBlock bool `json:"proofInBlock,omitempty"` + ProofInBlocks bool `json:"proofInBlocks,omitempty"` } // EthashConfig is the consensus engine configs for proof-of-work based sealing. 
From f8bacb2c181a585bbde08639e44c90fe90184291 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 25 Sep 2023 12:06:16 +0200 Subject: [PATCH 31/99] remove StateDB as an extra param to Prepare (#281) --- consensus/beacon/consensus.go | 4 ++-- consensus/clique/clique.go | 2 +- consensus/consensus.go | 2 +- consensus/ethash/consensus.go | 2 +- miner/worker.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 7cea637ba1bc..82abe65ca1c6 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -330,14 +330,14 @@ func (beacon *Beacon) verifyHeaders(chain consensus.ChainHeaderReader, headers [ // Prepare implements consensus.Engine, initializing the difficulty field of a // header to conform to the beacon protocol. The changes are done inline. -func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB) error { +func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { // Transition isn't triggered yet, use the legacy rules for preparation. reached, err := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1) if err != nil { return err } if !reached { - return beacon.ethone.Prepare(chain, header, statedb) + return beacon.ethone.Prepare(chain, header) } header.Difficulty = beaconDifficulty return nil diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index 23c7d32755b2..f708050abd13 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -499,7 +499,7 @@ func (c *Clique) verifySeal(snap *Snapshot, header *types.Header, parents []*typ // Prepare implements consensus.Engine, preparing all the consensus fields of the // header for running the transactions on top. -func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header, _ *state.StateDB) error { +func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { // If the block isn't a checkpoint, cast a random vote (good enough for now) header.Coinbase = common.Address{} header.Nonce = types.BlockNonce{} diff --git a/consensus/consensus.go b/consensus/consensus.go index aa5ad43ede3d..3a2c2d222916 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -81,7 +81,7 @@ type Engine interface { // Prepare initializes the consensus fields of a block header according to the // rules of a particular engine. The changes are executed inline. - Prepare(chain ChainHeaderReader, header *types.Header, state *state.StateDB) error + Prepare(chain ChainHeaderReader, header *types.Header) error // Finalize runs any post-transaction state modifications (e.g. block rewards // or process withdrawals) but does not assemble the block. diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index d155a3521cbc..44aec25c1216 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -479,7 +479,7 @@ var DynamicDifficultyCalculator = makeDifficultyCalculator // Prepare implements consensus.Engine, initializing the difficulty field of a // header to conform to the ethash protocol. The changes are done inline. 
-func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header, _ *state.StateDB) error { +func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) if parent == nil { return consensus.ErrUnknownAncestor diff --git a/miner/worker.go b/miner/worker.go index 8568abf37691..124c93212262 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -908,7 +908,7 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) { core.OverlayVerkleTransition(state) } // Run the consensus preparation with the default or customized consensus engine. - if err := w.engine.Prepare(w.chain, header, state); err != nil { + if err := w.engine.Prepare(w.chain, header); err != nil { log.Error("Failed to prepare header for sealing", "err", err) return nil, err } From eb03735429a7eb511109ddebb388d56ac018ed2d Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 25 Sep 2023 17:10:22 +0200 Subject: [PATCH 32/99] fix: need commitment in tests (#282) --- go.mod | 4 ++-- go.sum | 4 ++++ trie/verkle_test.go | 10 ++++++++-- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 77651a84e427..adce2703a8e1 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.1.1-0.20230921123936-6a6b1f7a751c + github.com/gballet/go-verkle v0.1.1-0.20230921190644-1a60d228f7b2 github.com/go-stack/stack v1.8.1 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.3.0 @@ -86,7 +86,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.1.1 // indirect github.com/aws/smithy-go v1.1.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.8.0 // indirect + github.com/bits-and-blooms/bitset v1.9.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect diff --git a/go.sum b/go.sum index fb217e376795..beb79bb00255 100644 --- a/go.sum +++ b/go.sum @@ -45,6 +45,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c= github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.9.0 h1:g1YivPG8jOtrN013Fe8OBXubkiTwvm7/vG2vXz03ANU= +github.com/bits-and-blooms/bitset v1.9.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= @@ -170,6 +172,8 @@ github.com/gballet/go-verkle v0.1.1-0.20230921123058-fb04943e860f h1:v/wHViCd+qL github.com/gballet/go-verkle v0.1.1-0.20230921123058-fb04943e860f/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= github.com/gballet/go-verkle v0.1.1-0.20230921123936-6a6b1f7a751c h1:sa+wcZ/O1bvCd4Zr5OJvKlvDSdwtNSXrgKxw48t3GPs= github.com/gballet/go-verkle 
v0.1.1-0.20230921123936-6a6b1f7a751c/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= +github.com/gballet/go-verkle v0.1.1-0.20230921190644-1a60d228f7b2 h1:LYkA2UpEkEAnhP5RJx/JHOBvYI2O9cYJi36WUMsiuxQ= +github.com/gballet/go-verkle v0.1.1-0.20230921190644-1a60d228f7b2/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= diff --git a/trie/verkle_test.go b/trie/verkle_test.go index d92b1758d33b..df7a68ccee80 100644 --- a/trie/verkle_test.go +++ b/trie/verkle_test.go @@ -69,11 +69,15 @@ func TestReproduceTree(t *testing.T) { for i, key := range presentKeys { root.Insert(key, values[i], nil) } + root.Commit() - proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, nil, append(presentKeys, absentKeys...), nil) + proof, Cs, zis, yis, err := verkle.MakeVerkleMultiProof(root, nil, append(presentKeys, absentKeys...), nil) + if err != nil { + t.Fatalf("could not create proof: %v", err) + } cfg := verkle.GetConfig() if ok, err := verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg); !ok || err != nil { - t.Fatal("could not verify proof") + t.Fatalf("could not verify proof: %v", err) } t.Log("commitments returned by proof:") @@ -288,6 +292,7 @@ func TestReproduceCondrieuStemAggregationInProofOfAbsence(t *testing.T) { for i, key := range presentKeys { root.Insert(key, values[i], nil) } + root.Commit() proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, nil, append(presentKeys, absentKeys...), nil) cfg := verkle.GetConfig() @@ -333,6 +338,7 @@ func TestReproduceCondrieuPoAStemConflictWithAnotherStem(t *testing.T) { for i, key := range presentKeys { root.Insert(key, values[i], nil) } + root.Commit() proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, nil, append(presentKeys, absentKeys...), nil) cfg := verkle.GetConfig() From 2403a275f0a841602934edfa78cf4a776b3adacb Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 25 Sep 2023 17:07:49 +0200 Subject: [PATCH 33/99] fix linter message --- core/state/database.go | 1 - 1 file changed, 1 deletion(-) diff --git a/core/state/database.go b/core/state/database.go index 3caee28c8f33..6e65286f3d57 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -250,7 +250,6 @@ func (db *cachingDB) EndVerkleTransition() { db.ended = true } - type cachingDB struct { disk ethdb.KeyValueStore codeSizeCache *lru.Cache[common.Hash, int] From c94a9dd78b37ce272e9deb41e4dab0755ef02780 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 25 Sep 2023 17:18:02 +0200 Subject: [PATCH 34/99] fix: a couple of CI issues --- core/types/block.go | 1 - core/vm/interpreter.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/core/types/block.go b/core/types/block.go index a704d23043ca..90cb5df40863 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -309,7 +309,6 @@ func CopyHeader(h *Header) *Header { } if h.ExecutionWitness != nil { cpy.ExecutionWitness = h.ExecutionWitness.Copy() - } return &cpy } diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 1b980ccb131d..1bc0e80dfc44 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -179,7 +179,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) 
( logged, pcCopy, gasCopy = false, pc, contract.Gas } - if in.evm.chainRules.IsCancun && !contract.IsDeployment { + if in.evm.chainRules.IsPrague && !contract.IsDeployment { // if the PC ends up in a new "chunk" of verkleized code, charge the // associated costs. contractAddr := contract.Address() From 1b72c344856bdf759d0663c04c8ec6c28171234b Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 25 Sep 2023 17:23:46 +0200 Subject: [PATCH 35/99] fix more CI complaints --- core/state/database.go | 8 ++------ trie/verkle.go | 1 - 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/core/state/database.go b/core/state/database.go index 6e65286f3d57..e663556336f1 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -258,12 +258,8 @@ type cachingDB struct { // Verkle specific fields // TODO ensure that this info is in the DB - started, ended bool - translatedRoots [32]common.Hash // hash of the translated root, for opening - origRoots [32]common.Hash - translationIndex int - translatedRootsLock sync.RWMutex - LastMerkleRoot common.Hash // root hash of the read-only base tree + started, ended bool + LastMerkleRoot common.Hash // root hash of the read-only base tree addrToPoint *utils.PointCache diff --git a/trie/verkle.go b/trie/verkle.go index 174ebfce2306..68ad6b9e3844 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -63,7 +63,6 @@ func (trie *VerkleTrie) InsertMigratedLeaves(leaves []verkle.LeafNode) error { } var ( - errInvalidProof = errors.New("invalid proof") errInvalidRootType = errors.New("invalid node type for root") // WORKAROUND: this special error is returned if it has been From 5032dd480b7e293d1d287e6c18a393af75c16f19 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 25 Sep 2023 17:27:30 +0200 Subject: [PATCH 36/99] achieving perfection --- core/state/database.go | 1 - core/state/statedb.go | 21 ++++----------------- 2 files changed, 4 insertions(+), 18 deletions(-) diff --git a/core/state/database.go b/core/state/database.go index e663556336f1..af8cc1a36e30 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -19,7 +19,6 @@ package state import ( "errors" "fmt" - "sync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/lru" diff --git a/core/state/statedb.go b/core/state/statedb.go index e5c6669da751..ea1e18613870 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -175,24 +175,11 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) } if tr.IsVerkle() { sdb.witness = sdb.NewAccessWitness() - // if sdb.snaps == nil { - // snapconfig := snapshot.Config{ - // CacheSize: 256, - // Recovery: false, - // NoBuild: false, - // AsyncBuild: false, - // Verkle: true, - // } - // sdb.snaps, err = snapshot.New(snapconfig, db.DiskDB(), db.TrieDB(), root) - // if err != nil { - // return nil, err - // } - // } - } - if sdb.snaps != nil { - if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap == nil { - } } + // if sdb.snaps != nil { + // if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap == nil { + // } + // } return sdb, nil } From 990395c688d5bf4623d13a76386d52e35182c888 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Wed, 27 Sep 2023 12:02:02 +0200 Subject: [PATCH 37/99] workaround: disable check for root presence (#283) --- core/genesis.go | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff 
--git a/core/genesis.go b/core/genesis.go index 6ea848508270..2ed1ab949187 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -341,23 +341,23 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen } // We have the genesis block in database(perhaps in ancient database) // but the corresponding state is missing. - header := rawdb.ReadHeader(db, stored, 0) - if header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { - if genesis == nil { - genesis = DefaultGenesisBlock() - } - // Ensure the stored genesis matches with the given one. - hash := genesis.ToBlock().Hash() - if hash != stored { - return genesis.Config, hash, &GenesisMismatchError{stored, hash} - } - block, err := genesis.Commit(db, triedb) - if err != nil { - return genesis.Config, hash, err - } - applyOverrides(genesis.Config) - return genesis.Config, block.Hash(), nil - } + // header := rawdb.ReadHeader(db, stored, 0) + // if header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { + // if genesis == nil { + // genesis = DefaultGenesisBlock() + // } + // // Ensure the stored genesis matches with the given one. + // hash := genesis.ToBlock().Hash() + // if hash != stored { + // return genesis.Config, hash, &GenesisMismatchError{stored, hash} + // } + // block, err := genesis.Commit(db, triedb) + // if err != nil { + // return genesis.Config, hash, err + // } + // applyOverrides(genesis.Config) + // return genesis.Config, block.Hash(), nil + // } // Check whether the genesis block is already written. if genesis != nil { hash := genesis.ToBlock().Hash() From acb4a30555d08e318a2ca964864c0df62d1b49b1 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Wed, 27 Sep 2023 19:25:49 +0200 Subject: [PATCH 38/99] fix: activate verkle at genesis (#284) --- cmd/geth/chaincmd.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 8e195bcf964d..bed4aa2d2f7c 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -208,6 +208,7 @@ func initGenesis(ctx *cli.Context) error { } triedb := trie.NewDatabaseWithConfig(chaindb, &trie.Config{ Preimages: ctx.Bool(utils.CachePreimagesFlag.Name), + Verkle: true, }) _, hash, err := core.SetupGenesisBlock(chaindb, triedb, genesis) if err != nil { From c82ff997d32d2e644d5367f9e1c68d06c4033040 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Thu, 28 Sep 2023 12:19:35 +0200 Subject: [PATCH 39/99] fix: ensure read-only values are resolved in post trie (#285) --- consensus/beacon/consensus.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 82abe65ca1c6..6a18084fdb19 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -435,6 +435,19 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea if err != nil { panic(err) } + + // WORKAROUND: the post trie would normally not + // need to be searched for keys, as all of them + // were resolved during block execution. + // But since the prefetcher isn't currently used + // with verkle, the values that are read but not + // written to, are not resolved as they are read + // straight from the snapshot. They must be read + // in order to build the proof. 
+ _, err = vtrpost.GetWithHashedKey(key) + if err != nil { + panic(err) + } } if len(keys) > 0 { From 328e180429efd981490bea45e30a099f07f5ebbe Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 6 Oct 2023 13:01:27 +0200 Subject: [PATCH 40/99] upgrade deps to get proof generation absence/presence bugfix --- go.mod | 8 ++++---- go.sum | 44 ++++++++------------------------------------ 2 files changed, 12 insertions(+), 40 deletions(-) diff --git a/go.mod b/go.mod index adce2703a8e1..e4f3294231d7 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/cespare/cp v0.1.0 github.com/cloudflare/cloudflare-go v0.14.0 github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 - github.com/consensys/gnark-crypto v0.11.3-0.20230906172141-49815a21349a + github.com/consensys/gnark-crypto v0.12.1 github.com/crate-crypto/go-ipa v0.0.0-20230914135612-d1b03fcb8e58 github.com/crate-crypto/go-kzg-4844 v0.3.0 github.com/davecgh/go-spew v1.1.1 @@ -26,7 +26,7 @@ require ( github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.1.1-0.20230921190644-1a60d228f7b2 + github.com/gballet/go-verkle v0.1.1-0.20231004173727-0a4e93ed640b github.com/go-stack/stack v1.8.1 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.3.0 @@ -65,8 +65,8 @@ require ( go.uber.org/automaxprocs v1.5.2 golang.org/x/crypto v0.10.0 golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc - golang.org/x/sync v0.3.0 - golang.org/x/sys v0.12.0 + golang.org/x/sync v0.4.0 + golang.org/x/sys v0.13.0 golang.org/x/text v0.10.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.9.1 diff --git a/go.sum b/go.sum index beb79bb00255..7367e231d7e1 100644 --- a/go.sum +++ b/go.sum @@ -43,8 +43,6 @@ github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c= -github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bits-and-blooms/bitset v1.9.0 h1:g1YivPG8jOtrN013Fe8OBXubkiTwvm7/vG2vXz03ANU= github.com/bits-and-blooms/bitset v1.9.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= @@ -77,10 +75,8 @@ github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/gnark-crypto v0.11.2 h1:GJjjtWJ+db1xGao7vTsOgAOGgjfPe7eRGPL+xxMX0qE= -github.com/consensys/gnark-crypto v0.11.2/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= -github.com/consensys/gnark-crypto v0.11.3-0.20230906172141-49815a21349a h1:Rc86uLASrW3xpeWRH8V9W23v5QYegI/wjgbZzwPiC44= -github.com/consensys/gnark-crypto v0.11.3-0.20230906172141-49815a21349a/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= 
+github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= +github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -88,12 +84,6 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-ipa v0.0.0-20230904185759-9f7637e8ddd0 h1:MztzKYOxMeC8HlWGXvq2wizas+QT0FgITjGThfmbh/0= -github.com/crate-crypto/go-ipa v0.0.0-20230904185759-9f7637e8ddd0/go.mod h1:7fZtshzGQ3dxVpDpF51K9mX8oziq8Xd5AoM/UT9fF5o= -github.com/crate-crypto/go-ipa v0.0.0-20230905211650-63ccabc1a949 h1:m73KBJvYRMuaUth425v6nKeEu6GSq9Zij01+jc2r2Y0= -github.com/crate-crypto/go-ipa v0.0.0-20230905211650-63ccabc1a949/go.mod h1:7fZtshzGQ3dxVpDpF51K9mX8oziq8Xd5AoM/UT9fF5o= -github.com/crate-crypto/go-ipa v0.0.0-20230911163631-de5e505e95bf h1:DwDzUJSm6lD9geUNtNQmMdfuNMo9ucHEVzY2aLkYUI8= -github.com/crate-crypto/go-ipa v0.0.0-20230911163631-de5e505e95bf/go.mod h1:J+gsi6D4peY0kyhaklyXFRVHOQWI2I5uU0c2+/90HYc= github.com/crate-crypto/go-ipa v0.0.0-20230914135612-d1b03fcb8e58 h1:PwUlswsGOrLB677lW4XrlWLeszY3BaDGbvZ6dYk28tQ= github.com/crate-crypto/go-ipa v0.0.0-20230914135612-d1b03fcb8e58/go.mod h1:J+gsi6D4peY0kyhaklyXFRVHOQWI2I5uU0c2+/90HYc= github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= @@ -154,26 +144,8 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILD github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/gballet/go-verkle v0.0.0-20230905121642-6764a0cc51cf h1:nEiCdFdoQz4rDn5URrMOVira10+rLvJ82PfEbkMF3jo= -github.com/gballet/go-verkle v0.0.0-20230905121642-6764a0cc51cf/go.mod h1:vYx+8/EoJeRLJ3R5sCVhmAdpsZIqzxF6Tr5p+8kbJrg= -github.com/gballet/go-verkle v0.0.0-20230905122518-d220d72630e6 h1:o1G+rbcG/jWZWv+kWwwvE+TmFwXamUDEbvMUA6xR5fk= -github.com/gballet/go-verkle v0.0.0-20230905122518-d220d72630e6/go.mod h1:TPmzzGQJd4ZZxR3+hIn6SGnm9aYauFHkuYCOcTvzI6A= -github.com/gballet/go-verkle v0.0.0-20230906092655-319e750ea891 h1:nsdB5gaCl3J98ZRGHCDy3LSomfpY4fA5BvGa3Ux1e4A= -github.com/gballet/go-verkle v0.0.0-20230906092655-319e750ea891/go.mod h1:TPmzzGQJd4ZZxR3+hIn6SGnm9aYauFHkuYCOcTvzI6A= -github.com/gballet/go-verkle v0.0.0-20230906110906-5ce291aceda2 h1:qxP6c7XUjMScf/IzBJc3LdJ7+94UsVJJlYmowCqGkFQ= -github.com/gballet/go-verkle v0.0.0-20230906110906-5ce291aceda2/go.mod h1:g5tqVx8nLwDrC6Gki3pTRO4+VgusEMBJnDaQvi3A15g= -github.com/gballet/go-verkle v0.0.0-20230911184846-b1cb716e965e h1:Lw+ErC384jjxEqPekDeMbsQUuFHe9U9P/j2/wa11d1w= -github.com/gballet/go-verkle v0.0.0-20230911184846-b1cb716e965e/go.mod h1:g5tqVx8nLwDrC6Gki3pTRO4+VgusEMBJnDaQvi3A15g= 
-github.com/gballet/go-verkle v0.0.0-20230912081326-5a9b0c7bda0d h1:lJ+5o6tMVteFMFSaYw5P8T+s8jt2DMyBV7GvUZduozo= -github.com/gballet/go-verkle v0.0.0-20230912081326-5a9b0c7bda0d/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= -github.com/gballet/go-verkle v0.1.0 h1:DNQjU+M3fgbZR/rbiPban4oLl5T3bfijejmRHwwT6n0= -github.com/gballet/go-verkle v0.1.0/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= -github.com/gballet/go-verkle v0.1.1-0.20230921123058-fb04943e860f h1:v/wHViCd+qLWSoEB0fXhVds68lB/iFJc3vglb05fOCw= -github.com/gballet/go-verkle v0.1.1-0.20230921123058-fb04943e860f/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= -github.com/gballet/go-verkle v0.1.1-0.20230921123936-6a6b1f7a751c h1:sa+wcZ/O1bvCd4Zr5OJvKlvDSdwtNSXrgKxw48t3GPs= -github.com/gballet/go-verkle v0.1.1-0.20230921123936-6a6b1f7a751c/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= -github.com/gballet/go-verkle v0.1.1-0.20230921190644-1a60d228f7b2 h1:LYkA2UpEkEAnhP5RJx/JHOBvYI2O9cYJi36WUMsiuxQ= -github.com/gballet/go-verkle v0.1.1-0.20230921190644-1a60d228f7b2/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= +github.com/gballet/go-verkle v0.1.1-0.20231004173727-0a4e93ed640b h1:LHeiiSTL2FEGCP1ov6FqkikiViqygeVo1ZwJ1x3nYSE= +github.com/gballet/go-verkle v0.1.1-0.20231004173727-0a4e93ed640b/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= @@ -557,8 +529,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -597,8 +569,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= From 49fdfff681b68032b8206f2821a4362dadef826a Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 10 Oct 2023 11:12:50 +0200 Subject: [PATCH 41/99] fix: make sure AccessWitness isn't nil when calling RPC methods (#287) --- internal/ethapi/api.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 3faf1d8bea7a..1e2e2e13d4f5 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1051,6 +1051,11 @@ func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.S } evm, vmError := b.GetEVM(ctx, msg, state, header, &vm.Config{NoBaseFee: true}, &blockCtx) + // Set witness if trie is verkle + if state.Database().TrieDB().IsVerkle() { + state.SetWitness(state.NewAccessWitness()) + } + // Wait for the context to be done and cancel the evm. Even if the // EVM has finished, cancelling may be done (repeatedly) go func() { From 189a4770ddcecd8b4e1a6d26cd38b825cff10582 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 10 Oct 2023 15:27:31 +0200 Subject: [PATCH 42/99] fix: incorrect access copy (#288) --- core/vm/evm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/vm/evm.go b/core/vm/evm.go index 36bd6f32f8f7..a04258b0df22 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -138,7 +138,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time), } if txCtx.Accesses == nil && chainConfig.IsPrague(blockCtx.BlockNumber, blockCtx.Time) { - txCtx.Accesses = evm.StateDB.(*state.StateDB).NewAccessWitness() + evm.Accesses = evm.StateDB.(*state.StateDB).NewAccessWitness() } evm.interpreter = NewEVMInterpreter(evm) return evm From faee2f9587298c79d9d7595b8fff29a6fc979e1f Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Thu, 12 Oct 2023 17:37:02 +0200 Subject: [PATCH 43/99] fix: return serialized root in state root, not its mapping to a scalar field (#289) --- trie/verkle.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/trie/verkle.go b/trie/verkle.go index 68ad6b9e3844..d0a914fde87b 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -286,9 +286,7 @@ func (trie *VerkleTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet, error) { } batch.Write() - // Serialize root commitment form - rootH := root.Hash().BytesLE() - return common.BytesToHash(rootH[:]), nil, nil + return trie.Hash(), nil, nil } // NodeIterator returns an iterator that returns nodes of the trie. Iteration From 0d5126776f47b68396bef304b3fc9a76f1cc56b8 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Wed, 18 Oct 2023 09:03:27 +0200 Subject: [PATCH 44/99] add workaround to use --override.prague (#292) --- core/genesis.go | 9 ++++++--- go.mod | 6 +++--- go.sum | 6 ++++++ 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/core/genesis.go b/core/genesis.go index 2ed1ab949187..6be1faa0cfd3 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -368,9 +368,12 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen // Get the existing chain configuration. 
newcfg := genesis.configOrDefault(stored) applyOverrides(newcfg) - if err := newcfg.CheckConfigForkOrder(); err != nil { - return newcfg, common.Hash{}, err - } + // WORKAROUND it looks like this is broken, because overriding + // pragueTime will cause an error here, claiming that shanghaiTime + // wasn't set (it is). + // if err := newcfg.CheckConfigForkOrder(); err != nil { + // return newcfg, common.Hash{}, err + // } storedcfg := rawdb.ReadChainConfig(db, stored) if storedcfg == nil { log.Warn("Found genesis block without chain config") diff --git a/go.mod b/go.mod index e4f3294231d7..54ad6c5da931 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/cloudflare/cloudflare-go v0.14.0 github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 github.com/consensys/gnark-crypto v0.12.1 - github.com/crate-crypto/go-ipa v0.0.0-20230914135612-d1b03fcb8e58 + github.com/crate-crypto/go-ipa v0.0.0-20231015184653-ceac2650f699 github.com/crate-crypto/go-kzg-4844 v0.3.0 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set/v2 v2.1.0 @@ -26,7 +26,7 @@ require ( github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.1.1-0.20231004173727-0a4e93ed640b + github.com/gballet/go-verkle v0.1.1-0.20231017182008-0af58979f08c github.com/go-stack/stack v1.8.1 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.3.0 @@ -86,7 +86,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.1.1 // indirect github.com/aws/smithy-go v1.1.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.9.0 // indirect + github.com/bits-and-blooms/bitset v1.10.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect diff --git a/go.sum b/go.sum index 7367e231d7e1..0fdb5918afbb 100644 --- a/go.sum +++ b/go.sum @@ -45,6 +45,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.9.0 h1:g1YivPG8jOtrN013Fe8OBXubkiTwvm7/vG2vXz03ANU= github.com/bits-and-blooms/bitset v1.9.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= +github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= @@ -86,6 +88,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHH github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crate-crypto/go-ipa v0.0.0-20230914135612-d1b03fcb8e58 h1:PwUlswsGOrLB677lW4XrlWLeszY3BaDGbvZ6dYk28tQ= github.com/crate-crypto/go-ipa v0.0.0-20230914135612-d1b03fcb8e58/go.mod h1:J+gsi6D4peY0kyhaklyXFRVHOQWI2I5uU0c2+/90HYc= +github.com/crate-crypto/go-ipa v0.0.0-20231015184653-ceac2650f699 h1:ng/jln5iPr92iLbq6dHHa5dbObAgUmAoQO7Zjx1vYHM= +github.com/crate-crypto/go-ipa v0.0.0-20231015184653-ceac2650f699/go.mod 
h1:J+gsi6D4peY0kyhaklyXFRVHOQWI2I5uU0c2+/90HYc= github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -146,6 +150,8 @@ github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqG github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/gballet/go-verkle v0.1.1-0.20231004173727-0a4e93ed640b h1:LHeiiSTL2FEGCP1ov6FqkikiViqygeVo1ZwJ1x3nYSE= github.com/gballet/go-verkle v0.1.1-0.20231004173727-0a4e93ed640b/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= +github.com/gballet/go-verkle v0.1.1-0.20231017182008-0af58979f08c h1:MgjmvQqti384Re4Xg2fqtYYH0k4fWkavDVsVc9op5CE= +github.com/gballet/go-verkle v0.1.1-0.20231017182008-0af58979f08c/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= From a29f5d60c248285f8b7e81921fac55e58e1a399f Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Wed, 18 Oct 2023 15:48:24 +0200 Subject: [PATCH 45/99] Shanghai time workaround (#294) * add workaround to use --override.prague * fix typo in comments * fix: enable overrides at genesis time --- cmd/geth/chaincmd.go | 10 ++++++++-- core/genesis.go | 3 ++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index bed4aa2d2f7c..55c22f7322f3 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -49,7 +49,7 @@ var ( Name: "init", Usage: "Bootstrap and initialize a new genesis block", ArgsUsage: "", - Flags: flags.Merge([]cli.Flag{utils.CachePreimagesFlag}, utils.DatabasePathFlags), + Flags: flags.Merge([]cli.Flag{utils.CachePreimagesFlag, utils.OverridePrague}, utils.DatabasePathFlags), Description: ` The init command initializes a new genesis block and definition for the network. 
This is a destructive action and changes the network in which you will be @@ -201,6 +201,12 @@ func initGenesis(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() + var overrides core.ChainOverrides + if ctx.IsSet(utils.OverridePrague.Name) { + v := ctx.Uint64(utils.OverridePrague.Name) + overrides.OverridePrague = &v + } + for _, name := range []string{"chaindata", "lightchaindata"} { chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false) if err != nil { @@ -210,7 +216,7 @@ func initGenesis(ctx *cli.Context) error { Preimages: ctx.Bool(utils.CachePreimagesFlag.Name), Verkle: true, }) - _, hash, err := core.SetupGenesisBlock(chaindb, triedb, genesis) + _, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides) if err != nil { utils.Fatalf("Failed to write genesis block: %v", err) } diff --git a/core/genesis.go b/core/genesis.go index 6be1faa0cfd3..74294afb7272 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -332,11 +332,11 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen } else { log.Info("Writing custom genesis block") } + applyOverrides(genesis.Config) block, err := genesis.Commit(db, triedb) if err != nil { return genesis.Config, common.Hash{}, err } - applyOverrides(genesis.Config) return genesis.Config, block.Hash(), nil } // We have the genesis block in database(perhaps in ancient database) @@ -360,6 +360,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen // } // Check whether the genesis block is already written. if genesis != nil { + applyOverrides(genesis.Config) hash := genesis.ToBlock().Hash() if hash != stored { return genesis.Config, hash, &GenesisMismatchError{stored, hash} From e437264a8c21fc3e586359a4a22ed641d646351a Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Wed, 18 Oct 2023 15:17:23 -0300 Subject: [PATCH 46/99] remove pre-state tree warmup (#293) * remove pre-state tree warmup Signed-off-by: Ignacio Hagopian * update go-verkle Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --- consensus/beacon/consensus.go | 7 ------- go.mod | 2 +- go.sum | 10 ++-------- 3 files changed, 3 insertions(+), 16 deletions(-) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 6a18084fdb19..4627b4d837bc 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -426,16 +426,9 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea panic("invalid tree type") } if okpre && okpost { - // Resolve values from the pre state, the post - // state should already have the values in memory. // TODO: see if this can be captured at the witness // level, like it used to. for _, key := range keys { - _, err := vtrpre.GetWithHashedKey(key) - if err != nil { - panic(err) - } - // WORKAROUND: the post trie would normally not // need to be searched for keys, as all of them // were resolved during block execution. 
diff --git a/go.mod b/go.mod index 54ad6c5da931..74abe3a17cbf 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.1.1-0.20231017182008-0af58979f08c + github.com/gballet/go-verkle v0.1.1-0.20231018152418-3d7dc630839e github.com/go-stack/stack v1.8.1 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.3.0 diff --git a/go.sum b/go.sum index 0fdb5918afbb..c48a60c99f4c 100644 --- a/go.sum +++ b/go.sum @@ -43,8 +43,6 @@ github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.9.0 h1:g1YivPG8jOtrN013Fe8OBXubkiTwvm7/vG2vXz03ANU= -github.com/bits-and-blooms/bitset v1.9.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= @@ -86,8 +84,6 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-ipa v0.0.0-20230914135612-d1b03fcb8e58 h1:PwUlswsGOrLB677lW4XrlWLeszY3BaDGbvZ6dYk28tQ= -github.com/crate-crypto/go-ipa v0.0.0-20230914135612-d1b03fcb8e58/go.mod h1:J+gsi6D4peY0kyhaklyXFRVHOQWI2I5uU0c2+/90HYc= github.com/crate-crypto/go-ipa v0.0.0-20231015184653-ceac2650f699 h1:ng/jln5iPr92iLbq6dHHa5dbObAgUmAoQO7Zjx1vYHM= github.com/crate-crypto/go-ipa v0.0.0-20231015184653-ceac2650f699/go.mod h1:J+gsi6D4peY0kyhaklyXFRVHOQWI2I5uU0c2+/90HYc= github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= @@ -148,10 +144,8 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILD github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/gballet/go-verkle v0.1.1-0.20231004173727-0a4e93ed640b h1:LHeiiSTL2FEGCP1ov6FqkikiViqygeVo1ZwJ1x3nYSE= -github.com/gballet/go-verkle v0.1.1-0.20231004173727-0a4e93ed640b/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= -github.com/gballet/go-verkle v0.1.1-0.20231017182008-0af58979f08c h1:MgjmvQqti384Re4Xg2fqtYYH0k4fWkavDVsVc9op5CE= -github.com/gballet/go-verkle v0.1.1-0.20231017182008-0af58979f08c/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= +github.com/gballet/go-verkle v0.1.1-0.20231018152418-3d7dc630839e h1:3IB7OJpOmge9NpBvHaaiZlZSQQHVRfzKYe7DWWYdHyM= +github.com/gballet/go-verkle v0.1.1-0.20231018152418-3d7dc630839e/go.mod 
h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= From e1dd462e1b5fe5ddb30d7c03639d7ee9cb52f542 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Mon, 23 Oct 2023 11:39:34 -0300 Subject: [PATCH 47/99] Update go-verkle (#296) * update go-verkle Signed-off-by: Ignacio Hagopian * ci: fix golangci-linter version Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --- .github/workflows/go.yml | 2 +- go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 61542140b004..d5a5687a4e2d 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -28,7 +28,7 @@ jobs: with: go-version: 1.21.1 - name: Download golangci-lint - run: wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s latest + run: wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.54.2 - name: Lint run: ./bin/golangci-lint run - name: Vet diff --git a/go.mod b/go.mod index 74abe3a17cbf..bb1e8dc0027b 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.1.1-0.20231018152418-3d7dc630839e + github.com/gballet/go-verkle v0.1.1-0.20231020124853-d124d1998b1a github.com/go-stack/stack v1.8.1 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.3.0 diff --git a/go.sum b/go.sum index c48a60c99f4c..4a9016a0b5b9 100644 --- a/go.sum +++ b/go.sum @@ -144,8 +144,8 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILD github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/gballet/go-verkle v0.1.1-0.20231018152418-3d7dc630839e h1:3IB7OJpOmge9NpBvHaaiZlZSQQHVRfzKYe7DWWYdHyM= -github.com/gballet/go-verkle v0.1.1-0.20231018152418-3d7dc630839e/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= +github.com/gballet/go-verkle v0.1.1-0.20231020124853-d124d1998b1a h1:kqhR2nTIep0lw7zJBp2ju+fWbqP3PojUw3L+jv1qBK4= +github.com/gballet/go-verkle v0.1.1-0.20231020124853-d124d1998b1a/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= From 4f9beb0526030825f17df06c3e07eae1ee48d400 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Wed, 25 Oct 2023 15:56:06 -0300 Subject: [PATCH 48/99] proofs: remove post-values in Multiproof (#297) * update proof generation Signed-off-by: Ignacio Hagopian * simplification and comment Signed-off-by: Ignacio Hagopian * update go-verkle Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --- 
consensus/beacon/consensus.go | 17 ----------------- core/state_processor_test.go | 2 +- go.mod | 4 ++-- go.sum | 8 ++++---- trie/verkle.go | 29 +++++++++++++---------------- 5 files changed, 20 insertions(+), 40 deletions(-) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 4627b4d837bc..2ca701c42f56 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -426,23 +426,6 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea panic("invalid tree type") } if okpre && okpost { - // TODO: see if this can be captured at the witness - // level, like it used to. - for _, key := range keys { - // WORKAROUND: the post trie would normally not - // need to be searched for keys, as all of them - // were resolved during block execution. - // But since the prefetcher isn't currently used - // with verkle, the values that are read but not - // written to, are not resolved as they are read - // straight from the snapshot. They must be read - // in order to build the proof. - _, err = vtrpost.GetWithHashedKey(key) - if err != nil { - panic(err) - } - } - if len(keys) > 0 { p, k, err = trie.ProveAndSerialize(vtrpre, vtrpost, keys, vtrpre.FlatdbNodeResolver) if err != nil { diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 6cdb2e9a4f33..8ed660a5042e 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -518,7 +518,7 @@ func TestProcessVerkle(t *testing.T) { //f.Write(buf.Bytes()) //fmt.Printf("root= %x\n", chain[0].Root()) // check the proof for the last block - err := trie.DeserializeAndVerifyVerkleProof(proofs[1], chain[0].Root().Bytes(), keyvals[1]) + err := trie.DeserializeAndVerifyVerkleProof(proofs[1], chain[0].Root().Bytes(), chain[1].Root().Bytes(), keyvals[1]) if err != nil { t.Fatal(err) } diff --git a/go.mod b/go.mod index bb1e8dc0027b..25fb4e4ae0f7 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/cloudflare/cloudflare-go v0.14.0 github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 github.com/consensys/gnark-crypto v0.12.1 - github.com/crate-crypto/go-ipa v0.0.0-20231015184653-ceac2650f699 + github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/crate-crypto/go-kzg-4844 v0.3.0 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set/v2 v2.1.0 @@ -26,7 +26,7 @@ require ( github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.1.1-0.20231020124853-d124d1998b1a + github.com/gballet/go-verkle v0.1.1-0.20231025151349-87337dd2894a github.com/go-stack/stack v1.8.1 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.3.0 diff --git a/go.sum b/go.sum index 4a9016a0b5b9..5d3e1567d16a 100644 --- a/go.sum +++ b/go.sum @@ -84,8 +84,8 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-ipa v0.0.0-20231015184653-ceac2650f699 h1:ng/jln5iPr92iLbq6dHHa5dbObAgUmAoQO7Zjx1vYHM= -github.com/crate-crypto/go-ipa v0.0.0-20231015184653-ceac2650f699/go.mod h1:J+gsi6D4peY0kyhaklyXFRVHOQWI2I5uU0c2+/90HYc= 
+github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -144,8 +144,8 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILD github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/gballet/go-verkle v0.1.1-0.20231020124853-d124d1998b1a h1:kqhR2nTIep0lw7zJBp2ju+fWbqP3PojUw3L+jv1qBK4= -github.com/gballet/go-verkle v0.1.1-0.20231020124853-d124d1998b1a/go.mod h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= +github.com/gballet/go-verkle v0.1.1-0.20231025151349-87337dd2894a h1:UV5Et3Ab62e6hQ6vDZC0h0i9hmKm8KKtV78zDOzud08= +github.com/gballet/go-verkle v0.1.1-0.20231025151349-87337dd2894a/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= diff --git a/trie/verkle.go b/trie/verkle.go index d0a914fde87b..748464ecdd56 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -336,27 +336,16 @@ func ProveAndSerialize(pretrie, posttrie *VerkleTrie, keys [][]byte, resolver ve return p, kvps, nil } -type set = map[string]struct{} - -func addKey(s set, key []byte) { - s[string(key)] = struct{}{} -} - -func DeserializeAndVerifyVerkleProof(vp *verkle.VerkleProof, root []byte, statediff verkle.StateDiff) error { - rootC := new(verkle.Point) - rootC.SetBytes(root) - - var others set = set{} // Mark when an "other" stem has been seen +func DeserializeAndVerifyVerkleProof(vp *verkle.VerkleProof, preStateRoot []byte, postStateRoot []byte, statediff verkle.StateDiff) error { + // TODO: check that `OtherStems` have expected length and values. proof, err := verkle.DeserializeProof(vp, statediff) if err != nil { return fmt.Errorf("verkle proof deserialization error: %w", err) } - for _, stem := range proof.PoaStems { - addKey(others, stem) - } - + rootC := new(verkle.Point) + rootC.SetBytes(preStateRoot) pretree, err := verkle.PreStateTreeFromProof(proof, rootC) if err != nil { return fmt.Errorf("error rebuilding the pre-tree from proof: %w", err) @@ -385,12 +374,20 @@ func DeserializeAndVerifyVerkleProof(vp *verkle.VerkleProof, root []byte, stated } } + // TODO: this is necessary to verify that the post-values are the correct ones. + // But all this can be avoided with a even faster way. The EVM block execution can + // keep track of the written keys, and compare that list with this post-values list. + // This can avoid regenerating the post-tree which is somewhat expensive. 
posttree, err := verkle.PostStateTreeFromStateDiff(pretree, statediff) if err != nil { return fmt.Errorf("error rebuilding the post-tree from proof: %w", err) } + regeneratedPostTreeRoot := posttree.Commitment().Bytes() + if !bytes.Equal(regeneratedPostTreeRoot[:], postStateRoot) { + return fmt.Errorf("post tree root mismatch: %x != %x", regeneratedPostTreeRoot, postStateRoot) + } - return verkle.VerifyVerkleProofWithPreAndPostTrie(proof, pretree, posttree) + return verkle.VerifyVerkleProofWithPreState(proof, pretree) } // ChunkedCode represents a sequence of 32-bytes chunks of code (31 bytes of which From 4815fe2686560bdb32ded983fc276383c5df869b Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 27 Oct 2023 13:14:10 +0200 Subject: [PATCH 49/99] remove unused EndVerkleTransition in blockchain.go (#301) --- core/blockchain.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 27a74de822bf..e2274c03ce11 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -2539,10 +2539,6 @@ func (bc *BlockChain) ReorgThroughVerkleTransition() { bc.stateCache.ReorgThroughVerkleTransition() } -func (bc *BlockChain) EndVerkleTransition() { - bc.stateCache.EndVerkleTransition() -} - func (bc *BlockChain) AddRootTranslation(originalRoot, translatedRoot common.Hash) { bc.stateCache.AddRootTranslation(originalRoot, translatedRoot) } From afc606759032aa59ac60d386ccdfccc30d38f7d3 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Mon, 27 Nov 2023 12:33:28 -0300 Subject: [PATCH 50/99] mod: update go-verkle (#312) * update go-verkle Signed-off-by: Ignacio Hagopian * fix breaking apis Signed-off-by: Ignacio Hagopian * fix apis Signed-off-by: Ignacio Hagopian * avoid test failing due to timeouts Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --- .github/workflows/go.yml | 4 +--- cmd/geth/verkle.go | 8 ++++---- go.mod | 2 +- go.sum | 4 ++-- trie/transition.go | 2 +- trie/verkle.go | 8 ++++---- 6 files changed, 13 insertions(+), 15 deletions(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index d5a5687a4e2d..1678d4f3e8b0 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -42,7 +42,5 @@ jobs: uses: actions/setup-go@v2 with: go-version: 1.21.1 - - name: Download precomputed points - run: wget -nv https://github.com/gballet/go-verkle/releases/download/banderwagonv3/precomp -Otrie/utils/precomp - name: Test - run: go test ./... + run: go test ./... -timeout=10h diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go index d1953697b9bd..2f65a5c29554 100644 --- a/cmd/geth/verkle.go +++ b/cmd/geth/verkle.go @@ -202,7 +202,7 @@ func convertToVerkle(ctx *cli.Context) error { // Otherwise, store the previous group in the tree with a // stem insertion. - vRoot.InsertStem(chunkkey[:31], values, chaindb.Get) + vRoot.InsertValuesAtStem(chunkkey[:31], values, chaindb.Get) } // Write the code size in the account header group @@ -267,7 +267,7 @@ func convertToVerkle(ctx *cli.Context) error { copy(k[:], []byte(s)) // reminder that InsertStem will merge leaves // if they exist. 
- vRoot.InsertStem(k[:31], vs, chaindb.Get) + vRoot.InsertValuesAtStem(k[:31], vs, chaindb.Get) } translatedStorage = make(map[string][][]byte) vRoot.FlushAtDepth(2, saveverkle) @@ -276,7 +276,7 @@ func convertToVerkle(ctx *cli.Context) error { for s, vs := range translatedStorage { var k [31]byte copy(k[:], []byte(s)) - vRoot.InsertStem(k[:31], vs, chaindb.Get) + vRoot.InsertValuesAtStem(k[:31], vs, chaindb.Get) } storageIt.Release() if storageIt.Error() != nil { @@ -285,7 +285,7 @@ func convertToVerkle(ctx *cli.Context) error { } } // Finish with storing the complete account header group inside the tree. - vRoot.InsertStem(stem[:31], newValues, chaindb.Get) + vRoot.InsertValuesAtStem(stem[:31], newValues, chaindb.Get) if time.Since(lastReport) > time.Second*8 { log.Info("Traversing state", "accounts", accounts, "elapsed", common.PrettyDuration(time.Since(start))) diff --git a/go.mod b/go.mod index 25fb4e4ae0f7..76a42d440d42 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.1.1-0.20231025151349-87337dd2894a + github.com/gballet/go-verkle v0.1.1-0.20231125115329-d193f0b46e01 github.com/go-stack/stack v1.8.1 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.3.0 diff --git a/go.sum b/go.sum index 5d3e1567d16a..e7222885c1bf 100644 --- a/go.sum +++ b/go.sum @@ -144,8 +144,8 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILD github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/gballet/go-verkle v0.1.1-0.20231025151349-87337dd2894a h1:UV5Et3Ab62e6hQ6vDZC0h0i9hmKm8KKtV78zDOzud08= -github.com/gballet/go-verkle v0.1.1-0.20231025151349-87337dd2894a/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc= +github.com/gballet/go-verkle v0.1.1-0.20231125115329-d193f0b46e01 h1:Jm7DG6/BptrrNgOh9Jb6LPBbz75VJA5FkFKB4O/zbQw= +github.com/gballet/go-verkle v0.1.1-0.20231125115329-d193f0b46e01/go.mod h1:OzHSBt37xRRHc27lb9PaCldBnJYQZP8KcMdYyOB2dtU= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= diff --git a/trie/transition.go b/trie/transition.go index 083aa57db1ac..51fa7c373ced 100644 --- a/trie/transition.go +++ b/trie/transition.go @@ -174,7 +174,7 @@ func (t *TransitionTrie) UpdateStem(key []byte, values [][]byte) error { trie := t.overlay switch root := trie.root.(type) { case *verkle.InternalNode: - return root.InsertStem(key, values, t.overlay.FlatdbNodeResolver) + return root.InsertValuesAtStem(key, values, t.overlay.FlatdbNodeResolver) default: panic("invalid root type") } diff --git a/trie/verkle.go b/trie/verkle.go index 748464ecdd56..f5abe124ed73 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -107,7 +107,7 @@ func (t *VerkleTrie) GetAccount(addr common.Address) (*types.StateAccount, error ) switch t.root.(type) { case *verkle.InternalNode: - values, err = t.root.(*verkle.InternalNode).GetStem(versionkey[:31], 
t.FlatdbNodeResolver) + values, err = t.root.(*verkle.InternalNode).GetValuesAtStem(versionkey[:31], t.FlatdbNodeResolver) default: return nil, errInvalidRootType } @@ -176,7 +176,7 @@ func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount) switch root := t.root.(type) { case *verkle.InternalNode: - err = root.InsertStem(stem, values, t.FlatdbNodeResolver) + err = root.InsertValuesAtStem(stem, values, t.FlatdbNodeResolver) default: return errInvalidRootType } @@ -191,7 +191,7 @@ func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount) func (trie *VerkleTrie) UpdateStem(key []byte, values [][]byte) error { switch root := trie.root.(type) { case *verkle.InternalNode: - return root.InsertStem(key, values, trie.FlatdbNodeResolver) + return root.InsertValuesAtStem(key, values, trie.FlatdbNodeResolver) default: panic("invalid root type") } @@ -225,7 +225,7 @@ func (t *VerkleTrie) DeleteAccount(addr common.Address) error { switch root := t.root.(type) { case *verkle.InternalNode: - err = root.InsertStem(stem, values, t.FlatdbNodeResolver) + err = root.InsertValuesAtStem(stem, values, t.FlatdbNodeResolver) default: return errInvalidRootType } From f95255839aa3445ccfda6a5ef178f87a668e3916 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Wed, 20 Dec 2023 18:35:42 +0100 Subject: [PATCH 51/99] fix: main storage offset value (#329) --- trie/utils/verkle.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index 17fdf1ade343..bf9872e0f347 100644 --- a/trie/utils/verkle.go +++ b/trie/utils/verkle.go @@ -37,9 +37,9 @@ var ( zero = uint256.NewInt(0) VerkleNodeWidthLog2 = 8 HeaderStorageOffset = uint256.NewInt(64) - mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(256), 31-uint(VerkleNodeWidthLog2)) + mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(1), 248-uint(VerkleNodeWidthLog2)) CodeOffset = uint256.NewInt(128) - MainStorageOffset = new(uint256.Int).Lsh(uint256.NewInt(256), 31) + MainStorageOffset = new(uint256.Int).Lsh(uint256.NewInt(1), 248 /* 8 * 31*/) VerkleNodeWidth = uint256.NewInt(256) codeStorageDelta = uint256.NewInt(0).Sub(CodeOffset, HeaderStorageOffset) From 6843ed14e20b2fc6a887d72e7d97ca66d577f163 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Sat, 20 Jan 2024 07:57:26 -0300 Subject: [PATCH 52/99] vm: fix access witness recording (#337) Signed-off-by: Ignacio Hagopian --- core/vm/evm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/vm/evm.go b/core/vm/evm.go index a04258b0df22..7ad6c13c5daf 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -214,7 +214,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 { if evm.chainRules.IsPrague { // proof of absence - tryConsumeGas(&gas, evm.Accesses.TouchAndChargeProofOfAbsence(caller.Address().Bytes())) + tryConsumeGas(&gas, evm.Accesses.TouchAndChargeProofOfAbsence(addr.Bytes())) } // Calling a non existing account, don't do anything, but ping the tracer if debug { From d6477cdc7d97fabfdca0e72054c62650cf911e76 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Sat, 20 Jan 2024 07:58:02 -0300 Subject: [PATCH 53/99] mod: move from gballet/go-verkle to ethereum/go-verkle (#335) Signed-off-by: Ignacio Hagopian --- cmd/geth/verkle.go | 2 +- consensus/beacon/consensus.go | 2 +- core/chain_makers.go | 2 +- 
core/state/database.go | 2 +- core/state_processor.go | 2 +- core/types/block.go | 2 +- go.mod | 1 + go.sum | 2 ++ trie/transition.go | 2 +- trie/utils/verkle.go | 2 +- trie/utils/verkle_test.go | 2 +- trie/verkle.go | 2 +- trie/verkle_iterator.go | 2 +- trie/verkle_iterator_test.go | 2 +- trie/verkle_test.go | 2 +- 15 files changed, 16 insertions(+), 13 deletions(-) diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go index 2f65a5c29554..ea424ebaf138 100644 --- a/cmd/geth/verkle.go +++ b/cmd/geth/verkle.go @@ -36,7 +36,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" tutils "github.com/ethereum/go-ethereum/trie/utils" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" "github.com/holiman/uint256" cli "github.com/urfave/cli/v2" ) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 2ca701c42f56..b09ece674804 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -31,7 +31,7 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/utils" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) diff --git a/core/chain_makers.go b/core/chain_makers.go index 5d8ade8a0fb0..d43e9126a2b6 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -32,7 +32,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" ) // BlockGen creates blocks for testing. diff --git a/core/state/database.go b/core/state/database.go index af8cc1a36e30..5707e2c88b60 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -30,7 +30,7 @@ import ( "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/utils" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" ) const ( diff --git a/core/state_processor.go b/core/state_processor.go index 5d10bceb1817..84c2f6429a30 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -36,7 +36,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" tutils "github.com/ethereum/go-ethereum/trie/utils" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) diff --git a/core/types/block.go b/core/types/block.go index 90cb5df40863..64c9fe80ff22 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -29,7 +29,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rlp" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" ) // A BlockNonce is a 64-bit hash which proves (combined with the diff --git a/go.mod b/go.mod index 76a42d440d42..81367549719c 100644 --- a/go.mod +++ b/go.mod @@ -96,6 +96,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect + github.com/ethereum/go-verkle v0.1.1-0.20240119133216-f8289fc59149 // indirect github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect github.com/go-ole/go-ole v1.2.1 // indirect diff --git a/go.sum b/go.sum index e7222885c1bf..bf0e786ed54b 100644 --- a/go.sum +++ b/go.sum @@ -127,6 +127,8 @@ 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/ethereum/c-kzg-4844 v0.3.0 h1:3Y3hD6l5i0dEYsBL50C+Om644kve3pNqoAcvE26o9zI= github.com/ethereum/c-kzg-4844 v0.3.0/go.mod h1:WI2Nd82DMZAAZI1wV2neKGost9EKjvbpQR9OqE5Qqa8= +github.com/ethereum/go-verkle v0.1.1-0.20240119133216-f8289fc59149 h1:7gbu2YdLL8SicVklig4nyizkWkw367BP+5eEivNPy04= +github.com/ethereum/go-verkle v0.1.1-0.20240119133216-f8289fc59149/go.mod h1:cZmLDzTyZPwUygE2ksQEcxOLZ8YpfRghnVtfxRnhgJM= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= diff --git a/trie/transition.go b/trie/transition.go index 51fa7c373ced..24daf436ed8a 100644 --- a/trie/transition.go +++ b/trie/transition.go @@ -21,7 +21,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie/trienode" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" ) type TransitionTrie struct { diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index bf9872e0f347..16c707c13acb 100644 --- a/trie/utils/verkle.go +++ b/trie/utils/verkle.go @@ -21,7 +21,7 @@ import ( "sync" "github.com/crate-crypto/go-ipa/bandersnatch/fr" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) diff --git a/trie/utils/verkle_test.go b/trie/utils/verkle_test.go index f0a0ed7d2894..66f1cc473ea6 100644 --- a/trie/utils/verkle_test.go +++ b/trie/utils/verkle_test.go @@ -23,7 +23,7 @@ import ( "math/rand" "testing" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) diff --git a/trie/verkle.go b/trie/verkle.go index f5abe124ed73..3a402bebe5b6 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -28,7 +28,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/utils" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) diff --git a/trie/verkle_iterator.go b/trie/verkle_iterator.go index c5f59a0f5937..5f5fc725ed46 100644 --- a/trie/verkle_iterator.go +++ b/trie/verkle_iterator.go @@ -19,7 +19,7 @@ package trie import ( "github.com/ethereum/go-ethereum/common" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" ) type verkleNodeIteratorState struct { diff --git a/trie/verkle_iterator_test.go b/trie/verkle_iterator_test.go index 1fd3fd76a6d9..d1611feee32c 100644 --- a/trie/verkle_iterator_test.go +++ b/trie/verkle_iterator_test.go @@ -24,7 +24,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/trie/utils" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" ) func TestVerkleIterator(t *testing.T) { diff --git a/trie/verkle_test.go b/trie/verkle_test.go index df7a68ccee80..aef2eef76ed6 100644 --- a/trie/verkle_test.go +++ b/trie/verkle_test.go @@ -23,7 +23,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/trie/utils" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" ) func TestReproduceTree(t *testing.T) { From f46632148cb43b9e7f99f039901a06c7ad1438be Mon Sep 17 00:00:00 
2001 From: Ignacio Hagopian Date: Sat, 20 Jan 2024 08:02:54 -0300 Subject: [PATCH 54/99] accesswitness: avoid charging cost for origin and target (#334) * accesswitness: avoid charging cost for origin and target Signed-off-by: Ignacio Hagopian * make the linter happy Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --- core/state/access_witness.go | 44 +++++++++++++++++++++--------------- core/state_processor_test.go | 10 ++++---- 2 files changed, 31 insertions(+), 23 deletions(-) diff --git a/core/state/access_witness.go b/core/state/access_witness.go index 8b03cf371a60..65d7bb25a32c 100644 --- a/core/state/access_witness.go +++ b/core/state/access_witness.go @@ -139,27 +139,35 @@ func (aw *AccessWitness) TouchAndChargeContractCreateCompleted(addr []byte) uint } func (aw *AccessWitness) TouchTxOriginAndComputeGas(originAddr []byte) uint64 { - var gas uint64 - gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.VersionLeafKey) - gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.CodeSizeLeafKey) - gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.CodeKeccakLeafKey) - gas += aw.TouchAddressOnWriteAndComputeGas(originAddr, zeroTreeIndex, utils.NonceLeafKey) - gas += aw.TouchAddressOnWriteAndComputeGas(originAddr, zeroTreeIndex, utils.BalanceLeafKey) - return gas + // var gas uint64 + // gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.VersionLeafKey) + // gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.CodeSizeLeafKey) + // gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.CodeKeccakLeafKey) + // gas += aw.TouchAddressOnWriteAndComputeGas(originAddr, zeroTreeIndex, utils.NonceLeafKey) + // gas += aw.TouchAddressOnWriteAndComputeGas(originAddr, zeroTreeIndex, utils.BalanceLeafKey) + + // Kaustinen note: we're currently experimenting with stop chargin gas for the origin address + // so simple transfer still take 21000 gas. This is to potentially avoid breaking existing tooling. + // This is the reason why we return 0 instead of `gas`. 
+ return 0 } func (aw *AccessWitness) TouchTxExistingAndComputeGas(targetAddr []byte, sendsValue bool) uint64 { - var gas uint64 - gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.VersionLeafKey) - gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.CodeSizeLeafKey) - gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.CodeKeccakLeafKey) - gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.NonceLeafKey) - if sendsValue { - gas += aw.TouchAddressOnWriteAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) - } else { - gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) - } - return gas + // var gas uint64 + // gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.VersionLeafKey) + // gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.CodeSizeLeafKey) + // gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.CodeKeccakLeafKey) + // gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.NonceLeafKey) + // if sendsValue { + // gas += aw.TouchAddressOnWriteAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) + // } else { + // gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) + // } + + // Kaustinen note: we're currently experimenting with stop chargin gas for the origin address + // so simple transfer still take 21000 gas. This is to potentially avoid breaking existing tooling. + // This is the reason why we return 0 instead of `gas`. + return 0 } func (aw *AccessWitness) TouchAddressOnWriteAndComputeGas(addr []byte, treeIndex uint256.Int, subIndex byte) uint64 { diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 8ed660a5042e..7bd63475ca9e 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -17,7 +17,6 @@ package core import ( - //"bytes" "crypto/ecdsa" //"fmt" @@ -481,10 +480,10 @@ func TestProcessVerkle(t *testing.T) { // is now independent of the blockchain database. 
gspec.MustCommit(gendb) - txCost1 := params.WitnessBranchWriteCost*2 + params.WitnessBranchReadCost*2 + params.WitnessChunkWriteCost*3 + params.WitnessChunkReadCost*10 + params.TxGas - txCost2 := params.WitnessBranchWriteCost + params.WitnessBranchReadCost*2 + params.WitnessChunkWriteCost*2 + params.WitnessChunkReadCost*10 + params.TxGas - contractCreationCost := intrinsicContractCreationGas + uint64(6900 /* from */ +7700 /* creation */ +2939 /* execution costs */) - codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(6900 /* from */ +7000 /* creation */ +315944 /* execution costs */) + txCost1 := params.TxGas + txCost2 := params.TxGas + contractCreationCost := intrinsicContractCreationGas + uint64(7700 /* creation */ +2939 /* execution costs */) + codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(7000 /* creation */ +315944 /* execution costs */) blockGasUsagesExpected := []uint64{ txCost1*2 + txCost2, txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, @@ -492,6 +491,7 @@ func TestProcessVerkle(t *testing.T) { // TODO utiliser GenerateChainWithGenesis pour le rendre plus pratique chain, _, proofs, keyvals := GenerateVerkleChain(gspec.Config, genesis, beacon.New(ethash.NewFaker()), gendb, 2, func(i int, gen *BlockGen) { gen.SetPoS() + // TODO need to check that the tx cost provided is the exact amount used (no remaining left-over) tx, _ := types.SignTx(types.NewTransaction(uint64(i)*3, common.Address{byte(i), 2, 3}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) gen.AddTx(tx) From 7757a53d477f4f9246ef348a81f00089625c00cc Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 26 Jan 2024 11:14:52 +0100 Subject: [PATCH 55/99] force activation of EIP-6780 (#338) * force activation of EIP-6780 * rework the PR in a more manageable way --- core/vm/interpreter.go | 2 +- core/vm/jump_table.go | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 1bc0e80dfc44..17b30fae1203 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -58,7 +58,7 @@ func NewEVMInterpreter(evm *EVM) *EVMInterpreter { switch { case evm.chainRules.IsPrague: // TODO replace with prooper instruction set when fork is specified - table = &shanghaiInstructionSet + table = &pragueInstructionSet case evm.chainRules.IsCancun: table = &cancunInstructionSet case evm.chainRules.IsShanghai: diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 0a881236f64e..5dcabe387d6f 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -57,6 +57,7 @@ var ( mergeInstructionSet = newMergeInstructionSet() shanghaiInstructionSet = newShanghaiInstructionSet() cancunInstructionSet = newCancunInstructionSet() + pragueInstructionSet = newPragueInstructionSet() ) // JumpTable contains the EVM opcodes supported at a given fork. 
@@ -80,6 +81,12 @@ func validate(jt JumpTable) JumpTable { return jt } +func newPragueInstructionSet() JumpTable { + instructionSet := newShanghaiInstructionSet() + enable6780(&instructionSet) + return validate(instructionSet) +} + func newCancunInstructionSet() JumpTable { instructionSet := newShanghaiInstructionSet() enable4844(&instructionSet) // EIP-4844 (DATAHASH opcode) From a5d74825aa4aa4e5515e6f21acaed16a7a7ef6d2 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 26 Jan 2024 11:27:18 +0100 Subject: [PATCH 56/99] fix: return error if witness costs OOG in instructions (#319) --- core/vm/instructions.go | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 84edacb1b9bc..a7860f0fde82 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -347,7 +347,10 @@ func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) cs := uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())) if interpreter.evm.chainRules.IsPrague { statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, trieUtils.CodeSizeLeafKey) - scope.Contract.UseGas(statelessGas) + if !scope.Contract.UseGas(statelessGas) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } } slot.SetUint64(cs) return nil, nil @@ -374,7 +377,11 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ contractAddr := scope.Contract.Address() paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(scope.Contract.Code, uint64CodeOffset, length.Uint64()) if interpreter.evm.chainRules.IsPrague { - scope.Contract.UseGas(touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], copyOffset, nonPaddedCopyLength, uint64(len(scope.Contract.Code)), interpreter.evm.Accesses)) + statelessGas := touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], copyOffset, nonPaddedCopyLength, uint64(len(scope.Contract.Code)), interpreter.evm.Accesses) + if !scope.Contract.UseGas(statelessGas) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } } scope.Memory.Set(memOffset.Uint64(), uint64(len(paddedCodeCopy)), paddedCodeCopy) return nil, nil @@ -433,8 +440,11 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) self: AccountRef(addr), } paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(code, uint64CodeOffset, length.Uint64()) - gas := touchCodeChunksRangeOnReadAndChargeGas(addr[:], copyOffset, nonPaddedCopyLength, uint64(len(contract.Code)), interpreter.evm.Accesses) - scope.Contract.UseGas(gas) + statelessGas := touchCodeChunksRangeOnReadAndChargeGas(addr[:], copyOffset, nonPaddedCopyLength, uint64(len(contract.Code)), interpreter.evm.Accesses) + if !scope.Contract.UseGas(statelessGas) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } scope.Memory.Set(memOffset.Uint64(), length.Uint64(), paddedCodeCopy) } else { codeCopy := getData(interpreter.evm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64()) @@ -964,7 +974,10 @@ func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by // advanced past this boundary. 
contractAddr := scope.Contract.Address() statelessGas := touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], *pc+1, uint64(1), uint64(len(scope.Contract.Code)), interpreter.evm.Accesses) - scope.Contract.UseGas(statelessGas) + if !scope.Contract.UseGas(statelessGas) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } } } else { scope.Stack.push(integer.Clear()) @@ -990,7 +1003,10 @@ func makePush(size uint64, pushByteSize int) executionFunc { if interpreter.evm.chainRules.IsPrague { contractAddr := scope.Contract.Address() statelessGas := touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], uint64(startMin), uint64(pushByteSize), uint64(len(scope.Contract.Code)), interpreter.evm.Accesses) - scope.Contract.UseGas(statelessGas) + if !scope.Contract.UseGas(statelessGas) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } } integer := new(uint256.Int) From 8f2ffed7bb662d307f07314e0022d06c8df2f08b Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 26 Jan 2024 11:32:22 +0100 Subject: [PATCH 57/99] only add the coinbase to the witness if the block reward > 0 (#332) --- consensus/beacon/consensus.go | 5 ----- core/state_transition.go | 11 +++++++++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index b09ece674804..ef483c8d328e 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -363,11 +363,6 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types. state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.CodeKeccakLeafKey) state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.CodeSizeLeafKey) } - state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.VersionLeafKey) - state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.BalanceLeafKey) - state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.NonceLeafKey) - state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.CodeKeccakLeafKey) - state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.CodeSizeLeafKey) } // FinalizeAndAssemble implements consensus.Engine, setting the final state and diff --git a/core/state_transition.go b/core/state_transition.go index 2bdfef1c0552..969e7a75fb9b 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -29,6 +29,8 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/holiman/uint256" ) // ExecutionResult includes all output after executing given evm @@ -476,6 +478,15 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { fee := new(big.Int).SetUint64(st.gasUsed()) fee.Mul(fee, effectiveTip) st.state.AddBalance(st.evm.Context.Coinbase, fee) + + // add the coinbase to the witness iff the fee is greater than 0 + if rules.IsPrague && fee.Sign() != 0 { + st.evm.Accesses.TouchAddressOnWriteAndComputeGas(st.evm.Context.Coinbase[:], uint256.Int{}, utils.VersionLeafKey) + st.evm.Accesses.TouchAddressOnWriteAndComputeGas(st.evm.Context.Coinbase[:], uint256.Int{}, utils.BalanceLeafKey) + st.evm.Accesses.TouchAddressOnWriteAndComputeGas(st.evm.Context.Coinbase[:], uint256.Int{}, utils.NonceLeafKey) + 
st.evm.Accesses.TouchAddressOnWriteAndComputeGas(st.evm.Context.Coinbase[:], uint256.Int{}, utils.CodeKeccakLeafKey) + st.evm.Accesses.TouchAddressOnWriteAndComputeGas(st.evm.Context.Coinbase[:], uint256.Int{}, utils.CodeSizeLeafKey) + } } return &ExecutionResult{ From 14d5a0177513059fd91c6476ee4a906fabea3b17 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 26 Jan 2024 12:58:05 +0100 Subject: [PATCH 58/99] change timeout to 20 minutes --- .github/workflows/go.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 1678d4f3e8b0..76d69a7108db 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -43,4 +43,4 @@ jobs: with: go-version: 1.21.1 - name: Test - run: go test ./... -timeout=10h + run: go test ./... -timeout=20m From 85a7198c2d122c9a865bffb9e6475645e1ab658b Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 26 Jan 2024 14:08:54 +0100 Subject: [PATCH 59/99] test: check witness when contract creation fails (#333) --- core/state_processor_test.go | 130 +++++++++++++++++++++++++++++++++++ trie/utils/verkle.go | 35 ++++------ 2 files changed, 142 insertions(+), 23 deletions(-) diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 7bd63475ca9e..e06eca11b688 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -17,6 +17,7 @@ package core import ( + "bytes" "crypto/ecdsa" //"fmt" @@ -542,3 +543,132 @@ func TestProcessVerkle(t *testing.T) { } } } + +func TestProcessVerkleiInvalidContractCreation(t *testing.T) { + var ( + config = ¶ms.ChainConfig{ + ChainID: big.NewInt(69420), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + ShanghaiTime: u64(0), + PragueTime: u64(0), + TerminalTotalDifficulty: common.Big0, + TerminalTotalDifficultyPassed: true, + ProofInBlocks: true, + } + bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain + gendb = rawdb.NewMemoryDatabase() // Database for the block-generation code, they must be separate as they are path-based. + coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") + account1 = common.HexToAddress("0x687704DB07e902e9A8B3754031D168D46E3D586e") + account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") + gspec = &Genesis{ + Config: config, + Alloc: GenesisAlloc{ + coinbase: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account1: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account2: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 1, + }, + }, + } + ) + // Verkle trees use the snapshot, which must be enabled before the + // data is saved into the tree+database. + genesis := gspec.MustCommit(bcdb) + + // Commit the genesis block to the block-generation database as it + // is now independent of the blockchain database. + gspec.MustCommit(gendb) + + // Create two blocks that reproduce what is happening on kaustinen. 
+ // - The first block contains two failing contract creation transactions, that write to storage before they revert. + // - The second block contains a single failing contract creation transaction, that fails right off the bat. + _, _, _, statediff := GenerateVerkleChain(gspec.Config, genesis, beacon.New(ethash.NewFaker()), gendb, 2, func(i int, gen *BlockGen) { + gen.SetPoS() + + if i == 0 { + var tx1, tx2, tx3 types.Transaction + // SSTORE at slot 105 and reverts + tx1payload := common.Hex2Bytes("f8d48084479c2c18830186a08080b8806000602955bda3f9600060ca55600060695523b360006039551983576000601255b0620c2fde2c592ac2600060bc55e0ac6000606455a63e22600060e655eb607e605c5360a2605d5360c7605e53601d605f5360eb606053606b606153608e60625360816063536079606453601e60655360fc60665360b7606753608b60685383021e7ca0cc20c65a97d2e526b8ec0f4266e8b01bdcde43b9aeb59d8bfb44e8eb8119c109a07a8e751813ae1b2ce734960dbc39a4f954917d7822a2c5d1dca18b06c584131f") + if err := tx1.UnmarshalBinary(tx1payload); err != nil { + t.Fatal(err) + } + gen.AddTx(&tx1) + + // SSTORE at slot 133 and reverts + tx2payload := common.Hex2Bytes("02f8db83010f2c01843b9aca0084479c2c18830186a08080b88060006085553fad6000600a55600060565555600060b55506600060cf557f1b8b38183e7bd1bdfaa7123c5a4976e54cce0e42049d841411978fd3595e25c66019527f0538943712953cf08900aae40222a40b2d5a4ac8075ad8cf0870e2be307edbb96039527f9f3174ff85024747041ae7a611acffb987c513c088d90ab288aec080a0cd6ac65ce2cb0a912371f6b5a551ba8caffc22ec55ad4d3cb53de41d05eb77b6a02e0dfe8513dfa6ec7bfd7eda6f5c0dac21b39b982436045e128cec46cfd3f960") + if err := tx2.UnmarshalBinary(tx2payload); err != nil { + t.Fatal(err) + } + gen.AddTx(&tx2) + + // this one is a simple transfer that succeeds, necessary to get the correct nonce in the other block. + tx3payload := common.Hex2Bytes("f8e80184479c2c18830186a094bbbbde4ca27f83fc18aa108170547ff57675936a80b8807ff71f7c15faadb969a76a5f54a81a0117e1e743cb7f24e378eda28442ea4c6eb6604a527fb5409e5718d44e23bfffac926e5ea726067f772772e7e19446acba0c853f62f5606a526020608a536088608b536039608c536004608d5360af608e537f7f7675d9f210e0a61564e6d11e7cd75f5bc9009ac9f6b94a0fc63035441a83021e7ba04a4a172d81ebb02847829b76a387ac09749c8b65668083699abe20c887fb9efca07c5b1a990702ec7b31a5e8e3935cd9a77649f8c25a84131229e24ab61aec6093") + if err := tx3.UnmarshalBinary(tx3payload); err != nil { + t.Fatal(err) + } + gen.AddTx(&tx3) + } else { + var tx types.Transaction + // immediately reverts + txpayload := common.Hex2Bytes("01f8d683010f2c028443ad7d0e830186a08080b880b00e7fa3c849dce891cce5fae8a4c46cbb313d6aec0c0ffe7863e05fb7b22d4807674c6055527ffbfcb0938f3e18f7937aa8fa95d880afebd5c4cec0d85186095832d03c85cf8a60755260ab60955360cf6096536066609753606e60985360fa609953609e609a53608e609b536024609c5360f6609d536072609e5360a4609fc080a08fc6f7101f292ff1fb0de8ac69c2d320fbb23bfe61cf327173786ea5daee6e37a044c42d91838ef06646294bf4f9835588aee66243b16a66a2da37641fae4c045f") + if err := tx.UnmarshalBinary(txpayload); err != nil { + t.Fatal(err) + } + gen.AddTx(&tx) + } + }) + + // Check that values 0x29 and 0x05 are found in the storage (and that they lead + // to no update, since the contract creation code reverted) + for _, stemStateDiff := range statediff[0] { + // Check that the value 0x85, which is overflowing the account header, + // is present. 
+ if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("a10042195481d30478251625e1ccef0e2174dc4e083e81d2566d880373f791")) { + for _, suffixDiff := range stemStateDiff.SuffixDiffs { + if suffixDiff.Suffix != 133 { + t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) + } + } + } else if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("b24fa84f214459af17d6e3f604811f252cac93146f02d67d7811bbcdfa448b")) { + for _, suffixDiff := range stemStateDiff.SuffixDiffs { + if suffixDiff.Suffix != 105 && suffixDiff.Suffix != 0 && suffixDiff.Suffix != 2 && suffixDiff.Suffix != 3 { + t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) + } + } + } else { + for _, suffixDiff := range stemStateDiff.SuffixDiffs { + if suffixDiff.Suffix > 4 { + t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) + } + } + } + } + + // Check that no account has a value above 4 in the 2nd block as no storage nor + // code should make it to the witness. + for _, stemStateDiff := range statediff[1] { + for _, suffixDiff := range stemStateDiff.SuffixDiffs { + if suffixDiff.Suffix > 4 { + t.Fatalf("invalid suffix diff found for %x in block #2: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) + } + } + } +} diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index 16c707c13acb..3c581cee57d1 100644 --- a/trie/utils/verkle.go +++ b/trie/utils/verkle.go @@ -186,28 +186,6 @@ func GetTreeKeyCodeChunkWithEvaluatedAddress(addressPoint *verkle.Point, chunk * return GetTreeKeyWithEvaluatedAddess(addressPoint, treeIndex, subIndex) } -func GetTreeKeyStorageSlot(address []byte, storageKey *uint256.Int) []byte { - pos := storageKey.Clone() - if storageKey.Cmp(codeStorageDelta) < 0 { - pos.Add(HeaderStorageOffset, storageKey) - } else { - pos.Add(MainStorageOffset, storageKey) - } - treeIndex := new(uint256.Int).Div(pos, VerkleNodeWidth) - - // calculate the sub_index, i.e. the index in the stem tree. - // Because the modulus is 256, it's the last byte of treeIndex - subIndexMod := new(uint256.Int).Mod(pos, VerkleNodeWidth) - var subIndex byte - if len(subIndexMod) != 0 { - // uint256 is broken into 4 little-endian quads, - // each with native endianness. Extract the least - // significant byte. - subIndex = byte(subIndexMod[0]) - } - return GetTreeKey(address, treeIndex, subIndex) -} - func PointToHash(evaluated *verkle.Point, suffix byte) []byte { // The output of Byte() is big engian for banderwagon. This // introduces an imbalance in the tree, because hashes are @@ -289,12 +267,23 @@ func GetTreeKeyStorageSlotTreeIndexes(storageKey []byte) (*uint256.Int, byte) { } // If the storage slot is in the main storage, we need to add the main storage offset. + // The first MAIN_STORAGE_OFFSET group will see its + // first 64 slots unreachable. This is either a typo in the + // spec or intended to conserve the 256-u256 + // aligment. If we decide to ever access these 64 + // slots, uncomment this. + // // Get the new offset since we now know that we are above 64. + // pos.Sub(&pos, codeStorageDelta) + // suffix := byte(pos[0] & 0xFF) + suffix := storageKey[len(storageKey)-1] + // We first divide by VerkleNodeWidth to create room to avoid an overflow next. pos.Rsh(&pos, uint(VerkleNodeWidthLog2)) + // We add mainStorageOffset/VerkleNodeWidth which can't overflow. 
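	// Illustrative arithmetic (a sketch, not part of this patch), assuming the
	// constants defined above (VerkleNodeWidthLog2 == 8, MainStorageOffset == 1 << 248,
	// mainStorageOffsetLshVerkleNodeWidth == 1 << 240 == MainStorageOffset >> 8):
	// because MainStorageOffset is a multiple of VerkleNodeWidth,
	//   (pos >> 8) + (MainStorageOffset >> 8) == (pos + MainStorageOffset) / VerkleNodeWidth
	// and the left-hand side stays well within 256 bits, since pos >> 8 < 2^248.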
pos.Add(&pos, mainStorageOffsetLshVerkleNodeWidth) // The sub-index is the LSB of the original storage key, since mainStorageOffset // doesn't affect this byte, so we can avoid masks or shifts. - return &pos, storageKey[len(storageKey)-1] + return &pos, suffix } From ac3f26947dcc00d39afcabaa9171a09b899aa7d9 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 26 Jan 2024 17:41:31 +0100 Subject: [PATCH 60/99] implement eip 2935 (#342) * implement eip 2935 * add touched historical contract slot to the witness --- cmd/evm/internal/t8ntool/execution.go | 4 ++++ cmd/evm/internal/t8ntool/gen_stenv.go | 6 ++++++ core/state_processor.go | 9 +++++++++ core/vm/instructions.go | 29 +++++++++++++++++++++++++++ miner/worker.go | 3 +++ params/protocol_params.go | 9 ++++++++- 6 files changed, 59 insertions(+), 1 deletion(-) diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index c408623fe32f..8c6725cb316b 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -88,6 +88,7 @@ type stEnv struct { ExcessBlobGas *uint64 `json:"excessBlobGas,omitempty"` ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"` ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"` + ParentHash *common.Hash `json:"parentHash,omitempty"` } type stEnvMarshaling struct { @@ -182,6 +183,9 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 { misc.ApplyDAOHardFork(statedb) } + if chainConfig.IsPrague(big.NewInt(int64(pre.Env.Number)), pre.Env.Timestamp) { + core.ProcessParentBlockHash(statedb, pre.Env.Number-1, *pre.Env.ParentHash) + } var blobGasUsed uint64 for i, tx := range txs { if tx.Type() == types.BlobTxType && vmContext.ExcessBlobGas == nil { diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go index f50fee5e31c3..5436407f07bb 100644 --- a/cmd/evm/internal/t8ntool/gen_stenv.go +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -36,6 +36,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) { ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"` ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` + ParentHash *common.Hash `json:"parentHash,omitempty"` } var enc stEnv enc.Coinbase = common.UnprefixedAddress(s.Coinbase) @@ -57,6 +58,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) { enc.ExcessBlobGas = (*math.HexOrDecimal64)(s.ExcessBlobGas) enc.ParentExcessBlobGas = (*math.HexOrDecimal64)(s.ParentExcessBlobGas) enc.ParentBlobGasUsed = (*math.HexOrDecimal64)(s.ParentBlobGasUsed) + enc.ParentHash = s.ParentHash return json.Marshal(&enc) } @@ -82,6 +84,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"` ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` + ParentHash *common.Hash `json:"parentHash,omitempty"` } var dec stEnv if err := json.Unmarshal(input, &dec); err != nil { @@ -148,5 +151,8 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { if dec.ParentBlobGasUsed != nil { s.ParentBlobGasUsed = (*uint64)(dec.ParentBlobGasUsed) } + if dec.ParentHash != nil { + s.ParentHash = dec.ParentHash + } return nil } diff --git a/core/state_processor.go 
b/core/state_processor.go index 84c2f6429a30..ed55c478843c 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -85,6 +85,9 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vmenv = vm.NewEVM(context, vm.TxContext{}, statedb, p.config, cfg) signer = types.MakeSigner(p.config, header.Number, header.Time) ) + if p.config.IsPrague(block.Number(), block.Time()) { + ProcessParentBlockHash(statedb, block.NumberU64()-1, block.ParentHash()) + } // Iterate over and process the individual transactions for i, tx := range block.Transactions() { msg, err := TransactionToMessage(tx, signer, header.BaseFee) @@ -359,3 +362,9 @@ func (kvm *keyValueMigrator) migrateCollectedKeyValues(tree *trie.VerkleTrie) er return nil } + +func ProcessParentBlockHash(statedb *state.StateDB, prevNumber uint64, prevHash common.Hash) { + var key common.Hash + binary.BigEndian.PutUint64(key[24:], prevNumber) + statedb.SetState(params.HistoryStorageAddress, key, prevHash) +} diff --git a/core/vm/instructions.go b/core/vm/instructions.go index a7860f0fde82..00787b3d030f 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -17,6 +17,8 @@ package vm import ( + "encoding/binary" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/state" @@ -497,6 +499,13 @@ func opGasprice(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ return nil, nil } +func getBlockHashFromContract(number uint64, statedb StateDB, witness *state.AccessWitness) common.Hash { + var pnum common.Hash + binary.BigEndian.PutUint64(pnum[24:], number) + witness.TouchAddressOnReadAndComputeGas(params.HistoryStorageAddress[:], *uint256.NewInt(number / 256), byte(number&0xFF)) + return statedb.GetState(params.HistoryStorageAddress, pnum) +} + func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { num := scope.Stack.peek() num64, overflow := num.Uint64WithOverflow() @@ -504,6 +513,26 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ( num.Clear() return nil, nil } + + evm := interpreter.evm + bnum := evm.Context.BlockNumber.Uint64() + // if Prague is active, check if we are past the 256th block so that + // reading from the contract can be activated (EIP 2935). + if evm.chainRules.IsPrague && bnum > 256 { + if getBlockHashFromContract(bnum-256, evm.StateDB, evm.Accesses) != (common.Hash{}) { + // EIP-2935 case: get the block number from the fork, as we are 256 blocks + // after the fork activation. + + num.SetBytes(getBlockHashFromContract(num64, evm.StateDB, evm.Accesses).Bytes()) + return nil, nil + } + + // if the 256th ancestor didn't have its hash stored in the + // history contract, then we are within 256 blocks of the + // fork activation, and the former behavior should be retained. + // Fall through the legacy use case. 
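	// Illustrative recap (a sketch, not part of this patch) of the slot layout
	// assumed above: ProcessParentBlockHash writes the hash of block M into
	// storage slot M of params.HistoryStorageAddress (M big-endian in the last
	// 8 bytes of the 32-byte slot key), and getBlockHashFromContract reads the
	// same slot back, touching tree index M/256 and sub-index M%256 in the
	// access witness.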
+ } + var upper, lower uint64 upper = interpreter.evm.Context.BlockNumber.Uint64() if upper < 257 { diff --git a/miner/worker.go b/miner/worker.go index 124c93212262..ef8d6087b7cd 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -920,6 +920,9 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) { log.Error("Failed to create sealing context", "err", err) return nil, err } + if w.chainConfig.IsPrague(header.Number, header.Time) { + core.ProcessParentBlockHash(env.state, header.Number.Uint64()-1, header.ParentHash) + } return env, nil } diff --git a/params/protocol_params.go b/params/protocol_params.go index a407ed147329..6d91ee48a85f 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -16,7 +16,11 @@ package params -import "math/big" +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) const ( GasLimitBoundDivisor uint64 = 1024 // The bound divisor of the gas limit, used in update calculations. @@ -179,4 +183,7 @@ var ( GenesisDifficulty = big.NewInt(131072) // Difficulty of the Genesis block. MinimumDifficulty = big.NewInt(131072) // The minimum that the difficulty may ever be. DurationLimit = big.NewInt(13) // The decision boundary on the blocktime duration used to determine whether difficulty should go up or not. + + // HistoryStorageAddress is where the historical block hashes are stored. + HistoryStorageAddress common.Address = common.HexToAddress("0xfffffffffffffffffffffffffffffffffffffffe") ) From 7d02a604d7dd74e1561a8870b379d022891b22dc Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 29 Jan 2024 09:52:45 +0100 Subject: [PATCH 61/99] fix: support a verkle pre-tree at the conversion block (#313) * fix: support a verkle pre-tree at the conversion block * make linter happy --- consensus/beacon/consensus.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index ef483c8d328e..35a7ed2b56d0 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -418,7 +418,14 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea vtrpost = post.Overlay() okpost = true default: - panic("invalid tree type") + // This should only happen for the first block of the + // conversion, when the previous tree is a merkle tree. + // Logically, the "previous" verkle tree is an empty tree. 
+ okpre = true + vtrpre = trie.NewVerkleTrie(verkle.New(), state.Database().TrieDB(), utils.NewPointCache(), false) + post := state.GetTrie().(*trie.TransitionTrie) + vtrpost = post.Overlay() + okpost = true } if okpre && okpost { if len(keys) > 0 { From bea12cc674d93212f1590ea72975d8b9030541a0 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 29 Jan 2024 11:04:26 +0100 Subject: [PATCH 62/99] fix: decide if genesis is verkle using genesis timestamp (#290) * fix: decide if genesis is verkle using genesis timestamp * fix linter message --- cmd/geth/chaincmd.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 55c22f7322f3..1d82218b3079 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -20,6 +20,7 @@ import ( "encoding/json" "errors" "fmt" + "math/big" "os" "runtime" "strconv" @@ -214,7 +215,7 @@ func initGenesis(ctx *cli.Context) error { } triedb := trie.NewDatabaseWithConfig(chaindb, &trie.Config{ Preimages: ctx.Bool(utils.CachePreimagesFlag.Name), - Verkle: true, + Verkle: genesis.Config.IsPrague(big.NewInt(0), genesis.Timestamp), }) _, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides) if err != nil { From 95d7f39db53f0fd20df49f904d66d93ef4da009d Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 29 Jan 2024 13:16:59 +0100 Subject: [PATCH 63/99] set verkle mode from genesis with override (#308) * set verkle mode from genesis with override * make linter happy * fix more linter snafu --- cmd/geth/chaincmd.go | 3 +-- core/genesis.go | 6 ++++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 1d82218b3079..2179b61032b6 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -20,7 +20,6 @@ import ( "encoding/json" "errors" "fmt" - "math/big" "os" "runtime" "strconv" @@ -215,7 +214,7 @@ func initGenesis(ctx *cli.Context) error { } triedb := trie.NewDatabaseWithConfig(chaindb, &trie.Config{ Preimages: ctx.Bool(utils.CachePreimagesFlag.Name), - Verkle: genesis.Config.IsPrague(big.NewInt(0), genesis.Timestamp), + Verkle: genesis.IsVerkle(), }) _, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides) if err != nil { diff --git a/core/genesis.go b/core/genesis.go index 74294afb7272..e6874039f2e2 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -456,6 +456,12 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig { } } +// IsVerkle indicates whether the state is already stored in a verkle +// tree at genesis time. +func (g *Genesis) IsVerkle() bool { + return g.Config.IsPrague(new(big.Int).SetUint64(g.Number), g.Timestamp) +} + // ToBlock returns the genesis block according to genesis specification. 
func (g *Genesis) ToBlock() *types.Block { root, err := g.Alloc.deriveHash(g.Config, g.Timestamp) From b7bd9e33ad0c9d8c7051bce393081cc627104d74 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 29 Jan 2024 13:47:11 +0100 Subject: [PATCH 64/99] fix: test for ExtStatus count when two missing stems collide (#347) --- trie/verkle_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/trie/verkle_test.go b/trie/verkle_test.go index aef2eef76ed6..9f30c6bde858 100644 --- a/trie/verkle_test.go +++ b/trie/verkle_test.go @@ -313,8 +313,8 @@ func TestReproduceCondrieuStemAggregationInProofOfAbsence(t *testing.T) { t.Logf("tree: %s\n%x\n", verkle.ToDot(root), root.Commitment().Bytes()) t.Logf("%d", len(proof.ExtStatus)) - if len(proof.ExtStatus) != 5 { - t.Fatalf("invalid number of declared stems: %d != 5", len(proof.ExtStatus)) + if len(proof.ExtStatus) != 6 { + t.Fatalf("invalid number of declared stems: %d != 6", len(proof.ExtStatus)) } } From eb31ae3f9ec87037cb6c531ef2bb9d612fe9bc66 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Mon, 29 Jan 2024 13:33:34 -0300 Subject: [PATCH 65/99] accesswitness: touch for origin and to (#348) Signed-off-by: Ignacio Hagopian --- core/state/access_witness.go | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/core/state/access_witness.go b/core/state/access_witness.go index 65d7bb25a32c..7eb0990b645c 100644 --- a/core/state/access_witness.go +++ b/core/state/access_witness.go @@ -139,34 +139,34 @@ func (aw *AccessWitness) TouchAndChargeContractCreateCompleted(addr []byte) uint } func (aw *AccessWitness) TouchTxOriginAndComputeGas(originAddr []byte) uint64 { - // var gas uint64 - // gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.VersionLeafKey) - // gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.CodeSizeLeafKey) - // gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.CodeKeccakLeafKey) - // gas += aw.TouchAddressOnWriteAndComputeGas(originAddr, zeroTreeIndex, utils.NonceLeafKey) - // gas += aw.TouchAddressOnWriteAndComputeGas(originAddr, zeroTreeIndex, utils.BalanceLeafKey) + aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.VersionLeafKey) + aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.CodeSizeLeafKey) + aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.CodeKeccakLeafKey) + aw.TouchAddressOnWriteAndComputeGas(originAddr, zeroTreeIndex, utils.NonceLeafKey) + aw.TouchAddressOnWriteAndComputeGas(originAddr, zeroTreeIndex, utils.BalanceLeafKey) // Kaustinen note: we're currently experimenting with stop chargin gas for the origin address // so simple transfer still take 21000 gas. This is to potentially avoid breaking existing tooling. // This is the reason why we return 0 instead of `gas`. + // Note that we still have to touch the addresses to make sure the witness is correct. 
return 0 } func (aw *AccessWitness) TouchTxExistingAndComputeGas(targetAddr []byte, sendsValue bool) uint64 { - // var gas uint64 - // gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.VersionLeafKey) - // gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.CodeSizeLeafKey) - // gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.CodeKeccakLeafKey) - // gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.NonceLeafKey) - // if sendsValue { - // gas += aw.TouchAddressOnWriteAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) - // } else { - // gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) - // } + aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.VersionLeafKey) + aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.CodeSizeLeafKey) + aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.CodeKeccakLeafKey) + aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.NonceLeafKey) + if sendsValue { + aw.TouchAddressOnWriteAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) + } else { + aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) + } // Kaustinen note: we're currently experimenting with stop chargin gas for the origin address // so simple transfer still take 21000 gas. This is to potentially avoid breaking existing tooling. // This is the reason why we return 0 instead of `gas`. + // Note that we still have to touch the addresses to make sure the witness is correct. return 0 } From c1e0ff10ecfdfe6cb357219136e45285da8fad53 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 29 Jan 2024 21:54:57 +0100 Subject: [PATCH 66/99] fix: only disable HasLegacyNode call for verkle-at-genesis testnets (#350) --- core/genesis.go | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/core/genesis.go b/core/genesis.go index e6874039f2e2..19a84df8182c 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -341,23 +341,23 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen } // We have the genesis block in database(perhaps in ancient database) // but the corresponding state is missing. - // header := rawdb.ReadHeader(db, stored, 0) - // if header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { - // if genesis == nil { - // genesis = DefaultGenesisBlock() - // } - // // Ensure the stored genesis matches with the given one. - // hash := genesis.ToBlock().Hash() - // if hash != stored { - // return genesis.Config, hash, &GenesisMismatchError{stored, hash} - // } - // block, err := genesis.Commit(db, triedb) - // if err != nil { - // return genesis.Config, hash, err - // } - // applyOverrides(genesis.Config) - // return genesis.Config, block.Hash(), nil - // } + header := rawdb.ReadHeader(db, stored, 0) + if !genesis.Config.IsPrague(big.NewInt(0), genesis.Timestamp) && header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { + if genesis == nil { + genesis = DefaultGenesisBlock() + } + // Ensure the stored genesis matches with the given one. 
+ hash := genesis.ToBlock().Hash() + if hash != stored { + return genesis.Config, hash, &GenesisMismatchError{stored, hash} + } + block, err := genesis.Commit(db, triedb) + if err != nil { + return genesis.Config, hash, err + } + applyOverrides(genesis.Config) + return genesis.Config, block.Hash(), nil + } // Check whether the genesis block is already written. if genesis != nil { applyOverrides(genesis.Config) From 4c4de3102a4ee3d8359a8b598499d0e6ba71a8ac Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 29 Jan 2024 21:56:08 +0100 Subject: [PATCH 67/99] fix: eip2935 block hash storage in chain makers (#351) --- core/chain_makers.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/chain_makers.go b/core/chain_makers.go index d43e9126a2b6..0b262cba16fc 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -385,6 +385,10 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine preState := statedb.Copy() fmt.Println("prestate", preState.GetTrie().(*trie.VerkleTrie).ToDot()) + if config.IsPrague(b.header.Number, b.header.Time) { + ProcessParentBlockHash(statedb, b.header.Number.Uint64()-1, b.header.ParentHash) + } + // Mutate the state and block according to any hard-fork specs if daoBlock := config.DAOForkBlock; daoBlock != nil { limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) From b7a19b4a4dc9b4fe85010ecfdc3546a8c0b0d0e8 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Tue, 30 Jan 2024 06:53:43 -0300 Subject: [PATCH 68/99] trie/utils: bound point cache size (#352) Signed-off-by: Ignacio Hagopian --- trie/utils/verkle.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index 3c581cee57d1..65f4c1fa2ab0 100644 --- a/trie/utils/verkle.go +++ b/trie/utils/verkle.go @@ -18,9 +18,9 @@ package utils import ( "encoding/binary" - "sync" "github.com/crate-crypto/go-ipa/bandersnatch/fr" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) @@ -31,6 +31,8 @@ const ( NonceLeafKey = 2 CodeKeccakLeafKey = 3 CodeSizeLeafKey = 4 + + maxPointCacheByteSize = 100 << 20 ) var ( @@ -47,28 +49,26 @@ var ( ) type PointCache struct { - cache map[string]*verkle.Point - lock sync.RWMutex + cache *lru.Cache[string, *verkle.Point] } func NewPointCache() *PointCache { + // Each verkle.Point is 96 bytes. 
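For scale, with the constants above (maxPointCacheByteSize = 100 << 20 bytes and 96-byte points), the LRU below is capped at 100*1024*1024 / 96 ≈ 1,092,266 evaluated address points, so the previously unbounded map is replaced by a fixed ~100 MiB budget with LRU eviction.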
+ verklePointSize := 96 + capacity := maxPointCacheByteSize / verklePointSize return &PointCache{ - cache: make(map[string]*verkle.Point), + cache: lru.NewCache[string, *verkle.Point](capacity), } } func (pc *PointCache) GetTreeKeyHeader(addr []byte) *verkle.Point { - pc.lock.RLock() - point, ok := pc.cache[string(addr)] - pc.lock.RUnlock() + point, ok := pc.cache.Get(string(addr)) if ok { return point } point = EvaluateAddressPoint(addr) - pc.lock.Lock() - pc.cache[string(addr)] = point - pc.lock.Unlock() + pc.cache.Add(string(addr), point) return point } From c3b9b375cc1b08f42809638897145d93c1f6bbcd Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Wed, 31 Jan 2024 11:15:43 +0100 Subject: [PATCH 69/99] fix: nil pointer when deactivating verkle-incompatible genesis code (#353) --- core/genesis.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/genesis.go b/core/genesis.go index 19a84df8182c..1262d350f26c 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -342,7 +342,8 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen // We have the genesis block in database(perhaps in ancient database) // but the corresponding state is missing. header := rawdb.ReadHeader(db, stored, 0) - if !genesis.Config.IsPrague(big.NewInt(0), genesis.Timestamp) && header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { + notverkle := genesis == nil || genesis.Config == nil || genesis.Config.PragueTime == nil || !genesis.Config.IsPrague(big.NewInt(0), genesis.Timestamp) + if notverkle && header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { if genesis == nil { genesis = DefaultGenesisBlock() } From 9be95a24f44cc34198bea37c06a93d8f8e276cbe Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Thu, 1 Feb 2024 07:25:32 +0100 Subject: [PATCH 70/99] another workaround to force verkle at creation --- core/blockchain.go | 1 + core/genesis.go | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index e2274c03ce11..797d31388476 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -242,6 +242,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis triedb := trie.NewDatabaseWithConfig(db, &trie.Config{ Cache: cacheConfig.TrieCleanLimit, Preimages: cacheConfig.Preimages, + Verkle: true, }) // Setup the genesis block, commit the provided genesis specification // to database if the genesis block is not present yet, or load the diff --git a/core/genesis.go b/core/genesis.go index 1262d350f26c..c8a4bc5952d9 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -342,8 +342,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen // We have the genesis block in database(perhaps in ancient database) // but the corresponding state is missing. 
header := rawdb.ReadHeader(db, stored, 0) - notverkle := genesis == nil || genesis.Config == nil || genesis.Config.PragueTime == nil || !genesis.Config.IsPrague(big.NewInt(0), genesis.Timestamp) - if notverkle && header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { + if !triedb.IsVerkle() && header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { if genesis == nil { genesis = DefaultGenesisBlock() } From cc48d57e1520b640f68b9dfef5e25078c0229613 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 2 Feb 2024 07:12:52 +0100 Subject: [PATCH 71/99] refactor: move trieDB creation outside of block-making loop (#354) --- core/chain_makers.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/chain_makers.go b/core/chain_makers.go index 0b262cba16fc..37a5fbf5100f 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -429,9 +429,9 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine return nil, nil } var snaps *snapshot.Tree + triedb := state.NewDatabaseWithConfig(db, nil) + triedb.EndVerkleTransition() for i := 0; i < n; i++ { - triedb := state.NewDatabaseWithConfig(db, nil) - triedb.EndVerkleTransition() statedb, err := state.New(parent.Root(), triedb, snaps) if err != nil { panic(fmt.Sprintf("could not find state for block %d: err=%v, parent root=%x", i, err, parent.Root())) From afcfa5af0729281707ca734c677f169bdcb8e94e Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Fri, 2 Feb 2024 07:56:03 +0100 Subject: [PATCH 72/99] Add eip2935 256th ancestor stem to witness (#355) --- core/state_processor.go | 3 +++ core/state_processor_test.go | 18 +++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/core/state_processor.go b/core/state_processor.go index ed55c478843c..b22c45469e6f 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/utils" tutils "github.com/ethereum/go-ethereum/trie/utils" "github.com/ethereum/go-verkle" "github.com/holiman/uint256" @@ -367,4 +368,6 @@ func ProcessParentBlockHash(statedb *state.StateDB, prevNumber uint64, prevHash var key common.Hash binary.BigEndian.PutUint64(key[24:], prevNumber) statedb.SetState(params.HistoryStorageAddress, key, prevHash) + index, suffix := utils.GetTreeKeyStorageSlotTreeIndexes(key[:]) + statedb.Witness().TouchAddressOnWriteAndComputeGas(params.HistoryStorageAddress[:], *index, suffix) } diff --git a/core/state_processor_test.go b/core/state_processor_test.go index e06eca11b688..b40e1f31f5ac 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -653,6 +653,14 @@ func TestProcessVerkleiInvalidContractCreation(t *testing.T) { t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) } } + } else if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("97f2911f5efe08b74c28727d004e36d260225e73525fe2a300c8f58c7ffd76")) { + // BLOCKHASH contract stem + if len(stemStateDiff.SuffixDiffs) > 1 { + t.Fatalf("invalid suffix diff count found for BLOCKHASH contract: %d != 1", len(stemStateDiff.SuffixDiffs)) + } + if stemStateDiff.SuffixDiffs[0].Suffix != 64 { + t.Fatalf("invalid suffix diff value found for BLOCKHASH contract: %d != 64", 
stemStateDiff.SuffixDiffs[0].Suffix) + } } else { for _, suffixDiff := range stemStateDiff.SuffixDiffs { if suffixDiff.Suffix > 4 { @@ -666,7 +674,15 @@ func TestProcessVerkleiInvalidContractCreation(t *testing.T) { // code should make it to the witness. for _, stemStateDiff := range statediff[1] { for _, suffixDiff := range stemStateDiff.SuffixDiffs { - if suffixDiff.Suffix > 4 { + if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("97f2911f5efe08b74c28727d004e36d260225e73525fe2a300c8f58c7ffd76")) { + // BLOCKHASH contract stem + if len(stemStateDiff.SuffixDiffs) > 1 { + t.Fatalf("invalid suffix diff count found for BLOCKHASH contract at block #2: %d != 1", len(stemStateDiff.SuffixDiffs)) + } + if stemStateDiff.SuffixDiffs[0].Suffix != 65 { + t.Fatalf("invalid suffix diff value found for BLOCKHASH contract at block #2: %d != 65", stemStateDiff.SuffixDiffs[0].Suffix) + } + } else if suffixDiff.Suffix > 4 { t.Fatalf("invalid suffix diff found for %x in block #2: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) } } From 953338b22bcd204d3875eeaf19731002ef6710ff Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Sat, 3 Feb 2024 05:14:05 -0300 Subject: [PATCH 73/99] state: avoid EIP-158 for history storage address (#359) Signed-off-by: Ignacio Hagopian --- core/state/statedb.go | 2 +- core/state_processor_test.go | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index ea1e18613870..1e7c892c2d8a 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -937,7 +937,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { // Thus, we can safely ignore it here continue } - if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) { + if (obj.selfDestructed || (deleteEmptyObjects && obj.empty())) && addr != params.HistoryStorageAddress { obj.deleted = true // We need to maintain account deletions explicitly (will remain diff --git a/core/state_processor_test.go b/core/state_processor_test.go index b40e1f31f5ac..e152338ac6e3 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -544,7 +544,7 @@ func TestProcessVerkle(t *testing.T) { } } -func TestProcessVerkleiInvalidContractCreation(t *testing.T) { +func TestProcessVerkleInvalidContractCreation(t *testing.T) { var ( config = ¶ms.ChainConfig{ ChainID: big.NewInt(69420), @@ -682,6 +682,12 @@ func TestProcessVerkleiInvalidContractCreation(t *testing.T) { if stemStateDiff.SuffixDiffs[0].Suffix != 65 { t.Fatalf("invalid suffix diff value found for BLOCKHASH contract at block #2: %d != 65", stemStateDiff.SuffixDiffs[0].Suffix) } + if stemStateDiff.SuffixDiffs[0].NewValue == nil { + t.Fatalf("missing post state value for BLOCKHASH contract at block #2") + } + if *stemStateDiff.SuffixDiffs[0].NewValue != common.HexToHash("53abcdfb284720ea59efe923d3dc774bbb7e787d829599f8ec7a81d344dd3d17") { + t.Fatalf("invalid post state value for BLOCKHASH contract at block #2: %x != ", (*stemStateDiff.SuffixDiffs[0].NewValue)[:]) + } } else if suffixDiff.Suffix > 4 { t.Fatalf("invalid suffix diff found for %x in block #2: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) } From 7a0eb0b93b01aa6f08631d275b59e7d73e28e812 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Sat, 3 Feb 2024 09:16:29 +0100 Subject: [PATCH 74/99] test: check current and new values in state processor witness --- core/state_processor_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/core/state_processor_test.go 
b/core/state_processor_test.go index e152338ac6e3..7317bb01217f 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -661,6 +661,13 @@ func TestProcessVerkleInvalidContractCreation(t *testing.T) { if stemStateDiff.SuffixDiffs[0].Suffix != 64 { t.Fatalf("invalid suffix diff value found for BLOCKHASH contract: %d != 64", stemStateDiff.SuffixDiffs[0].Suffix) } + // check that the "current value" is nil and that the new value isn't. + if stemStateDiff.SuffixDiffs[0].CurrentValue != nil { + t.Fatalf("non-nil current value in BLOCKHASH contract insert: %x", stemStateDiff.SuffixDiffs[0].CurrentValue) + } + if stemStateDiff.SuffixDiffs[0].NewValue == nil { + t.Fatalf("nil new value in BLOCKHASH contract insert") + } } else { for _, suffixDiff := range stemStateDiff.SuffixDiffs { if suffixDiff.Suffix > 4 { From 61ffef79613cda6cecfd4a9af2b5eed28f4467da Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 5 Feb 2024 20:52:31 +0100 Subject: [PATCH 75/99] fix: CREATE-time PUSHn adds non-existent entries to witness (#361) * fix: CREATE-time PUSHn adds non-existent entries to witness * this also affects CODECOPY * fix: add code returned by CREATE* to the witness * fix gas costs --- core/state_processor_test.go | 95 +++++++++++++++++++++++++++++++++++- core/vm/evm.go | 3 ++ core/vm/instructions.go | 6 +-- 3 files changed, 100 insertions(+), 4 deletions(-) diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 7317bb01217f..f2e5c2769c99 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -484,7 +484,7 @@ func TestProcessVerkle(t *testing.T) { txCost1 := params.TxGas txCost2 := params.TxGas contractCreationCost := intrinsicContractCreationGas + uint64(7700 /* creation */ +2939 /* execution costs */) - codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(7000 /* creation */ +315944 /* execution costs */) + codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(7000 /* creation */ +299744 /* execution costs */) blockGasUsagesExpected := []uint64{ txCost1*2 + txCost2, txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, @@ -701,3 +701,96 @@ func TestProcessVerkleInvalidContractCreation(t *testing.T) { } } } + +func TestProcessVerkleContractWithEmptyCode(t *testing.T) { + var ( + config = ¶ms.ChainConfig{ + ChainID: big.NewInt(69421), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + ShanghaiTime: u64(0), + PragueTime: u64(0), + TerminalTotalDifficulty: common.Big0, + TerminalTotalDifficultyPassed: true, + ProofInBlocks: true, + } + bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain + gendb = rawdb.NewMemoryDatabase() // Database for the block-generation code, they must be separate as they are path-based. 
+ coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") + account1 = common.HexToAddress("0x687704DB07e902e9A8B3754031D168D46E3D586e") + account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") + gspec = &Genesis{ + Config: config, + Alloc: GenesisAlloc{ + coinbase: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account1: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account2: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 3, + }, + }, + } + ) + // Verkle trees use the snapshot, which must be enabled before the + // data is saved into the tree+database. + genesis := gspec.MustCommit(bcdb) + + // Commit the genesis block to the block-generation database as it + // is now independent of the blockchain database. + gspec.MustCommit(gendb) + + _, _, _, statediff := GenerateVerkleChain(gspec.Config, genesis, beacon.New(ethash.NewFaker()), gendb, 1, func(i int, gen *BlockGen) { + gen.SetPoS() + var tx types.Transaction + // a transaction that does some PUSH1n but returns a 0-sized contract + txpayload := common.Hex2Bytes("02f8db83010f2d03843b9aca008444cf6a05830186a08080b8807fdfbbb59f2371a76485ce557fd0de00c298d3ede52a3eab56d35af674eb49ec5860335260826053536001605453604c60555360f3605653606060575360446058536096605953600c605a5360df605b5360f3605c5360fb605d53600c605e53609a605f53607f60605360fe606153603d60625360f4606353604b60645360cac001a0486b6dc55b8a311568b7239a2cae1d77e7446dba71df61eaafd53f73820a138fa010bd48a45e56133ac4c5645142c2ea48950d40eb35050e9510b6bad9e15c5865") + if err := tx.UnmarshalBinary(txpayload); err != nil { + t.Fatal(err) + } + gen.AddTx(&tx) + }) + + for _, stemStateDiff := range statediff[0] { + if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("97f2911f5efe08b74c28727d004e36d260225e73525fe2a300c8f58c7ffd76")) { + // BLOCKHASH contract stem + if len(stemStateDiff.SuffixDiffs) > 1 { + t.Fatalf("invalid suffix diff count found for BLOCKHASH contract: %d != 1", len(stemStateDiff.SuffixDiffs)) + } + if stemStateDiff.SuffixDiffs[0].Suffix != 64 { + t.Fatalf("invalid suffix diff value found for BLOCKHASH contract: %d != 64", stemStateDiff.SuffixDiffs[0].Suffix) + } + // check that the "current value" is nil and that the new value isn't. + if stemStateDiff.SuffixDiffs[0].CurrentValue != nil { + t.Fatalf("non-nil current value in BLOCKHASH contract insert: %x", stemStateDiff.SuffixDiffs[0].CurrentValue) + } + if stemStateDiff.SuffixDiffs[0].NewValue == nil { + t.Fatalf("nil new value in BLOCKHASH contract insert") + } + } else { + for _, suffixDiff := range stemStateDiff.SuffixDiffs { + if suffixDiff.Suffix > 4 { + // if d8898012c484fb48610ecb7963886339207dab004bce968b007b616ffa18e0 shows up, it means that the PUSHn + // in the transaction above added entries into the witness, when they should not have since they are + // part of a contract deployment. 
+ t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) + } + } + } + } +} diff --git a/core/vm/evm.go b/core/vm/evm.go index 7ad6c13c5daf..bcd5248bb72c 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -530,6 +530,9 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } if err == nil && evm.chainRules.IsPrague { + if len(ret) > 0 { + touchCodeChunksRangeOnReadAndChargeGas(address.Bytes(), 0, uint64(len(ret)), uint64(len(ret)), evm.Accesses) + } if !contract.UseGas(evm.Accesses.TouchAndChargeContractCreateCompleted(address.Bytes()[:])) { evm.StateDB.RevertToSnapshot(snapshot) err = ErrOutOfGas diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 00787b3d030f..8b638a4bbad9 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -378,7 +378,7 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ contractAddr := scope.Contract.Address() paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(scope.Contract.Code, uint64CodeOffset, length.Uint64()) - if interpreter.evm.chainRules.IsPrague { + if interpreter.evm.chainRules.IsPrague && !scope.Contract.IsDeployment { statelessGas := touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], copyOffset, nonPaddedCopyLength, uint64(len(scope.Contract.Code)), interpreter.evm.Accesses) if !scope.Contract.UseGas(statelessGas) { scope.Contract.Gas = 0 @@ -998,7 +998,7 @@ func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by if *pc < codeLen { scope.Stack.push(integer.SetUint64(uint64(scope.Contract.Code[*pc]))) - if interpreter.evm.chainRules.IsPrague && *pc%31 == 0 { + if !scope.Contract.IsDeployment && interpreter.evm.chainRules.IsPrague && *pc%31 == 0 { // touch next chunk if PUSH1 is at the boundary. if so, *pc has // advanced past this boundary. 
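Concretely: code is witnessed in 31-byte chunks, so a PUSH1 opcode at pc = 30 (the last byte of chunk 0) takes its immediate from pc = 31, which already lies in chunk 1. Because *pc has been incremented past the opcode at this point, *pc % 31 == 0 is precisely the boundary case in which the following chunk must also be charged and added to the witness.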
contractAddr := scope.Contract.Address() @@ -1029,7 +1029,7 @@ func makePush(size uint64, pushByteSize int) executionFunc { endMin = startMin + pushByteSize } - if interpreter.evm.chainRules.IsPrague { + if !scope.Contract.IsDeployment && interpreter.evm.chainRules.IsPrague { contractAddr := scope.Contract.Address() statelessGas := touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], uint64(startMin), uint64(pushByteSize), uint64(len(scope.Contract.Code)), interpreter.evm.Accesses) if !scope.Contract.UseGas(statelessGas) { From 2e671ea446f5d56d4b52958867157aad62c473b3 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 6 Feb 2024 09:52:24 +0100 Subject: [PATCH 76/99] eip2935 redesign: insert all 256 ancestors upon transition (#362) * eip2935 redesign: only read target block hash * review fixes * fix: t8n tool --- cmd/evm/internal/t8ntool/execution.go | 5 ++++- core/chain_makers.go | 7 ++++++- core/state_processor.go | 15 ++++++++++++++- core/vm/instructions.go | 20 ++++---------------- miner/worker.go | 7 ++++++- 5 files changed, 34 insertions(+), 20 deletions(-) diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 8c6725cb316b..035b367d34e6 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -184,7 +184,10 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, misc.ApplyDAOHardFork(statedb) } if chainConfig.IsPrague(big.NewInt(int64(pre.Env.Number)), pre.Env.Timestamp) { - core.ProcessParentBlockHash(statedb, pre.Env.Number-1, *pre.Env.ParentHash) + // insert all parent hashes in the contract + for i := pre.Env.Number - 1; i > 0 && i >= pre.Env.Number-257; i-- { + core.ProcessParentBlockHash(statedb, i, pre.Env.BlockHashes[math.HexOrDecimal64(i)]) + } } var blobGasUsed uint64 for i, tx := range txs { diff --git a/core/chain_makers.go b/core/chain_makers.go index 37a5fbf5100f..5b9dc0c6ff08 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -386,7 +386,12 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine fmt.Println("prestate", preState.GetTrie().(*trie.VerkleTrie).ToDot()) if config.IsPrague(b.header.Number, b.header.Time) { - ProcessParentBlockHash(statedb, b.header.Number.Uint64()-1, b.header.ParentHash) + if !config.IsPrague(b.parent.Number(), b.parent.Time()) { + // Transition case: insert all 256 ancestors + InsertBlockHashHistoryAtEip2935Fork(statedb, b.header.Number.Uint64()-1, b.header.ParentHash, chainreader) + } else { + ProcessParentBlockHash(statedb, b.header.Number.Uint64()-1, b.header.ParentHash) + } } // Mutate the state and block according to any hard-fork specs diff --git a/core/state_processor.go b/core/state_processor.go index b22c45469e6f..cdb4e4af19be 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -87,7 +87,12 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg signer = types.MakeSigner(p.config, header.Number, header.Time) ) if p.config.IsPrague(block.Number(), block.Time()) { - ProcessParentBlockHash(statedb, block.NumberU64()-1, block.ParentHash()) + parent := p.bc.GetBlockByHash(block.ParentHash()) + if !p.config.IsPrague(parent.Number(), parent.Time()) { + InsertBlockHashHistoryAtEip2935Fork(statedb, block.NumberU64()-1, block.ParentHash(), p.bc) + } else { + ProcessParentBlockHash(statedb, block.NumberU64()-1, block.ParentHash()) + } } // Iterate over and process the individual transactions for 
i, tx := range block.Transactions() { @@ -364,6 +369,14 @@ func (kvm *keyValueMigrator) migrateCollectedKeyValues(tree *trie.VerkleTrie) er return nil } +func InsertBlockHashHistoryAtEip2935Fork(statedb *state.StateDB, prevNumber uint64, prevHash common.Hash, chain consensus.ChainHeaderReader) { + ancestor := chain.GetHeader(prevHash, prevNumber) + for i := prevNumber; i > 0 && i >= prevNumber-256; i-- { + ProcessParentBlockHash(statedb, i, ancestor.Hash()) + ancestor = chain.GetHeader(ancestor.ParentHash, ancestor.Number.Uint64()-1) + } +} + func ProcessParentBlockHash(statedb *state.StateDB, prevNumber uint64, prevHash common.Hash) { var key common.Hash binary.BigEndian.PutUint64(key[24:], prevNumber) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 8b638a4bbad9..7fb340da7b24 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -515,22 +515,10 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ( } evm := interpreter.evm - bnum := evm.Context.BlockNumber.Uint64() - // if Prague is active, check if we are past the 256th block so that - // reading from the contract can be activated (EIP 2935). - if evm.chainRules.IsPrague && bnum > 256 { - if getBlockHashFromContract(bnum-256, evm.StateDB, evm.Accesses) != (common.Hash{}) { - // EIP-2935 case: get the block number from the fork, as we are 256 blocks - // after the fork activation. - - num.SetBytes(getBlockHashFromContract(num64, evm.StateDB, evm.Accesses).Bytes()) - return nil, nil - } - - // if the 256th ancestor didn't have its hash stored in the - // history contract, then we are within 256 blocks of the - // fork activation, and the former behavior should be retained. - // Fall through the legacy use case. + // if Prague is active, read it from the history contract (EIP 2935). 
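Taken together with ProcessParentBlockHash and InsertBlockHashHistoryAtEip2935Fork above, the history-contract layout this relies on is simple. A rough sketch (the helper name historySlot is mine, not from the patch; common and binary are the usual go-ethereum/stdlib packages):

	package sketch

	import (
		"encoding/binary"

		"github.com/ethereum/go-ethereum/common"
	)

	// historySlot mirrors the key construction in ProcessParentBlockHash: block
	// `number`'s hash is stored under params.HistoryStorageAddress at a 32-byte
	// slot whose last 8 bytes are the block number, big-endian.
	func historySlot(number uint64) common.Hash {
		var key common.Hash
		binary.BigEndian.PutUint64(key[24:], number)
		return key
	}

On the transition block, chain_makers.go, state_processor.go and miner/worker.go all seed the recent ancestor hashes (256 of them, per the commit message) via InsertBlockHashHistoryAtEip2935Fork; after that, only the direct parent is written each block, and BLOCKHASH simply reads the slot back instead of walking headers. Storage slots below 64 are mapped into the account-header area of the contract's verkle stem at suffix 64 + slot, which matches the suffixes 64 and 65 the tests above check for the hashes of blocks 0 and 1.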
+ if evm.chainRules.IsPrague { + num.SetBytes(getBlockHashFromContract(num64, evm.StateDB, evm.Accesses).Bytes()) + return nil, nil } var upper, lower uint64 diff --git a/miner/worker.go b/miner/worker.go index ef8d6087b7cd..aae4fe8b6454 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -921,7 +921,12 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) { return nil, err } if w.chainConfig.IsPrague(header.Number, header.Time) { - core.ProcessParentBlockHash(env.state, header.Number.Uint64()-1, header.ParentHash) + parent := w.chain.GetHeaderByNumber(header.Number.Uint64() - 1) + if !w.chain.Config().IsPrague(parent.Number, parent.Time) { + core.InsertBlockHashHistoryAtEip2935Fork(env.state, header.Number.Uint64()-1, header.ParentHash, w.chain) + } else { + core.ProcessParentBlockHash(env.state, header.Number.Uint64()-1, header.ParentHash) + } } return env, nil } From 077ad8728fccefe8c582da6877739203a83b8e08 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Thu, 15 Feb 2024 09:46:09 -0300 Subject: [PATCH 77/99] Record witness access in EXTCODEHASH (#370) * instructions: add access witness recording for EXTCODEHASH * add test for EXTCODEHASH witness recording * add test for access witness EXTCODEHASH Signed-off-by: Ignacio Hagopian * do not touch version Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --- core/state_processor_test.go | 133 +++++++++++++++++++++++++++++++++++ core/vm/instructions.go | 7 ++ 2 files changed, 140 insertions(+) diff --git a/core/state_processor_test.go b/core/state_processor_test.go index f2e5c2769c99..1756daaf8c66 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -37,6 +37,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie/utils" //"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" @@ -794,3 +795,135 @@ func TestProcessVerkleContractWithEmptyCode(t *testing.T) { } } } + +func TestProcessVerklExtCodeHashOpcode(t *testing.T) { + var ( + config = ¶ms.ChainConfig{ + ChainID: big.NewInt(69421), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + ShanghaiTime: u64(0), + PragueTime: u64(0), + TerminalTotalDifficulty: common.Big0, + TerminalTotalDifficultyPassed: true, + ProofInBlocks: true, + } + signer = types.LatestSigner(config) + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain + gendb = rawdb.NewMemoryDatabase() // Database for the block-generation code, they must be separate as they are path-based. 
+ coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") + account1 = common.HexToAddress("0x687704DB07e902e9A8B3754031D168D46E3D586e") + account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") + gspec = &Genesis{ + Config: config, + Alloc: GenesisAlloc{ + coinbase: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account1: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account2: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 3, + }, + }, + } + ) + // Verkle trees use the snapshot, which must be enabled before the + // data is saved into the tree+database. + genesis := gspec.MustCommit(bcdb) + + // Commit the genesis block to the block-generation database as it + // is now independent of the blockchain database. + gspec.MustCommit(gendb) + + dummyContract := []byte{ + 0x60, 2, // PUSH1 2 + 0x60, 12, // PUSH1 12 + 0x60, 0x00, // PUSH1 0 + 0x39, // CODECOPY + + 0x60, 2, // PUSH1 2 + 0x60, 0x00, // PUSH1 0 + 0xF3, // RETURN + + // Contract that auto-calls EXTCODEHASH + 0x60, 42, // PUSH1 42 + } + dummyContractAddr := common.HexToAddress("3a220f351252089d385b29beca14e27f204c296a") + extCodeHashContract := []byte{ + 0x60, 22, // PUSH1 22 + 0x60, 12, // PUSH1 12 + 0x60, 0x00, // PUSH1 0 + 0x39, // CODECOPY + + 0x60, 22, // PUSH1 22 + 0x60, 0x00, // PUSH1 0 + 0xF3, // RETURN + + // Contract that auto-calls EXTCODEHASH + 0x73, // PUSH20 + 0x3a, 0x22, 0x0f, 0x35, 0x12, 0x52, 0x08, 0x9d, 0x38, 0x5b, 0x29, 0xbe, 0xca, 0x14, 0xe2, 0x7f, 0x20, 0x4c, 0x29, 0x6a, + 0x3F, // EXTCODEHASH + } + extCodeHashContractAddr := common.HexToAddress("db7d6ab1f17c6b31909ae466702703daef9269cf") + _, _, _, statediff := GenerateVerkleChain(gspec.Config, genesis, beacon.New(ethash.NewFaker()), gendb, 2, func(i int, gen *BlockGen) { + gen.SetPoS() + + if i == 0 { + // Create dummy contract. + tx, _ := types.SignTx(types.NewContractCreation(0, big.NewInt(0), 100_000, big.NewInt(875000000), dummyContract), signer, testKey) + gen.AddTx(tx) + + // Create contract with EXTCODEHASH opcode. 
+ tx, _ = types.SignTx(types.NewContractCreation(1, big.NewInt(0), 100_000, big.NewInt(875000000), extCodeHashContract), signer, testKey) + gen.AddTx(tx) + } else { + tx, _ := types.SignTx(types.NewTransaction(2, extCodeHashContractAddr, big.NewInt(0), 100_000, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + } + + }) + + contractKeccakTreeKey := utils.GetTreeKeyCodeKeccak(dummyContractAddr[:]) + + var stateDiffIdx = -1 + for i, stemStateDiff := range statediff[1] { + if bytes.Equal(stemStateDiff.Stem[:], contractKeccakTreeKey[:31]) { + stateDiffIdx = i + break + } + } + if stateDiffIdx == -1 { + t.Fatalf("no state diff found for stem") + } + + codeHashStateDiff := statediff[1][stateDiffIdx].SuffixDiffs[0] + if codeHashStateDiff.Suffix != utils.CodeKeccakLeafKey { + t.Fatalf("code hash invalid suffix") + } + if codeHashStateDiff.CurrentValue == nil { + t.Fatalf("codeHash.CurrentValue must not be empty") + } + expCodeHash := crypto.Keccak256Hash(dummyContract[12:]) + if *codeHashStateDiff.CurrentValue != expCodeHash { + t.Fatalf("codeHash.CurrentValue unexpected code hash") + } + if codeHashStateDiff.NewValue != nil { + t.Fatalf("codeHash.NewValue must be nil") + } +} diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 7fb340da7b24..35d9ccc62197 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -485,6 +485,13 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) + if interpreter.evm.chainRules.IsPrague { + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, trieUtils.CodeKeccakLeafKey) + if !scope.Contract.UseGas(statelessGas) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } + } if interpreter.evm.StateDB.Empty(address) { slot.Clear() } else { From 8ac057090e965d52ef7cd5fd0a6130bdc8eed3e6 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Thu, 15 Feb 2024 09:47:03 -0300 Subject: [PATCH 78/99] fix integer underflow (#374) Signed-off-by: Ignacio Hagopian --- core/vm/instructions.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 35d9ccc62197..372d4a900248 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -401,11 +401,13 @@ func touchCodeChunksRangeOnReadAndChargeGas(contractAddr []byte, startPC, size u return 0 } - // endPC is the last PC that must be touched. - endPC := startPC + size - 1 - if startPC+size > codeLen { + endPC := startPC + size + if endPC > codeLen { endPC = codeLen } + if endPC > 0 { + endPC -= 1 // endPC is the last bytecode that will be touched. 
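The underflow being fixed here is easiest to see with the clamping logic pulled out on its own. A minimal sketch (the function name and the explicit guards are illustrative, not taken from the patch):

	// touchedChunks returns the inclusive range of 31-byte code chunks covered by
	// reading `size` bytes of code starting at startPC, clamped to codeLen.
	// ok == false means nothing is touched (size is 0 or the start is past the end).
	func touchedChunks(startPC, size, codeLen uint64) (first, last uint64, ok bool) {
		if size == 0 || startPC >= codeLen {
			return 0, 0, false
		}
		endPC := startPC + size
		if endPC > codeLen {
			endPC = codeLen
		}
		endPC-- // index of the last byte actually touched
		return startPC / 31, endPC / 31, true
	}

The previous version computed startPC + size - 1 up front, which wraps around for size == 0, and clamped endPC to codeLen rather than codeLen - 1, so it could also count one chunk too many when codeLen is a multiple of 31.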
+ } var statelessGasCharged uint64 for chunkNumber := startPC / 31; chunkNumber <= endPC/31; chunkNumber++ { From 72211cd5d543d150f9f075224e7c6d2f47f8c19b Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Thu, 15 Feb 2024 10:08:01 -0300 Subject: [PATCH 79/99] Record witness access in BALANCE opcode (#369) * record access witness balance in BALANCE opcode Signed-off-by: Ignacio Hagopian * add tests for BALANCE opcode Signed-off-by: Ignacio Hagopian * add version touching Signed-off-by: Ignacio Hagopian * do not touch version Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --- core/state_processor_test.go | 95 ++++++++++++++++++++++++++++++++++++ core/vm/instructions.go | 7 +++ 2 files changed, 102 insertions(+) diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 1756daaf8c66..e9cd344f83e1 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -927,3 +927,98 @@ func TestProcessVerklExtCodeHashOpcode(t *testing.T) { t.Fatalf("codeHash.NewValue must be nil") } } + +func TestProcessVerkleBalanceOpcode(t *testing.T) { + var ( + config = ¶ms.ChainConfig{ + ChainID: big.NewInt(69421), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + ShanghaiTime: u64(0), + PragueTime: u64(0), + TerminalTotalDifficulty: common.Big0, + TerminalTotalDifficultyPassed: true, + ProofInBlocks: true, + } + signer = types.LatestSigner(config) + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain + gendb = rawdb.NewMemoryDatabase() // Database for the block-generation code, they must be separate as they are path-based. + coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") + account1 = common.HexToAddress("0x687704DB07e902e9A8B3754031D168D46E3D586e") + account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") + gspec = &Genesis{ + Config: config, + Alloc: GenesisAlloc{ + coinbase: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account1: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account2: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 3, + }, + }, + } + ) + // Verkle trees use the snapshot, which must be enabled before the + // data is saved into the tree+database. + genesis := gspec.MustCommit(bcdb) + + // Commit the genesis block to the block-generation database as it + // is now independent of the blockchain database. 
+ gspec.MustCommit(gendb) + + _, _, _, statediff := GenerateVerkleChain(gspec.Config, genesis, beacon.New(ethash.NewFaker()), gendb, 1, func(i int, gen *BlockGen) { + gen.SetPoS() + txData := []byte{ + 0x73, // PUSH20 + 0x61, 0x77, 0x84, 0x3d, 0xb3, 0x13, 0x8a, 0xe6, 0x96, 0x79, 0xA5, 0x4b, 0x95, 0xcf, 0x34, 0x5E, 0xD7, 0x59, 0x45, 0x0d, // 0x6177843db3138ae69679A54b95cf345ED759450d + 0x31, // BALANCE + } + tx, _ := types.SignTx(types.NewContractCreation(0, big.NewInt(0), 100_000, big.NewInt(875000000), txData), signer, testKey) + gen.AddTx(tx) + }) + + account2BalanceTreeKey := utils.GetTreeKeyBalance(account2[:]) + + var stateDiffIdx = -1 + for i, stemStateDiff := range statediff[0] { + if bytes.Equal(stemStateDiff.Stem[:], account2BalanceTreeKey[:31]) { + stateDiffIdx = i + break + } + } + if stateDiffIdx == -1 { + t.Fatalf("no state diff found for stem") + } + + var zero [32]byte + balanceStateDiff := statediff[0][stateDiffIdx].SuffixDiffs[0] + if balanceStateDiff.Suffix != utils.BalanceLeafKey { + t.Fatalf("invalid suffix diff") + } + if balanceStateDiff.CurrentValue == nil { + t.Fatalf("invalid current value") + } + if *balanceStateDiff.CurrentValue == zero { + t.Fatalf("invalid current value") + } + if balanceStateDiff.NewValue != nil { + t.Fatalf("invalid new value") + } +} diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 372d4a900248..a78ce26c25d5 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -264,6 +264,13 @@ func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] func opBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) + if interpreter.evm.chainRules.IsPrague { + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, trieUtils.BalanceLeafKey) + if !scope.Contract.UseGas(statelessGas) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } + } slot.SetFromBig(interpreter.evm.StateDB.GetBalance(address)) return nil, nil } From f72f7820ddf8c29d76aa3b95bb260bd81d27490b Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Thu, 15 Feb 2024 13:57:51 -0300 Subject: [PATCH 80/99] fix addr alignement (#366) Signed-off-by: Ignacio Hagopian --- core/state/access_witness.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/state/access_witness.go b/core/state/access_witness.go index 7eb0990b645c..75cd85eeb892 100644 --- a/core/state/access_witness.go +++ b/core/state/access_witness.go @@ -244,7 +244,7 @@ type branchAccessKey struct { func newBranchAccessKey(addr []byte, treeIndex uint256.Int) branchAccessKey { var sk branchAccessKey - copy(sk.addr[:], addr) + copy(sk.addr[20-len(addr):], addr) sk.treeIndex = treeIndex return sk } From c2da1985bc8fabde3a776f39370ebbf90e4a7c98 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Fri, 16 Feb 2024 05:26:36 -0300 Subject: [PATCH 81/99] Fix SELFDESTRUCT incorrect account zeroing & missing beneficiary balance in witness (#378) * fix SELFDESTRUCT witness recording Signed-off-by: Ignacio Hagopian * add selfdestruct tests Signed-off-by: Ignacio Hagopian * solve lint nit Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --- core/state/statedb.go | 8 + core/state_processor_test.go | 488 ++++++++++++++++++++++++++++++++++- core/vm/instructions.go | 16 ++ core/vm/interface.go | 2 + trie/verkle.go | 21 -- 5 files changed, 513 insertions(+), 22 deletions(-) diff --git a/core/state/statedb.go 
b/core/state/statedb.go index 1e7c892c2d8a..4b1ba52347ce 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -530,6 +530,14 @@ func (s *StateDB) Selfdestruct6780(addr common.Address) { } } +func (s *StateDB) WasCreatedInCurrentTx(addr common.Address) bool { + stateObject := s.getStateObject(addr) + if stateObject == nil { + return false + } + return stateObject.created +} + // SetTransientState sets transient storage for a given account. It // adds the change to the journal so that it can be rolled back // to its previous value if there is a revert. diff --git a/core/state_processor_test.go b/core/state_processor_test.go index e9cd344f83e1..6b05435cd9ba 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -19,6 +19,7 @@ package core import ( "bytes" "crypto/ecdsa" + "encoding/binary" //"fmt" "math/big" @@ -896,7 +897,6 @@ func TestProcessVerklExtCodeHashOpcode(t *testing.T) { tx, _ := types.SignTx(types.NewTransaction(2, extCodeHashContractAddr, big.NewInt(0), 100_000, big.NewInt(875000000), nil), signer, testKey) gen.AddTx(tx) } - }) contractKeccakTreeKey := utils.GetTreeKeyCodeKeccak(dummyContractAddr[:]) @@ -1022,3 +1022,489 @@ func TestProcessVerkleBalanceOpcode(t *testing.T) { t.Fatalf("invalid new value") } } + +func TestProcessVerkleSelfDestructInSeparateTx(t *testing.T) { + var ( + config = ¶ms.ChainConfig{ + ChainID: big.NewInt(69421), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + ShanghaiTime: u64(0), + PragueTime: u64(0), + TerminalTotalDifficulty: common.Big0, + TerminalTotalDifficultyPassed: true, + ProofInBlocks: true, + } + signer = types.LatestSigner(config) + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain + gendb = rawdb.NewMemoryDatabase() // Database for the block-generation code, they must be separate as they are path-based. + coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") + account1 = common.HexToAddress("0x687704DB07e902e9A8B3754031D168D46E3D586e") + account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") + gspec = &Genesis{ + Config: config, + Alloc: GenesisAlloc{ + coinbase: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account1: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account2: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 3, + }, + }, + } + ) + genesis := gspec.MustCommit(bcdb) + gspec.MustCommit(gendb) + + // The goal of this test is to test SELFDESTRUCT that happens in a contract execution which is created + // in a previous transaction. 
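The hand-assembled contracts in these selfdestruct tests (and the earlier EXTCODEHASH one) all use the same minimal init-code convention, which the raw bytes below do not make obvious: the first 12 bytes are a constructor that CODECOPYs the trailing runtime code into memory and RETURNs it, and everything after that is what actually gets deployed. For the contract below:

	offset  0..11   init code:    PUSH1 22, PUSH1 12, PUSH1 0, CODECOPY, PUSH1 22, PUSH1 0, RETURN
	offset 12..33   runtime code: PUSH20 <beneficiary>, SELFDESTRUCT   (22 bytes)

so the deployed code does nothing but self-destruct to the hard-coded beneficiary when it is called in the follow-up transaction.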
+ + selfDestructContract := []byte{ + 0x60, 22, // PUSH1 22 + 0x60, 12, // PUSH1 12 + 0x60, 0x00, // PUSH1 0 + 0x39, // CODECOPY + + 0x60, 22, // PUSH1 22 + 0x60, 0x00, // PUSH1 0 + 0xF3, // RETURN + + // Deployed code + 0x73, // PUSH20 + 0x61, 0x77, 0x84, 0x3d, 0xb3, 0x13, 0x8a, 0xe6, 0x96, 0x79, 0xA5, 0x4b, 0x95, 0xcf, 0x34, 0x5E, 0xD7, 0x59, 0x45, 0x0d, // 0x6177843db3138ae69679A54b95cf345ED759450d + 0xFF, // SELFDESTRUCT + } + selfDestructContractAddr := common.HexToAddress("3a220f351252089d385b29beca14e27f204c296a") + _, _, _, statediff := GenerateVerkleChain(gspec.Config, genesis, beacon.New(ethash.NewFaker()), gendb, 2, func(i int, gen *BlockGen) { + gen.SetPoS() + + if i == 0 { + // Create selfdestruct contract, sending 42 wei. + tx, _ := types.SignTx(types.NewContractCreation(0, big.NewInt(42), 100_000, big.NewInt(875000000), selfDestructContract), signer, testKey) + gen.AddTx(tx) + } else { + // Call it. + tx, _ := types.SignTx(types.NewTransaction(1, selfDestructContractAddr, big.NewInt(0), 100_000, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + } + }) + + var zero [32]byte + { // Check self-destructed contract in the witness + selfDestructContractTreeKey := utils.GetTreeKeyCodeKeccak(selfDestructContractAddr[:]) + + var stateDiffIdx = -1 + for i, stemStateDiff := range statediff[1] { + if bytes.Equal(stemStateDiff.Stem[:], selfDestructContractTreeKey[:31]) { + stateDiffIdx = i + break + } + } + if stateDiffIdx == -1 { + t.Fatalf("no state diff found for stem") + } + + balanceStateDiff := statediff[1][stateDiffIdx].SuffixDiffs[1] + if balanceStateDiff.Suffix != utils.BalanceLeafKey { + t.Fatalf("balance invalid suffix") + } + + // The original balance was 42. + var fourtyTwo [32]byte + fourtyTwo[0] = 42 + if *balanceStateDiff.CurrentValue != fourtyTwo { + t.Fatalf("the pre-state balance before self-destruct must be 42") + } + + // The new balance must be 0. + if *balanceStateDiff.NewValue != zero { + t.Fatalf("the post-state balance after self-destruct must be 0") + } + } + { // Check self-destructed target in the witness. 
+ selfDestructTargetTreeKey := utils.GetTreeKeyCodeKeccak(account2[:]) + + var stateDiffIdx = -1 + for i, stemStateDiff := range statediff[1] { + if bytes.Equal(stemStateDiff.Stem[:], selfDestructTargetTreeKey[:31]) { + stateDiffIdx = i + break + } + } + if stateDiffIdx == -1 { + t.Fatalf("no state diff found for stem") + } + + balanceStateDiff := statediff[1][stateDiffIdx].SuffixDiffs[0] + if balanceStateDiff.Suffix != utils.BalanceLeafKey { + t.Fatalf("balance invalid suffix") + } + if balanceStateDiff.CurrentValue == nil { + t.Fatalf("codeHash.CurrentValue must not be empty") + } + if balanceStateDiff.NewValue == nil { + t.Fatalf("codeHash.NewValue must not be empty") + } + preStateBalance := binary.LittleEndian.Uint64(balanceStateDiff.CurrentValue[:]) + postStateBalance := binary.LittleEndian.Uint64(balanceStateDiff.NewValue[:]) + if postStateBalance-preStateBalance != 42 { + t.Fatalf("the post-state balance after self-destruct must be 42") + } + } +} + +func TestProcessVerkleSelfDestructInSameTx(t *testing.T) { + var ( + config = ¶ms.ChainConfig{ + ChainID: big.NewInt(69421), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + ShanghaiTime: u64(0), + PragueTime: u64(0), + TerminalTotalDifficulty: common.Big0, + TerminalTotalDifficultyPassed: true, + ProofInBlocks: true, + } + signer = types.LatestSigner(config) + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain + gendb = rawdb.NewMemoryDatabase() // Database for the block-generation code, they must be separate as they are path-based. + coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") + account1 = common.HexToAddress("0x687704DB07e902e9A8B3754031D168D46E3D586e") + account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") + gspec = &Genesis{ + Config: config, + Alloc: GenesisAlloc{ + coinbase: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account1: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account2: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 3, + }, + }, + } + ) + genesis := gspec.MustCommit(bcdb) + gspec.MustCommit(gendb) + + // The goal of this test is to test SELFDESTRUCT that happens in a contract execution which is created + // in **the same** transaction sending the remaining balance to an external (i.e: not itself) account. 
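Under the EIP-6780 semantics exercised via Selfdestruct6780 above, this is the one case where the account is actually removed: it is created and self-destructed within the same transaction, so its balance leaf appears in the witness with both CurrentValue and NewValue nil, while the external beneficiary still receives the 42 wei. When the contract predates the transaction (the previous test), only the balance moves and the account, including its code, survives.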
+ + selfDestructContract := []byte{ + 0x73, // PUSH20 + 0x61, 0x77, 0x84, 0x3d, 0xb3, 0x13, 0x8a, 0xe6, 0x96, 0x79, 0xA5, 0x4b, 0x95, 0xcf, 0x34, 0x5E, 0xD7, 0x59, 0x45, 0x0d, // 0x6177843db3138ae69679A54b95cf345ED759450d + 0xFF, // SELFDESTRUCT + } + selfDestructContractAddr := common.HexToAddress("3a220f351252089d385b29beca14e27f204c296a") + _, _, _, statediff := GenerateVerkleChain(gspec.Config, genesis, beacon.New(ethash.NewFaker()), gendb, 1, func(i int, gen *BlockGen) { + gen.SetPoS() + tx, _ := types.SignTx(types.NewContractCreation(0, big.NewInt(42), 100_000, big.NewInt(875000000), selfDestructContract), signer, testKey) + gen.AddTx(tx) + }) + + { // Check self-destructed contract in the witness + selfDestructContractTreeKey := utils.GetTreeKeyCodeKeccak(selfDestructContractAddr[:]) + + var stateDiffIdx = -1 + for i, stemStateDiff := range statediff[0] { + if bytes.Equal(stemStateDiff.Stem[:], selfDestructContractTreeKey[:31]) { + stateDiffIdx = i + break + } + } + if stateDiffIdx == -1 { + t.Fatalf("no state diff found for stem") + } + + balanceStateDiff := statediff[0][stateDiffIdx].SuffixDiffs[1] + if balanceStateDiff.Suffix != utils.BalanceLeafKey { + t.Fatalf("balance invalid suffix") + } + + if balanceStateDiff.CurrentValue != nil { + t.Fatalf("the pre-state balance before must be nil, since the contract didn't exist") + } + + if balanceStateDiff.NewValue != nil { + t.Fatalf("the post-state balance after self-destruct must be nil since the contract shouldn't be created at all") + } + } + { // Check self-destructed target in the witness. + selfDestructTargetTreeKey := utils.GetTreeKeyCodeKeccak(account2[:]) + + var stateDiffIdx = -1 + for i, stemStateDiff := range statediff[0] { + if bytes.Equal(stemStateDiff.Stem[:], selfDestructTargetTreeKey[:31]) { + stateDiffIdx = i + break + } + } + if stateDiffIdx == -1 { + t.Fatalf("no state diff found for stem") + } + + balanceStateDiff := statediff[0][stateDiffIdx].SuffixDiffs[0] + if balanceStateDiff.Suffix != utils.BalanceLeafKey { + t.Fatalf("balance invalid suffix") + } + if balanceStateDiff.CurrentValue == nil { + t.Fatalf("codeHash.CurrentValue must not be empty") + } + if balanceStateDiff.NewValue == nil { + t.Fatalf("codeHash.NewValue must not be empty") + } + preStateBalance := binary.LittleEndian.Uint64(balanceStateDiff.CurrentValue[:]) + postStateBalance := binary.LittleEndian.Uint64(balanceStateDiff.NewValue[:]) + if postStateBalance-preStateBalance != 42 { + t.Fatalf("the post-state balance after self-destruct must be 42") + } + } +} + +func TestProcessVerkleSelfDestructInSeparateTxWithSelfBeneficiary(t *testing.T) { + var ( + config = ¶ms.ChainConfig{ + ChainID: big.NewInt(69421), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + ShanghaiTime: u64(0), + PragueTime: u64(0), + TerminalTotalDifficulty: common.Big0, + TerminalTotalDifficultyPassed: true, + ProofInBlocks: true, + } + signer = types.LatestSigner(config) + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain + gendb = rawdb.NewMemoryDatabase() // Database for the block-generation code, they must be separate as they 
are path-based. + coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") + account1 = common.HexToAddress("0x687704DB07e902e9A8B3754031D168D46E3D586e") + account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") + gspec = &Genesis{ + Config: config, + Alloc: GenesisAlloc{ + coinbase: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account1: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account2: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 3, + }, + }, + } + ) + genesis := gspec.MustCommit(bcdb) + gspec.MustCommit(gendb) + + // The goal of this test is to test SELFDESTRUCT that happens in a contract execution which is created + // in a *previous* transaction sending the remaining balance to itself. + + selfDestructContract := []byte{ + 0x60, 22, // PUSH1 22 + 0x60, 12, // PUSH1 12 + 0x60, 0x00, // PUSH1 0 + 0x39, // CODECOPY + + 0x60, 22, // PUSH1 22 + 0x60, 0x00, // PUSH1 0 + 0xF3, // RETURN + + // Deployed code + 0x73, // PUSH20 + 0x3a, 0x22, 0x0f, 0x35, 0x12, 0x52, 0x08, 0x9d, 0x38, 0x5b, 0x29, 0xbe, 0xca, 0x14, 0xe2, 0x7f, 0x20, 0x4c, 0x29, 0x6a, // 0x3a220f351252089d385b29beca14e27f204c296a + 0xFF, // SELFDESTRUCT + } + selfDestructContractAddr := common.HexToAddress("3a220f351252089d385b29beca14e27f204c296a") + _, _, _, statediff := GenerateVerkleChain(gspec.Config, genesis, beacon.New(ethash.NewFaker()), gendb, 2, func(i int, gen *BlockGen) { + gen.SetPoS() + if i == 0 { + // Create selfdestruct contract, sending 42 wei. + tx, _ := types.SignTx(types.NewContractCreation(0, big.NewInt(42), 100_000, big.NewInt(875000000), selfDestructContract), signer, testKey) + gen.AddTx(tx) + } else { + // Call it. + tx, _ := types.SignTx(types.NewTransaction(1, selfDestructContractAddr, big.NewInt(0), 100_000, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + } + }) + + { + // Check self-destructed contract in the witness. + // The way 6780 is implemented today, it always SubBalance from the self-destructed contract, and AddBalance + // to the beneficiary. In this case both addresses are the same, thus this might be optimizable from a gas + // perspective. But until that happens, we need to honor this "balance reading" adding it to the witness. + + selfDestructContractTreeKey := utils.GetTreeKeyCodeKeccak(selfDestructContractAddr[:]) + + var stateDiffIdx = -1 + for i, stemStateDiff := range statediff[1] { + if bytes.Equal(stemStateDiff.Stem[:], selfDestructContractTreeKey[:31]) { + stateDiffIdx = i + break + } + } + if stateDiffIdx == -1 { + t.Fatalf("no state diff found for stem") + } + + balanceStateDiff := statediff[1][stateDiffIdx].SuffixDiffs[1] + if balanceStateDiff.Suffix != utils.BalanceLeafKey { + t.Fatalf("balance invalid suffix") + } + + // The original balance was 42. + var fourtyTwo [32]byte + fourtyTwo[0] = 42 + if *balanceStateDiff.CurrentValue != fourtyTwo { + t.Fatalf("the pre-state balance before self-destruct must be 42") + } + + // Note that the SubBalance+AddBalance net effect is a 0 change, so NewValue + // must be nil. 
+ if balanceStateDiff.NewValue != nil { + t.Fatalf("the post-state balance after self-destruct must be empty") + } + } +} + +func TestProcessVerkleSelfDestructInSameTxWithSelfBeneficiary(t *testing.T) { + var ( + config = &params.ChainConfig{ + ChainID: big.NewInt(69421), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + ShanghaiTime: u64(0), + PragueTime: u64(0), + TerminalTotalDifficulty: common.Big0, + TerminalTotalDifficultyPassed: true, + ProofInBlocks: true, + } + signer = types.LatestSigner(config) + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain + gendb = rawdb.NewMemoryDatabase() // Database for the block-generation code, they must be separate as they are path-based. + coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") + account1 = common.HexToAddress("0x687704DB07e902e9A8B3754031D168D46E3D586e") + account2 = common.HexToAddress("0x6177843db3138ae69679A54b95cf345ED759450d") + gspec = &Genesis{ + Config: config, + Alloc: GenesisAlloc{ + coinbase: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account1: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + account2: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 3, + }, + }, + } + ) + genesis := gspec.MustCommit(bcdb) + gspec.MustCommit(gendb) + + // The goal of this test is to test SELFDESTRUCT that happens in a contract execution which is created + // in **the same** transaction sending the remaining balance to itself. 
+ + selfDestructContract := []byte{ + 0x73, // PUSH20 + 0x3a, 0x22, 0x0f, 0x35, 0x12, 0x52, 0x08, 0x9d, 0x38, 0x5b, 0x29, 0xbe, 0xca, 0x14, 0xe2, 0x7f, 0x20, 0x4c, 0x29, 0x6a, // 0x3a220f351252089d385b29beca14e27f204c296a + 0xFF, // SELFDESTRUCT + } + selfDestructContractAddr := common.HexToAddress("3a220f351252089d385b29beca14e27f204c296a") + _, _, _, statediff := GenerateVerkleChain(gspec.Config, genesis, beacon.New(ethash.NewFaker()), gendb, 1, func(i int, gen *BlockGen) { + gen.SetPoS() + tx, _ := types.SignTx(types.NewContractCreation(0, big.NewInt(42), 100_000, big.NewInt(875000000), selfDestructContract), signer, testKey) + gen.AddTx(tx) + }) + + { // Check self-destructed contract in the witness + selfDestructContractTreeKey := utils.GetTreeKeyCodeKeccak(selfDestructContractAddr[:]) + + var stateDiffIdx = -1 + for i, stemStateDiff := range statediff[0] { + if bytes.Equal(stemStateDiff.Stem[:], selfDestructContractTreeKey[:31]) { + stateDiffIdx = i + break + } + } + if stateDiffIdx == -1 { + t.Fatalf("no state diff found for stem") + } + + balanceStateDiff := statediff[0][stateDiffIdx].SuffixDiffs[1] + if balanceStateDiff.Suffix != utils.BalanceLeafKey { + t.Fatalf("balance invalid suffix") + } + + if balanceStateDiff.CurrentValue != nil { + t.Fatalf("the pre-state balance before must be nil, since the contract didn't exist") + } + + if balanceStateDiff.NewValue != nil { + t.Fatalf("the post-state balance after self-destruct must be nil since the contract shouldn't be created at all") + } + } +} diff --git a/core/vm/instructions.go b/core/vm/instructions.go index a78ce26c25d5..6c40ff83a272 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -943,6 +943,22 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) tracer.CaptureExit([]byte{}, 0, nil) } + if interpreter.evm.chainRules.IsPrague { + contractAddr := scope.Contract.Address() + beneficiaryAddr := beneficiary.Bytes20() + // If the beneficiary isn't the contract, we need to touch the beneficiary's balance. + // If the beneficiary is the contract itself, there're two possibilities: + // 1. The contract was created in the same transaction: the balance is already touched (no need to touch again) + // 2. The contract wasn't created in the same transaction: there's no net change in balance, + // and SELFDESTRUCT will perform no action on the account header. (we touch since we did SubBalance+AddBalance above) + if contractAddr != beneficiaryAddr || interpreter.evm.StateDB.WasCreatedInCurrentTx(contractAddr) { + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(beneficiaryAddr[:], uint256.Int{}, trieUtils.BalanceLeafKey) + if !scope.Contract.UseGas(statelessGas) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } + } + } return nil, errStopToken } diff --git a/core/vm/interface.go b/core/vm/interface.go index 0a02a0181c05..4241b1d45a77 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -57,6 +57,8 @@ type StateDB interface { Selfdestruct6780(common.Address) + WasCreatedInCurrentTx(common.Address) bool + // Exist reports whether the given account exists in state. // Notably this should also return true for self-destructed accounts. 
Exist(common.Address) bool diff --git a/trie/verkle.go b/trie/verkle.go index 3a402bebe5b6..760e30c8cdaa 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -213,27 +213,6 @@ func (trie *VerkleTrie) UpdateStorage(address common.Address, key, value []byte) } func (t *VerkleTrie) DeleteAccount(addr common.Address) error { - var ( - err error - values = make([][]byte, verkle.NodeWidth) - stem = t.pointCache.GetTreeKeyVersionCached(addr[:]) - ) - - for i := 0; i < verkle.NodeWidth; i++ { - values[i] = zero[:] - } - - switch root := t.root.(type) { - case *verkle.InternalNode: - err = root.InsertValuesAtStem(stem, values, t.FlatdbNodeResolver) - default: - return errInvalidRootType - } - if err != nil { - return fmt.Errorf("DeleteAccount (%x) error: %v", addr, err) - } - // TODO figure out if the code size needs to be updated, too - return nil } From 4dc4b398bb0d9b316786bb1604b2c7cb41d8a56e Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Tue, 27 Feb 2024 12:16:24 -0300 Subject: [PATCH 82/99] core/vm: fix contract creation witness gas charging logic (#389) Signed-off-by: Ignacio Hagopian --- core/vm/evm.go | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/core/vm/evm.go b/core/vm/evm.go index bcd5248bb72c..782fc6d56740 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -519,6 +519,15 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } } + if err == nil && evm.chainRules.IsPrague { + if len(ret) > 0 { + touchCodeChunksRangeOnReadAndChargeGas(address.Bytes(), 0, uint64(len(ret)), uint64(len(ret)), evm.Accesses) + } + if !contract.UseGas(evm.Accesses.TouchAndChargeContractCreateCompleted(address.Bytes()[:])) { + err = ErrOutOfGas + } + } + // When an error was returned by the EVM or when setting the creation code // above we revert to the snapshot and consume any gas remaining. Additionally // when we're in homestead this also counts for code storage gas errors. 
@@ -529,16 +538,6 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } } - if err == nil && evm.chainRules.IsPrague { - if len(ret) > 0 { - touchCodeChunksRangeOnReadAndChargeGas(address.Bytes(), 0, uint64(len(ret)), uint64(len(ret)), evm.Accesses) - } - if !contract.UseGas(evm.Accesses.TouchAndChargeContractCreateCompleted(address.Bytes()[:])) { - evm.StateDB.RevertToSnapshot(snapshot) - err = ErrOutOfGas - } - } - if evm.Config.Tracer != nil { if evm.depth == 0 { evm.Config.Tracer.CaptureEnd(ret, gas-contract.Gas, err) From f667365a5d08ca331ffa4db05ed6d3b90888b5fc Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Wed, 28 Feb 2024 09:41:16 -0300 Subject: [PATCH 83/99] witness: fix contract creation init gas charging (#393) Signed-off-by: Ignacio Hagopian --- core/state/access_witness.go | 1 - 1 file changed, 1 deletion(-) diff --git a/core/state/access_witness.go b/core/state/access_witness.go index 75cd85eeb892..696da15fd1ef 100644 --- a/core/state/access_witness.go +++ b/core/state/access_witness.go @@ -118,7 +118,6 @@ func (aw *AccessWitness) TouchAndChargeContractCreateInit(addr []byte, createSen var gas uint64 gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.VersionLeafKey) gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.NonceLeafKey) - gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey) if createSendsValue { gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.BalanceLeafKey) } From c997469bbd6f170a67d1fa9f809ca592325e3b6d Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Thu, 29 Feb 2024 10:33:01 +0100 Subject: [PATCH 84/99] fix: missing MAIN_STORAGE_OFFSET when reading a block hash from the history contract (#390) Co-authored-by: Ignacio Hagopian --- core/vm/instructions.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 6c40ff83a272..095846feff1f 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -25,7 +25,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" - trieUtils "github.com/ethereum/go-ethereum/trie/utils" + "github.com/ethereum/go-ethereum/trie/utils" "github.com/holiman/uint256" ) @@ -265,7 +265,7 @@ func opBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) if interpreter.evm.chainRules.IsPrague { - statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, trieUtils.BalanceLeafKey) + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, utils.BalanceLeafKey) if !scope.Contract.UseGas(statelessGas) { scope.Contract.Gas = 0 return nil, ErrOutOfGas @@ -355,7 +355,7 @@ func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) slot := scope.Stack.peek() cs := uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())) if interpreter.evm.chainRules.IsPrague { - statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, trieUtils.CodeSizeLeafKey) + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, utils.CodeSizeLeafKey) if !scope.Contract.UseGas(statelessGas) { scope.Contract.Gas = 0 return nil, ErrOutOfGas @@ 
-495,7 +495,7 @@ func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) if interpreter.evm.chainRules.IsPrague { - statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, trieUtils.CodeKeccakLeafKey) + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, utils.CodeKeccakLeafKey) if !scope.Contract.UseGas(statelessGas) { scope.Contract.Gas = 0 return nil, ErrOutOfGas @@ -518,7 +518,8 @@ func opGasprice(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ func getBlockHashFromContract(number uint64, statedb StateDB, witness *state.AccessWitness) common.Hash { var pnum common.Hash binary.BigEndian.PutUint64(pnum[24:], number) - witness.TouchAddressOnReadAndComputeGas(params.HistoryStorageAddress[:], *uint256.NewInt(number / 256), byte(number&0xFF)) + treeIndex, suffix := utils.GetTreeKeyStorageSlotTreeIndexes(pnum.Bytes()) + witness.TouchAddressOnReadAndComputeGas(params.HistoryStorageAddress[:], *treeIndex, suffix) return statedb.GetState(params.HistoryStorageAddress, pnum) } @@ -952,7 +953,7 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext // 2. The contract wasn't created in the same transaction: there's no net change in balance, // and SELFDESTRUCT will perform no action on the account header. (we touch since we did SubBalance+AddBalance above) if contractAddr != beneficiaryAddr || interpreter.evm.StateDB.WasCreatedInCurrentTx(contractAddr) { - statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(beneficiaryAddr[:], uint256.Int{}, trieUtils.BalanceLeafKey) + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(beneficiaryAddr[:], uint256.Int{}, utils.BalanceLeafKey) if !scope.Contract.UseGas(statelessGas) { scope.Contract.Gas = 0 return nil, ErrOutOfGas From 448b405d83493e80072ba34de51dfbaf51467126 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Mon, 4 Mar 2024 09:29:38 -0300 Subject: [PATCH 85/99] Truncate BALANCE opcode parameter for witness recording Signed-off-by: Ignacio Hagopian --- core/vm/instructions.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 095846feff1f..177e882879bc 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -265,7 +265,7 @@ func opBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) if interpreter.evm.chainRules.IsPrague { - statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, utils.BalanceLeafKey) + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(address[:], uint256.Int{}, utils.BalanceLeafKey) if !scope.Contract.UseGas(statelessGas) { scope.Contract.Gas = 0 return nil, ErrOutOfGas From b5495ad1f909446daa66c06907e49ac716f7a335 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Wed, 6 Mar 2024 03:41:13 -0300 Subject: [PATCH 86/99] more address truncation fixes (#399) Signed-off-by: Ignacio Hagopian --- core/vm/gas_table.go | 3 ++- core/vm/instructions.go | 7 ++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index f367c3c92978..50ebf8c05b64 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -101,8 +101,9 @@ var ( func gasExtCodeSize(evm *EVM, contract 
*Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { usedGas := uint64(0) slot := stack.Back(0) + address := slot.Bytes20() if evm.chainRules.IsPrague { - usedGas += evm.TxContext.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, trieUtils.CodeSizeLeafKey) + usedGas += evm.TxContext.Accesses.TouchAddressOnReadAndComputeGas(address[:], uint256.Int{}, trieUtils.CodeSizeLeafKey) } return usedGas, nil diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 177e882879bc..cc3de642bd83 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -353,9 +353,10 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.peek() - cs := uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())) + address := slot.Bytes20() + cs := uint64(interpreter.evm.StateDB.GetCodeSize(address)) if interpreter.evm.chainRules.IsPrague { - statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, utils.CodeSizeLeafKey) + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(address[:], uint256.Int{}, utils.CodeSizeLeafKey) if !scope.Contract.UseGas(statelessGas) { scope.Contract.Gas = 0 return nil, ErrOutOfGas @@ -495,7 +496,7 @@ func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) if interpreter.evm.chainRules.IsPrague { - statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, utils.CodeKeccakLeafKey) + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(address[:], uint256.Int{}, utils.CodeKeccakLeafKey) if !scope.Contract.UseGas(statelessGas) { scope.Contract.Gas = 0 return nil, ErrOutOfGas From 631099322c6bda8ff947df2ef65a3f2416c8e399 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Wed, 6 Mar 2024 05:48:02 -0300 Subject: [PATCH 87/99] Fix TestProcessVerkle tests (#400) * move self-destruct witness logic Signed-off-by: Ignacio Hagopian * fix test Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --- core/state_processor_test.go | 4 ++-- core/vm/instructions.go | 32 ++++++++++++++++---------------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 6b05435cd9ba..fb0420d69a97 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -485,8 +485,8 @@ func TestProcessVerkle(t *testing.T) { txCost1 := params.TxGas txCost2 := params.TxGas - contractCreationCost := intrinsicContractCreationGas + uint64(7700 /* creation */ +2939 /* execution costs */) - codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(7000 /* creation */ +299744 /* execution costs */) + contractCreationCost := intrinsicContractCreationGas + uint64(5600+700+700+700 /* creation with value */ +2739 /* execution costs */) + codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(5600+700 /* creation */ +302044 /* execution costs */) blockGasUsagesExpected := []uint64{ txCost1*2 + txCost2, txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, diff --git a/core/vm/instructions.go b/core/vm/instructions.go index cc3de642bd83..eea71d66c56b 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -945,22 +945,6 @@ func opSelfdestruct(pc *uint64, 
interpreter *EVMInterpreter, scope *ScopeContext tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) tracer.CaptureExit([]byte{}, 0, nil) } - if interpreter.evm.chainRules.IsPrague { - contractAddr := scope.Contract.Address() - beneficiaryAddr := beneficiary.Bytes20() - // If the beneficiary isn't the contract, we need to touch the beneficiary's balance. - // If the beneficiary is the contract itself, there're two possibilities: - // 1. The contract was created in the same transaction: the balance is already touched (no need to touch again) - // 2. The contract wasn't created in the same transaction: there's no net change in balance, - // and SELFDESTRUCT will perform no action on the account header. (we touch since we did SubBalance+AddBalance above) - if contractAddr != beneficiaryAddr || interpreter.evm.StateDB.WasCreatedInCurrentTx(contractAddr) { - statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(beneficiaryAddr[:], uint256.Int{}, utils.BalanceLeafKey) - if !scope.Contract.UseGas(statelessGas) { - scope.Contract.Gas = 0 - return nil, ErrOutOfGas - } - } - } return nil, errStopToken } @@ -977,6 +961,22 @@ func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeCon tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) tracer.CaptureExit([]byte{}, 0, nil) } + if interpreter.evm.chainRules.IsPrague { + contractAddr := scope.Contract.Address() + beneficiaryAddr := beneficiary.Bytes20() + // If the beneficiary isn't the contract, we need to touch the beneficiary's balance. + // If the beneficiary is the contract itself, there're two possibilities: + // 1. The contract was created in the same transaction: the balance is already touched (no need to touch again) + // 2. The contract wasn't created in the same transaction: there's no net change in balance, + // and SELFDESTRUCT will perform no action on the account header. 
(we touch since we did SubBalance+AddBalance above) + if contractAddr != beneficiaryAddr || interpreter.evm.StateDB.WasCreatedInCurrentTx(contractAddr) { + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(beneficiaryAddr[:], uint256.Int{}, utils.BalanceLeafKey) + if !scope.Contract.UseGas(statelessGas) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } + } + } return nil, errStopToken } From 40d5153941ff5c1a7d8f5c6a7e4887ce410c07fe Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Tue, 12 Mar 2024 10:30:40 -0300 Subject: [PATCH 88/99] Use circular buffer for BLOCKHASH history (#402) * eip2935: use ring buffer Signed-off-by: Ignacio Hagopian * limit resolving scope Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --- core/state_processor.go | 3 ++- core/vm/instructions.go | 15 ++++++++------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/core/state_processor.go b/core/state_processor.go index cdb4e4af19be..d28df7aac1d0 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -378,8 +378,9 @@ func InsertBlockHashHistoryAtEip2935Fork(statedb *state.StateDB, prevNumber uint } func ProcessParentBlockHash(statedb *state.StateDB, prevNumber uint64, prevHash common.Hash) { + ringIndex := prevNumber % 256 var key common.Hash - binary.BigEndian.PutUint64(key[24:], prevNumber) + binary.BigEndian.PutUint64(key[24:], ringIndex) statedb.SetState(params.HistoryStorageAddress, key, prevHash) index, suffix := utils.GetTreeKeyStorageSlotTreeIndexes(key[:]) statedb.Witness().TouchAddressOnWriteAndComputeGas(params.HistoryStorageAddress[:], *index, suffix) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index eea71d66c56b..14b5704c5b87 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -517,8 +517,9 @@ func opGasprice(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ } func getBlockHashFromContract(number uint64, statedb StateDB, witness *state.AccessWitness) common.Hash { + ringIndex := number % 256 var pnum common.Hash - binary.BigEndian.PutUint64(pnum[24:], number) + binary.BigEndian.PutUint64(pnum[24:], ringIndex) treeIndex, suffix := utils.GetTreeKeyStorageSlotTreeIndexes(pnum.Bytes()) witness.TouchAddressOnReadAndComputeGas(params.HistoryStorageAddress[:], *treeIndex, suffix) return statedb.GetState(params.HistoryStorageAddress, pnum) @@ -533,11 +534,6 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ( } evm := interpreter.evm - // if Prague is active, read it from the history contract (EIP 2935). - if evm.chainRules.IsPrague { - num.SetBytes(getBlockHashFromContract(num64, evm.StateDB, evm.Accesses).Bytes()) - return nil, nil - } var upper, lower uint64 upper = interpreter.evm.Context.BlockNumber.Uint64() @@ -547,7 +543,12 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ( lower = upper - 256 } if num64 >= lower && num64 < upper { - num.SetBytes(interpreter.evm.Context.GetHash(num64).Bytes()) + // if Prague is active, read it from the history contract (EIP 2935). 
+ if evm.chainRules.IsPrague { + num.SetBytes(getBlockHashFromContract(num64, evm.StateDB, evm.Accesses).Bytes()) + } else { + num.SetBytes(interpreter.evm.Context.GetHash(num64).Bytes()) + } } else { num.Clear() } From a3232b7cc65c6743524ee34658dacc1661d95c3d Mon Sep 17 00:00:00 2001 From: Ng Wei Han <47109095+weiihann@users.noreply.github.com> Date: Mon, 18 Mar 2024 03:55:57 +0800 Subject: [PATCH 89/99] core/txpool/blobpool: fix metrics name for prometheus export (#27901) (#403) Co-authored-by: imulmat4 <117636097+imulmat4@users.noreply.github.com> --- core/txpool/blobpool/metrics.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/core/txpool/blobpool/metrics.go b/core/txpool/blobpool/metrics.go index 280913b3a916..070cc5ca4712 100644 --- a/core/txpool/blobpool/metrics.go +++ b/core/txpool/blobpool/metrics.go @@ -35,15 +35,15 @@ var ( // The below metrics track the per-shelf metrics for the primary blob store // and the temporary limbo store. - shelfDatausedGaugeName = "blobpool/shelf-%d/dataused" - shelfDatagapsGaugeName = "blobpool/shelf-%d/datagaps" - shelfSlotusedGaugeName = "blobpool/shelf-%d/slotused" - shelfSlotgapsGaugeName = "blobpool/shelf-%d/slotgaps" + shelfDatausedGaugeName = "blobpool/shelf_%d/dataused" + shelfDatagapsGaugeName = "blobpool/shelf_%d/datagaps" + shelfSlotusedGaugeName = "blobpool/shelf_%d/slotused" + shelfSlotgapsGaugeName = "blobpool/shelf_%d/slotgaps" - limboShelfDatausedGaugeName = "blobpool/limbo/shelf-%d/dataused" - limboShelfDatagapsGaugeName = "blobpool/limbo/shelf-%d/datagaps" - limboShelfSlotusedGaugeName = "blobpool/limbo/shelf-%d/slotused" - limboShelfSlotgapsGaugeName = "blobpool/limbo/shelf-%d/slotgaps" + limboShelfDatausedGaugeName = "blobpool/limbo/shelf_%d/dataused" + limboShelfDatagapsGaugeName = "blobpool/limbo/shelf_%d/datagaps" + limboShelfSlotusedGaugeName = "blobpool/limbo/shelf_%d/slotused" + limboShelfSlotgapsGaugeName = "blobpool/limbo/shelf_%d/slotgaps" // The oversized metrics aggregate the shelf stats above the max blob count // limits to track transactions that are just huge, but don't contain blobs. 
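The ring-buffer indexing that PATCH 88 above introduces for the EIP-2935 BLOCKHASH history contract maps every block number onto one of a fixed set of storage slots by reducing it modulo the history length. Below is a minimal standalone sketch of that slot derivation, assuming a 256-slot ring as in that patch (a later patch in this series widens it to params.Eip2935BlockHashHistorySize = 8192) and using a hypothetical historySlotKey helper rather than the actual go-ethereum code:

package main

import (
	"encoding/binary"
	"fmt"
)

// historySize mirrors the ring length used in PATCH 88; PATCH 92 later
// replaces the literal with params.Eip2935BlockHashHistorySize (8192).
const historySize = 256

// historySlotKey is an illustrative helper (not part of go-ethereum): it
// builds the 32-byte storage-slot key for a block number the same way
// ProcessParentBlockHash does, writing the ring index big-endian into the
// last 8 bytes of an otherwise zero key.
func historySlotKey(number uint64) [32]byte {
	var key [32]byte
	binary.BigEndian.PutUint64(key[24:], number%historySize)
	return key
}

func main() {
	// Blocks exactly historySize apart collide on the same slot, which is why
	// opBlockhash only consults the contract inside the 256-block window.
	fmt.Printf("slot for block 1:   %x\n", historySlotKey(1))
	fmt.Printf("slot for block 257: %x\n", historySlotKey(1+historySize))
}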
From c790d1fdd63bca5d14e6f29a1310973155e61464 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Tue, 26 Mar 2024 06:02:05 -0300 Subject: [PATCH 90/99] trie/verkle: change tree key generation (#401) * trie/verkle: change tree key generation Signed-off-by: Ignacio Hagopian * fix tests Signed-off-by: Ignacio Hagopian * update go-verkle Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --- go.mod | 5 ++--- go.sum | 12 ++++++------ trie/utils/verkle.go | 10 +--------- trie/utils/verkle_test.go | 2 +- trie/verkle_test.go | 2 +- 5 files changed, 11 insertions(+), 20 deletions(-) diff --git a/go.mod b/go.mod index 81367549719c..3f7010a4989d 100644 --- a/go.mod +++ b/go.mod @@ -14,19 +14,19 @@ require ( github.com/cloudflare/cloudflare-go v0.14.0 github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 github.com/consensys/gnark-crypto v0.12.1 - github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 + github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c github.com/crate-crypto/go-kzg-4844 v0.3.0 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set/v2 v2.1.0 github.com/docker/docker v1.6.2 github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 github.com/ethereum/c-kzg-4844 v0.3.0 + github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 github.com/fatih/color v1.7.0 github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.1.1-0.20231125115329-d193f0b46e01 github.com/go-stack/stack v1.8.1 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.3.0 @@ -96,7 +96,6 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect - github.com/ethereum/go-verkle v0.1.1-0.20240119133216-f8289fc59149 // indirect github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect github.com/go-ole/go-ole v1.2.1 // indirect diff --git a/go.sum b/go.sum index bf0e786ed54b..6b0fd1e7566c 100644 --- a/go.sum +++ b/go.sum @@ -84,8 +84,8 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= -github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= +github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c h1:uQYC5Z1mdLRPrZhHjHxufI8+2UG/i25QG92j0Er9p6I= +github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -127,8 +127,10 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/ethereum/c-kzg-4844 v0.3.0 h1:3Y3hD6l5i0dEYsBL50C+Om644kve3pNqoAcvE26o9zI= github.com/ethereum/c-kzg-4844 v0.3.0/go.mod h1:WI2Nd82DMZAAZI1wV2neKGost9EKjvbpQR9OqE5Qqa8= -github.com/ethereum/go-verkle v0.1.1-0.20240119133216-f8289fc59149 h1:7gbu2YdLL8SicVklig4nyizkWkw367BP+5eEivNPy04= -github.com/ethereum/go-verkle v0.1.1-0.20240119133216-f8289fc59149/go.mod h1:cZmLDzTyZPwUygE2ksQEcxOLZ8YpfRghnVtfxRnhgJM= +github.com/ethereum/go-verkle v0.1.1-0.20240306114018-819f7d81e58c h1:+6lz/7jTYZSgL+I3guRRRqnD23ICKjEMvxVTgJw3P00= +github.com/ethereum/go-verkle v0.1.1-0.20240306114018-819f7d81e58c/go.mod h1:D9AJLVXSyZQXJQVk8oh1EwjISE+sJTn2duYIZC0dy3w= +github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 h1:KrE8I4reeVvf7C1tm8elRjj4BdscTYzz/WAbYyf/JI4= +github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0/go.mod h1:D9AJLVXSyZQXJQVk8oh1EwjISE+sJTn2duYIZC0dy3w= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -146,8 +148,6 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILD github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/gballet/go-verkle v0.1.1-0.20231125115329-d193f0b46e01 h1:Jm7DG6/BptrrNgOh9Jb6LPBbz75VJA5FkFKB4O/zbQw= -github.com/gballet/go-verkle v0.1.1-0.20231125115329-d193f0b46e01/go.mod h1:OzHSBt37xRRHc27lb9PaCldBnJYQZP8KcMdYyOB2dtU= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index 65f4c1fa2ab0..d1aa9dec57fa 100644 --- a/trie/utils/verkle.go +++ b/trie/utils/verkle.go @@ -187,15 +187,7 @@ func GetTreeKeyCodeChunkWithEvaluatedAddress(addressPoint *verkle.Point, chunk * } func PointToHash(evaluated *verkle.Point, suffix byte) []byte { - // The output of Byte() is big engian for banderwagon. This - // introduces an imbalance in the tree, because hashes are - // elements of a 253-bit field. This means more than half the - // tree would be empty. To avoid this problem, use a little - // endian commitment and chop the MSB. 
- retb := evaluated.Bytes() - for i := 0; i < 16; i++ { - retb[31-i], retb[i] = retb[i], retb[31-i] - } + retb := verkle.HashPointToBytes(evaluated) retb[31] = suffix return retb[:] } diff --git a/trie/utils/verkle_test.go b/trie/utils/verkle_test.go index 66f1cc473ea6..02de69c9b712 100644 --- a/trie/utils/verkle_test.go +++ b/trie/utils/verkle_test.go @@ -38,7 +38,7 @@ func TestGetTreeKey(t *testing.T) { tk := GetTreeKey(addr[:], n, 1) got := hex.EncodeToString(tk) - exp := "f42f932f43faf5d14b292b9009c45c28da61dbf66e20dbedc2e02dfd64ff5a01" + exp := "6ede905763d5856cd2d67936541e82aa78f7141bf8cd5ff6c962170f3e9dc201" if got != exp { t.Fatalf("Generated trie key is incorrect: %s != %s", got, exp) } diff --git a/trie/verkle_test.go b/trie/verkle_test.go index 9f30c6bde858..8a4fb921bac9 100644 --- a/trie/verkle_test.go +++ b/trie/verkle_test.go @@ -371,7 +371,7 @@ func TestEmptyKeySetInProveAndSerialize(t *testing.T) { func TestGetTreeKeys(t *testing.T) { addr := common.Hex2Bytes("71562b71999873DB5b286dF957af199Ec94617f7") - target := common.Hex2Bytes("274cde18dd9dbb04caf16ad5ee969c19fe6ca764d5688b5e1d419f4ac6cd1600") + target := common.Hex2Bytes("1540dfad7755b40be0768c6aa0a5096fbf0215e0e8cf354dd928a17834646600") key := utils.GetTreeKeyVersion(addr) t.Logf("key=%x", key) t.Logf("actualKey=%x", target) From d0ec1be47309bc0415f35fa5dbc24aa888074c0e Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Sat, 6 Apr 2024 03:07:52 -0300 Subject: [PATCH 91/99] Fix TestProcessVerkle* tests (#410) * core: fix TestProcessVerkleInvalidContractCreation Signed-off-by: Ignacio Hagopian * core: fix TestProcessVerkleContractWithEmptyCode Signed-off-by: Ignacio Hagopian * add separate action for testprocessverkle tests Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --- .github/workflows/go.yml | 12 ++++++++++++ core/state_processor_test.go | 14 +++++++------- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 76d69a7108db..b349f23d6617 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -36,6 +36,7 @@ jobs: test: runs-on: self-hosted + needs: [test-process-verkle] steps: - uses: actions/checkout@v2 - name: Set up Go @@ -44,3 +45,14 @@ jobs: go-version: 1.21.1 - name: Test run: go test ./... -timeout=20m + + test-process-verkle: + runs-on: self-hosted + steps: + - uses: actions/checkout@v2 + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.21.1 + - name: Test + run: go test ./core -run=TestProcessVerkle diff --git a/core/state_processor_test.go b/core/state_processor_test.go index fb0420d69a97..a5fd70aada65 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -643,19 +643,19 @@ func TestProcessVerkleInvalidContractCreation(t *testing.T) { for _, stemStateDiff := range statediff[0] { // Check that the value 0x85, which is overflowing the account header, // is present. 
- if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("a10042195481d30478251625e1ccef0e2174dc4e083e81d2566d880373f791")) { + if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("917f78f74226b0e3755134ce3e3433cac8df5a657f6c9b9a3d0122a3e4beb0")) { for _, suffixDiff := range stemStateDiff.SuffixDiffs { if suffixDiff.Suffix != 133 { t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) } } - } else if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("b24fa84f214459af17d6e3f604811f252cac93146f02d67d7811bbcdfa448b")) { + } else if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("26f8a45c665b9ca0bf769b213ba9bb14e86d4028a41274ae2a509e71a89d86")) { for _, suffixDiff := range stemStateDiff.SuffixDiffs { if suffixDiff.Suffix != 105 && suffixDiff.Suffix != 0 && suffixDiff.Suffix != 2 && suffixDiff.Suffix != 3 { t.Fatalf("invalid suffix diff found for %x in block #1: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) } } - } else if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("97f2911f5efe08b74c28727d004e36d260225e73525fe2a300c8f58c7ffd76")) { + } else if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("5b5fdfedd6a0e932da408ac7d772a36513d1eee9b9926e52620c43a433aad7")) { // BLOCKHASH contract stem if len(stemStateDiff.SuffixDiffs) > 1 { t.Fatalf("invalid suffix diff count found for BLOCKHASH contract: %d != 1", len(stemStateDiff.SuffixDiffs)) @@ -683,7 +683,7 @@ func TestProcessVerkleInvalidContractCreation(t *testing.T) { // code should make it to the witness. for _, stemStateDiff := range statediff[1] { for _, suffixDiff := range stemStateDiff.SuffixDiffs { - if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("97f2911f5efe08b74c28727d004e36d260225e73525fe2a300c8f58c7ffd76")) { + if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("5b5fdfedd6a0e932da408ac7d772a36513d1eee9b9926e52620c43a433aad7")) { // BLOCKHASH contract stem if len(stemStateDiff.SuffixDiffs) > 1 { t.Fatalf("invalid suffix diff count found for BLOCKHASH contract at block #2: %d != 1", len(stemStateDiff.SuffixDiffs)) @@ -694,8 +694,8 @@ func TestProcessVerkleInvalidContractCreation(t *testing.T) { if stemStateDiff.SuffixDiffs[0].NewValue == nil { t.Fatalf("missing post state value for BLOCKHASH contract at block #2") } - if *stemStateDiff.SuffixDiffs[0].NewValue != common.HexToHash("53abcdfb284720ea59efe923d3dc774bbb7e787d829599f8ec7a81d344dd3d17") { - t.Fatalf("invalid post state value for BLOCKHASH contract at block #2: %x != ", (*stemStateDiff.SuffixDiffs[0].NewValue)[:]) + if *stemStateDiff.SuffixDiffs[0].NewValue != common.HexToHash("0a130e6478e47593861d8c3feb65045497327d89619dd12ae12d70e73a0191dd") { + t.Fatalf("invalid post state value for BLOCKHASH contract at block #2: 0a130e6478e47593861d8c3feb65045497327d89619dd12ae12d70e73a0191dd != %x", (*stemStateDiff.SuffixDiffs[0].NewValue)[:]) } } else if suffixDiff.Suffix > 4 { t.Fatalf("invalid suffix diff found for %x in block #2: %d\n", stemStateDiff.Stem, suffixDiff.Suffix) @@ -769,7 +769,7 @@ func TestProcessVerkleContractWithEmptyCode(t *testing.T) { }) for _, stemStateDiff := range statediff[0] { - if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("97f2911f5efe08b74c28727d004e36d260225e73525fe2a300c8f58c7ffd76")) { + if bytes.Equal(stemStateDiff.Stem[:], common.Hex2Bytes("5b5fdfedd6a0e932da408ac7d772a36513d1eee9b9926e52620c43a433aad7")) { // BLOCKHASH contract stem if len(stemStateDiff.SuffixDiffs) > 1 { t.Fatalf("invalid suffix diff count found for BLOCKHASH contract: %d != 1", 
len(stemStateDiff.SuffixDiffs)) From 0d70489104c36a5d118b3c62e58b2034e12ae31a Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Mon, 8 Apr 2024 16:00:56 -0300 Subject: [PATCH 92/99] core: change block hash history scope (#411) Signed-off-by: Ignacio Hagopian --- core/state_processor.go | 4 ++-- core/vm/instructions.go | 2 +- params/protocol_params.go | 3 +++ 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/core/state_processor.go b/core/state_processor.go index d28df7aac1d0..6961a873602e 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -371,14 +371,14 @@ func (kvm *keyValueMigrator) migrateCollectedKeyValues(tree *trie.VerkleTrie) er func InsertBlockHashHistoryAtEip2935Fork(statedb *state.StateDB, prevNumber uint64, prevHash common.Hash, chain consensus.ChainHeaderReader) { ancestor := chain.GetHeader(prevHash, prevNumber) - for i := prevNumber; i > 0 && i >= prevNumber-256; i-- { + for i := prevNumber; i > 0 && i >= prevNumber-params.Eip2935BlockHashHistorySize; i-- { ProcessParentBlockHash(statedb, i, ancestor.Hash()) ancestor = chain.GetHeader(ancestor.ParentHash, ancestor.Number.Uint64()-1) } } func ProcessParentBlockHash(statedb *state.StateDB, prevNumber uint64, prevHash common.Hash) { - ringIndex := prevNumber % 256 + ringIndex := prevNumber % params.Eip2935BlockHashHistorySize var key common.Hash binary.BigEndian.PutUint64(key[24:], ringIndex) statedb.SetState(params.HistoryStorageAddress, key, prevHash) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 14b5704c5b87..7073f2d7c297 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -517,7 +517,7 @@ func opGasprice(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ } func getBlockHashFromContract(number uint64, statedb StateDB, witness *state.AccessWitness) common.Hash { - ringIndex := number % 256 + ringIndex := number % params.Eip2935BlockHashHistorySize var pnum common.Hash binary.BigEndian.PutUint64(pnum[24:], ringIndex) treeIndex, suffix := utils.GetTreeKeyStorageSlotTreeIndexes(pnum.Bytes()) diff --git a/params/protocol_params.go b/params/protocol_params.go index 6d91ee48a85f..8aad52103979 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -173,6 +173,9 @@ const ( BlobTxMinBlobGasprice = 1 // Minimum gas price for data blobs BlobTxBlobGaspriceUpdateFraction = 2225652 // Controls the maximum rate of change for blob gas price BlobTxPointEvaluationPrecompileGas = 50000 // Gas price for the point evaluation precompile. 
+ + // Block hash history parameters + Eip2935BlockHashHistorySize = 8192 ) // Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations From 1930b97b65ba0d1b5d46c8799a5141002e1b696f Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 15 Apr 2024 13:41:28 +0200 Subject: [PATCH 93/99] simplified gas accounting layer (#405) * simplified gas accounting layer * integrate some review feedback * Apply suggestions from code review Co-authored-by: Ignacio Hagopian * more suggestions from code review * don't charge creation gas + charge code chunks in create * A couple more fixes * make linter happy * fix create init gas consumption issue * fix: in gas funcs, use tx witness instead of global witness * fix linter issue * Apply suggestions from code review Co-authored-by: Ignacio Hagopian * fix: EXTCODECOPY gas consumption * fix warm gas costs * fix the order gas is charged in during contract creation epilogue * fix selfdestruct * fix #365 in eip rewrite (#407) * fix: OOG type in code creation OOG (#408) * core/vm: charge BLOCKHASH witness cost (#409) * core/vm: charge BLOCKHASH witness cost Signed-off-by: Ignacio Hagopian * remove gas optimization for now Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian * remove redundant logic for contract creation (#413) Signed-off-by: Ignacio Hagopian * fix precompile address check for charging witness costs & fix missing value-bearing rule (#412) Signed-off-by: Ignacio Hagopian * core/vm: fix wrong check (#416) Signed-off-by: Ignacio Hagopian * charge for account creation if selfdestruct creates a new account (#417) * add key comparison test (#418) * core/vm: charge contract init before execution logic (#419) * core/vm: charge contract init before execution logic Signed-off-by: Ignacio Hagopian * fix CREATE2 as well --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Guillaume Ballet <3272758+gballet@users.noreply.github.com> * quell linter --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian --- consensus/beacon/consensus.go | 7 +- consensus/ethash/consensus.go | 11 --- core/state/access_witness.go | 122 ++++++++++++++++++---------- core/state/statedb.go | 2 +- core/state_processor.go | 12 ++- core/state_processor_test.go | 50 ++++++++++-- core/state_transition.go | 19 +---- core/vm/eips.go | 27 +++++++ core/vm/evm.go | 69 +++++++++------- core/vm/gas_table.go | 51 ++---------- core/vm/instructions.go | 126 +++++++---------------------- core/vm/interpreter.go | 4 +- core/vm/jump_table.go | 1 + core/vm/operations_acl.go | 13 --- core/vm/operations_verkle.go | 147 ++++++++++++++++++++++++++++++++++ params/config.go | 3 + params/protocol_params.go | 1 + trie/utils/verkle.go | 12 +-- trie/utils/verkle_test.go | 24 +++++- trie/verkle.go | 6 +- 20 files changed, 424 insertions(+), 283 deletions(-) create mode 100644 core/vm/operations_verkle.go diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 35a7ed2b56d0..ad8894cf4db0 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -32,7 +32,6 @@ import ( "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/utils" "github.com/ethereum/go-verkle" - "github.com/holiman/uint256" ) // Proof-of-stake protocol constants. @@ -357,11 +356,7 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types. 
state.AddBalance(w.Address, amount) // The returned gas is not charged - state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.VersionLeafKey) - state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.BalanceLeafKey) - state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.NonceLeafKey) - state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.CodeKeccakLeafKey) - state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.CodeSizeLeafKey) + state.Witness().TouchFullAccount(w.Address[:], true) } } diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 44aec25c1216..720e49b6e0d8 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -33,8 +33,6 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" - "github.com/ethereum/go-ethereum/trie/utils" - "github.com/holiman/uint256" "golang.org/x/crypto/sha3" ) @@ -568,19 +566,10 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header r.Div(r, big8) // This should not happen, but it's useful for replay tests - if config.IsPrague(header.Number, header.Time) { - state.Witness().TouchAddressOnReadAndComputeGas(uncle.Coinbase.Bytes(), uint256.Int{}, utils.BalanceLeafKey) - } state.AddBalance(uncle.Coinbase, r) r.Div(blockReward, big32) reward.Add(reward, r) } - if config.IsPrague(header.Number, header.Time) { - state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.BalanceLeafKey) - state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.VersionLeafKey) - state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.NonceLeafKey) - state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.CodeKeccakLeafKey) - } state.AddBalance(header.Coinbase, reward) } diff --git a/core/state/access_witness.go b/core/state/access_witness.go index 696da15fd1ef..4c9d3218ad96 100644 --- a/core/state/access_witness.go +++ b/core/state/access_witness.go @@ -18,6 +18,7 @@ package state import ( "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie/utils" "github.com/holiman/uint256" @@ -88,27 +89,25 @@ func (aw *AccessWitness) Copy() *AccessWitness { return naw } -func (aw *AccessWitness) TouchAndChargeProofOfAbsence(addr []byte) uint64 { +func (aw *AccessWitness) TouchFullAccount(addr []byte, isWrite bool) uint64 { var gas uint64 - gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.VersionLeafKey) - gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.BalanceLeafKey) - gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey) - gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey) - gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.NonceLeafKey) + for i := utils.VersionLeafKey; i <= utils.CodeSizeLeafKey; i++ { + gas += aw.touchAddressAndChargeGas(addr, zeroTreeIndex, byte(i), isWrite) + } return gas } func (aw *AccessWitness) TouchAndChargeMessageCall(addr []byte) uint64 { var gas uint64 - gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.VersionLeafKey) - gas += 
aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey) + gas += aw.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, false) + gas += aw.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey, false) return gas } func (aw *AccessWitness) TouchAndChargeValueTransfer(callerAddr, targetAddr []byte) uint64 { var gas uint64 - gas += aw.TouchAddressOnWriteAndComputeGas(callerAddr, zeroTreeIndex, utils.BalanceLeafKey) - gas += aw.TouchAddressOnWriteAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) + gas += aw.touchAddressAndChargeGas(callerAddr, zeroTreeIndex, utils.BalanceLeafKey, true) + gas += aw.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey, true) return gas } @@ -116,33 +115,18 @@ func (aw *AccessWitness) TouchAndChargeValueTransfer(callerAddr, targetAddr []by // a contract creation func (aw *AccessWitness) TouchAndChargeContractCreateInit(addr []byte, createSendsValue bool) uint64 { var gas uint64 - gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.VersionLeafKey) - gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.NonceLeafKey) + gas += aw.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, true) + gas += aw.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.NonceLeafKey, true) if createSendsValue { - gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.BalanceLeafKey) + gas += aw.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BalanceLeafKey, true) } return gas } -// TouchAndChargeContractCreateCompleted charges access access costs after -// the completion of a contract creation to populate the created account in -// the tree -func (aw *AccessWitness) TouchAndChargeContractCreateCompleted(addr []byte) uint64 { - var gas uint64 - gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.VersionLeafKey) - gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.BalanceLeafKey) - gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey) - gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey) - gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.NonceLeafKey) - return gas -} - func (aw *AccessWitness) TouchTxOriginAndComputeGas(originAddr []byte) uint64 { - aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.VersionLeafKey) - aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.CodeSizeLeafKey) - aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.CodeKeccakLeafKey) - aw.TouchAddressOnWriteAndComputeGas(originAddr, zeroTreeIndex, utils.NonceLeafKey) - aw.TouchAddressOnWriteAndComputeGas(originAddr, zeroTreeIndex, utils.BalanceLeafKey) + for i := utils.VersionLeafKey; i <= utils.CodeSizeLeafKey; i++ { + aw.touchAddressAndChargeGas(originAddr, zeroTreeIndex, byte(i), i == utils.BalanceLeafKey || i == utils.NonceLeafKey) + } // Kaustinen note: we're currently experimenting with stop chargin gas for the origin address // so simple transfer still take 21000 gas. This is to potentially avoid breaking existing tooling. 
@@ -152,14 +136,14 @@ func (aw *AccessWitness) TouchTxOriginAndComputeGas(originAddr []byte) uint64 { } func (aw *AccessWitness) TouchTxExistingAndComputeGas(targetAddr []byte, sendsValue bool) uint64 { - aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.VersionLeafKey) - aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.CodeSizeLeafKey) - aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.CodeKeccakLeafKey) - aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.NonceLeafKey) + aw.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, utils.VersionLeafKey, false) + aw.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, utils.CodeSizeLeafKey, false) + aw.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, utils.CodeHashLeafKey, false) + aw.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, utils.NonceLeafKey, false) if sendsValue { - aw.TouchAddressOnWriteAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) + aw.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey, true) } else { - aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) + aw.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey, false) } // Kaustinen note: we're currently experimenting with stop chargin gas for the origin address @@ -169,12 +153,9 @@ func (aw *AccessWitness) TouchTxExistingAndComputeGas(targetAddr []byte, sendsVa return 0 } -func (aw *AccessWitness) TouchAddressOnWriteAndComputeGas(addr []byte, treeIndex uint256.Int, subIndex byte) uint64 { - return aw.touchAddressAndChargeGas(addr, treeIndex, subIndex, true) -} - -func (aw *AccessWitness) TouchAddressOnReadAndComputeGas(addr []byte, treeIndex uint256.Int, subIndex byte) uint64 { - return aw.touchAddressAndChargeGas(addr, treeIndex, subIndex, false) +func (aw *AccessWitness) TouchSlotAndChargeGas(addr []byte, slot common.Hash, isWrite bool) uint64 { + treeIndex, subIndex := utils.GetTreeKeyStorageSlotTreeIndexes(slot.Bytes()) + return aw.touchAddressAndChargeGas(addr, *treeIndex, subIndex, isWrite) } func (aw *AccessWitness) touchAddressAndChargeGas(addr []byte, treeIndex uint256.Int, subIndex byte, isWrite bool) uint64 { @@ -259,3 +240,58 @@ func newChunkAccessKey(branchKey branchAccessKey, leafKey byte) chunkAccessKey { lk.leafKey = leafKey return lk } + +// touchCodeChunksRangeOnReadAndChargeGas is a helper function to touch every chunk in a code range and charge witness gas costs +func (aw *AccessWitness) TouchCodeChunksRangeAndChargeGas(contractAddr []byte, startPC, size uint64, codeLen uint64, isWrite bool) uint64 { + // note that in the case where the copied code is outside the range of the + // contract code but touches the last leaf with contract code in it, + // we don't include the last leaf of code in the AccessWitness. The + // reason that we do not need the last leaf is the account's code size + // is already in the AccessWitness so a stateless verifier can see that + // the code from the last leaf is not needed. + if (codeLen == 0 && size == 0) || startPC > codeLen { + return 0 + } + + endPC := startPC + size + if endPC > codeLen { + endPC = codeLen + } + if endPC > 0 { + endPC -= 1 // endPC is the last bytecode that will be touched. 
+ } + + var statelessGasCharged uint64 + for chunkNumber := startPC / 31; chunkNumber <= endPC/31; chunkNumber++ { + treeIndex := *uint256.NewInt((chunkNumber + 128) / 256) + subIndex := byte((chunkNumber + 128) % 256) + gas := aw.touchAddressAndChargeGas(contractAddr, treeIndex, subIndex, isWrite) + var overflow bool + statelessGasCharged, overflow = math.SafeAdd(statelessGasCharged, gas) + if overflow { + panic("overflow when adding gas") + } + } + + return statelessGasCharged +} + +func (aw *AccessWitness) TouchVersion(addr []byte, isWrite bool) uint64 { + return aw.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, isWrite) +} + +func (aw *AccessWitness) TouchBalance(addr []byte, isWrite bool) uint64 { + return aw.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BalanceLeafKey, isWrite) +} + +func (aw *AccessWitness) TouchNonce(addr []byte, isWrite bool) uint64 { + return aw.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.NonceLeafKey, isWrite) +} + +func (aw *AccessWitness) TouchCodeSize(addr []byte, isWrite bool) uint64 { + return aw.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey, isWrite) +} + +func (aw *AccessWitness) TouchCodeHash(addr []byte, isWrite bool) uint64 { + return aw.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, isWrite) +} diff --git a/core/state/statedb.go b/core/state/statedb.go index 4b1ba52347ce..ab1065eb4cf5 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1367,7 +1367,7 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er // - Add coinbase to access list (EIP-3651) // - Reset transient storage (EIP-1153) func (s *StateDB) Prepare(rules params.Rules, sender, coinbase common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { - if rules.IsBerlin { + if rules.IsEIP2929 { // Clear out any leftover from previous executions al := newAccessList() s.accessList = al diff --git a/core/state_processor.go b/core/state_processor.go index 6961a873602e..fbc6beda4a08 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -35,7 +35,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" - "github.com/ethereum/go-ethereum/trie/utils" tutils "github.com/ethereum/go-ethereum/trie/utils" "github.com/ethereum/go-verkle" "github.com/holiman/uint256" @@ -261,7 +260,7 @@ func (kvm *keyValueMigrator) addAccount(addr []byte, acc *types.StateAccount) { binary.LittleEndian.PutUint64(nonce[:8], acc.Nonce) leafNodeData.Values[tutils.NonceLeafKey] = nonce[:] - leafNodeData.Values[tutils.CodeKeccakLeafKey] = acc.CodeHash[:] + leafNodeData.Values[tutils.CodeHashLeafKey] = acc.CodeHash[:] } func (kvm *keyValueMigrator) addAccountCode(addr []byte, codeSize uint64, chunks []byte) { @@ -369,7 +368,13 @@ func (kvm *keyValueMigrator) migrateCollectedKeyValues(tree *trie.VerkleTrie) er return nil } +// InsertBlockHashHistoryAtEip2935Fork handles the insertion of all previous 256 +// blocks on the eip2935 activation block. It also adds the account header of the +// history contract to the witness. 
func InsertBlockHashHistoryAtEip2935Fork(statedb *state.StateDB, prevNumber uint64, prevHash common.Hash, chain consensus.ChainHeaderReader) { + // Make sure that the historical contract is added to the witness + statedb.Witness().TouchFullAccount(params.HistoryStorageAddress[:], true) + ancestor := chain.GetHeader(prevHash, prevNumber) for i := prevNumber; i > 0 && i >= prevNumber-params.Eip2935BlockHashHistorySize; i-- { ProcessParentBlockHash(statedb, i, ancestor.Hash()) @@ -382,6 +387,5 @@ func ProcessParentBlockHash(statedb *state.StateDB, prevNumber uint64, prevHash var key common.Hash binary.BigEndian.PutUint64(key[24:], ringIndex) statedb.SetState(params.HistoryStorageAddress, key, prevHash) - index, suffix := utils.GetTreeKeyStorageSlotTreeIndexes(key[:]) - statedb.Witness().TouchAddressOnWriteAndComputeGas(params.HistoryStorageAddress[:], *index, suffix) + statedb.Witness().TouchSlotAndChargeGas(params.HistoryStorageAddress[:], key, true) } diff --git a/core/state_processor_test.go b/core/state_processor_test.go index a5fd70aada65..991f4e2de68e 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -20,6 +20,9 @@ import ( "bytes" "crypto/ecdsa" "encoding/binary" + "encoding/json" + "fmt" + "os" //"fmt" "math/big" @@ -37,7 +40,9 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie/utils" //"github.com/ethereum/go-ethereum/rlp" @@ -472,11 +477,19 @@ func TestProcessVerkle(t *testing.T) { }, }, } + loggerCfg = &logger.Config{} ) + + os.MkdirAll("output", 0755) + traceFile, err := os.Create("./output/traces.jsonl") + if err != nil { + t.Fatal(err) + } + // Verkle trees use the snapshot, which must be enabled before the // data is saved into the tree+database. 
genesis := gspec.MustCommit(bcdb) - blockchain, _ := NewBlockChain(bcdb, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(bcdb, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{Tracer: logger.NewJSONLogger(loggerCfg, traceFile)}, nil, nil) defer blockchain.Stop() // Commit the genesis block to the block-generation database as it @@ -485,8 +498,8 @@ func TestProcessVerkle(t *testing.T) { txCost1 := params.TxGas txCost2 := params.TxGas - contractCreationCost := intrinsicContractCreationGas + uint64(5600+700+700+700 /* creation with value */ +2739 /* execution costs */) - codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(5600+700 /* creation */ +302044 /* execution costs */) + contractCreationCost := intrinsicContractCreationGas + uint64(5600+700+700+700 /* creation with value */ +1439 /* execution costs */) + codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(5600+700 /* creation */ +44044 /* execution costs */) blockGasUsagesExpected := []uint64{ txCost1*2 + txCost2, txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, @@ -513,6 +526,33 @@ func TestProcessVerkle(t *testing.T) { } }) + kvjson, err := json.Marshal(keyvals) + if err != nil { + t.Fatal(err) + } + err = os.WriteFile("./output/statediffs.json", kvjson, 0644) + if err != nil { + t.Fatal(err) + } + blockrlp, err := rlp.EncodeToBytes(genesis) + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(fmt.Sprintf("./output/block%d.rlp.hex", 0), []byte(fmt.Sprintf("%x", blockrlp)), 0644) + if err != nil { + t.Fatal(err) + } + for _, block := range chain { + blockrlp, err := rlp.EncodeToBytes(block) + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(fmt.Sprintf("./output/block%d.rlp.hex", block.NumberU64()), []byte(fmt.Sprintf("%x", blockrlp)), 0644) + if err != nil { + t.Fatal(err) + } + } + // Uncomment to extract block #2 //f, _ := os.Create("block2.rlp") //defer f.Close() @@ -521,7 +561,7 @@ func TestProcessVerkle(t *testing.T) { //f.Write(buf.Bytes()) //fmt.Printf("root= %x\n", chain[0].Root()) // check the proof for the last block - err := trie.DeserializeAndVerifyVerkleProof(proofs[1], chain[0].Root().Bytes(), chain[1].Root().Bytes(), keyvals[1]) + err = trie.DeserializeAndVerifyVerkleProof(proofs[1], chain[0].Root().Bytes(), chain[1].Root().Bytes(), keyvals[1]) if err != nil { t.Fatal(err) } @@ -913,7 +953,7 @@ func TestProcessVerklExtCodeHashOpcode(t *testing.T) { } codeHashStateDiff := statediff[1][stateDiffIdx].SuffixDiffs[0] - if codeHashStateDiff.Suffix != utils.CodeKeccakLeafKey { + if codeHashStateDiff.Suffix != utils.CodeHashLeafKey { t.Fatalf("code hash invalid suffix") } if codeHashStateDiff.CurrentValue == nil { diff --git a/core/state_transition.go b/core/state_transition.go index 969e7a75fb9b..0aa4a369018b 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -27,10 +27,7 @@ import ( "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie/utils" - "github.com/holiman/uint256" ) // ExecutionResult includes all output after executing given evm @@ -405,7 +402,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } st.gasRemaining -= gas - if rules.IsPrague { + if rules.IsEIP4762 { targetAddr := msg.To originAddr := msg.From @@ -413,7 +410,6 @@ func (st 
*StateTransition) TransitionDb() (*ExecutionResult, error) { if !tryConsumeGas(&st.gasRemaining, statelessGasOrigin) { return nil, fmt.Errorf("%w: Insufficient funds to cover witness access costs for transaction: have %d, want %d", ErrInsufficientBalanceWitness, st.gasRemaining, gas) } - originNonce := st.evm.StateDB.GetNonce(originAddr) if msg.To != nil { statelessGasDest := st.evm.Accesses.TouchTxExistingAndComputeGas(targetAddr.Bytes(), msg.Value.Sign() != 0) @@ -423,11 +419,6 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { // ensure the code size ends up in the access witness st.evm.StateDB.GetCodeSize(*targetAddr) - } else { - contractAddr := crypto.CreateAddress(originAddr, originNonce) - if !tryConsumeGas(&st.gasRemaining, st.evm.Accesses.TouchAndChargeContractCreateInit(contractAddr.Bytes(), msg.Value.Sign() != 0)) { - return nil, fmt.Errorf("%w: Insufficient funds to cover witness access costs for transaction: have %d, want %d", ErrInsufficientBalanceWitness, st.gasRemaining, gas) - } } } @@ -480,12 +471,8 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { st.state.AddBalance(st.evm.Context.Coinbase, fee) // add the coinbase to the witness iff the fee is greater than 0 - if rules.IsPrague && fee.Sign() != 0 { - st.evm.Accesses.TouchAddressOnWriteAndComputeGas(st.evm.Context.Coinbase[:], uint256.Int{}, utils.VersionLeafKey) - st.evm.Accesses.TouchAddressOnWriteAndComputeGas(st.evm.Context.Coinbase[:], uint256.Int{}, utils.BalanceLeafKey) - st.evm.Accesses.TouchAddressOnWriteAndComputeGas(st.evm.Context.Coinbase[:], uint256.Int{}, utils.NonceLeafKey) - st.evm.Accesses.TouchAddressOnWriteAndComputeGas(st.evm.Context.Coinbase[:], uint256.Int{}, utils.CodeKeccakLeafKey) - st.evm.Accesses.TouchAddressOnWriteAndComputeGas(st.evm.Context.Coinbase[:], uint256.Int{}, utils.CodeSizeLeafKey) + if rules.IsEIP4762 && fee.Sign() != 0 { + st.evm.Accesses.TouchFullAccount(st.evm.Context.Coinbase[:], true) } } diff --git a/core/vm/eips.go b/core/vm/eips.go index 704c1ce12745..1df79f6cffd0 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -37,6 +37,7 @@ var activators = map[int]func(*JumpTable){ 1884: enable1884, 1344: enable1344, 1153: enable1153, + 4762: enable4762, } // EnableEIP enables the given EIP on the config. 
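
The AccessWitness changes above charge code access through the chunked verkle layout that TouchCodeChunksRangeAndChargeGas walks: bytecode is split into 31-byte chunks, the account header occupies the first 128 leaves, and chunk c is therefore charged at tree index (c+128)/256 and sub-index (c+128)%256. The following is a minimal, self-contained sketch of that addressing arithmetic only; it uses plain uint64 instead of uint256, and codeChunkLocation is an illustrative name, not a geth function.

package main

import "fmt"

// codeChunkLocation mirrors the loop arithmetic in TouchCodeChunksRangeAndChargeGas:
// code is chunked into 31-byte groups, and the chunks start after the 128
// account-header leaves, with 256 leaves per subtree.
func codeChunkLocation(pc uint64) (chunk uint64, treeIndex uint64, subIndex byte) {
	chunk = pc / 31                      // which 31-byte code chunk the PC falls into
	treeIndex = (chunk + 128) / 256      // which 256-leaf subtree holds that chunk
	subIndex = byte((chunk + 128) % 256) // leaf position inside that subtree
	return chunk, treeIndex, subIndex
}

func main() {
	for _, pc := range []uint64{0, 30, 31, 5000} {
		c, ti, si := codeChunkLocation(pc)
		fmt.Printf("pc=%5d -> chunk=%3d treeIndex=%d subIndex=%3d\n", pc, c, ti, si)
	}
}
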
@@ -303,3 +304,29 @@ func enable6780(jt *JumpTable) { maxStack: maxStack(1, 0), } } + +func enable4762(jt *JumpTable) { + jt[SSTORE].constantGas = 0 + jt[SSTORE].dynamicGas = gasSStore4762 + jt[SLOAD].constantGas = 0 + jt[SLOAD].dynamicGas = gasSLoad4762 + jt[BALANCE].dynamicGas = gasBalance4762 + jt[BALANCE].constantGas = 0 + jt[EXTCODESIZE].constantGas = 0 + jt[EXTCODESIZE].dynamicGas = gasExtCodeSize4762 + jt[EXTCODEHASH].constantGas = 0 + jt[EXTCODEHASH].dynamicGas = gasExtCodeHash4762 + jt[EXTCODECOPY].constantGas = 0 + jt[EXTCODECOPY].dynamicGas = gasExtCodeCopyEIP4762 + jt[SELFDESTRUCT].dynamicGas = gasSelfdestructEIP4762 + jt[CREATE].constantGas = params.CreateNGasEip4762 + jt[CREATE2].constantGas = params.CreateNGasEip4762 + jt[CALL].constantGas = 0 + jt[CALL].dynamicGas = gasCallEIP4762 + jt[CALLCODE].constantGas = 0 + jt[CALLCODE].dynamicGas = gasCallCodeEIP4762 + jt[STATICCALL].constantGas = 0 + jt[STATICCALL].dynamicGas = gasStaticCallEIP4762 + jt[DELEGATECALL].constantGas = 0 + jt[DELEGATECALL].dynamicGas = gasDelegateCallEIP4762 +} diff --git a/core/vm/evm.go b/core/vm/evm.go index 782fc6d56740..3278856f39a8 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -178,20 +178,6 @@ func (evm *EVM) SetBlockContext(blockCtx BlockContext) { evm.chainRules = evm.chainConfig.Rules(num, blockCtx.Random != nil, timestamp) } -// tryConsumeGas tries to subtract gas from gasPool, setting the result in gasPool -// if subtracting more gas than remains in gasPool, set gasPool = 0 and return false -// otherwise, do the subtraction setting the result in gasPool and return true -func tryConsumeGas(gasPool *uint64, gas uint64) bool { - // XXX check this is still needed as a func - if *gasPool < gas { - *gasPool = 0 - return false - } - - *gasPool -= gas - return true -} - // Call executes the contract associated with the addr with the given input as // parameters. It also handles any necessary value transfer required and takes // the necessary steps to create accounts and reverses the state in case of an @@ -211,11 +197,17 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas var creation bool if !evm.StateDB.Exist(addr) { - if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 { - if evm.chainRules.IsPrague { - // proof of absence - tryConsumeGas(&gas, evm.Accesses.TouchAndChargeProofOfAbsence(addr.Bytes())) + if !isPrecompile && evm.chainRules.IsEIP4762 { + // add proof of absence to witness + wgas := evm.Accesses.TouchFullAccount(addr.Bytes(), false) + if gas < wgas { + evm.StateDB.RevertToSnapshot(snapshot) + return nil, 0, ErrOutOfGas } + gas -= wgas + } + + if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 { // Calling a non existing account, don't do anything, but ping the tracer if debug { if evm.depth == 0 { @@ -463,7 +455,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, evm.StateDB.SetNonce(caller.Address(), nonce+1) // We add this to the access list _before_ taking a snapshot. 
Even if the creation fails, // the access-list change should not be rolled back - if evm.chainRules.IsBerlin { + if evm.chainRules.IsEIP2929 { evm.StateDB.AddAddressToAccessList(address) } // Ensure there's no existing contract already at the designated address @@ -486,6 +478,14 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, contract.SetCodeOptionalHash(&address, codeAndHash) contract.IsDeployment = true + // Charge the contract creation init gas in verkle mode + var err error + if evm.chainRules.IsEIP4762 { + if !contract.UseGas(evm.Accesses.TouchAndChargeContractCreateInit(address.Bytes(), value.Sign() != 0)) { + err = ErrOutOfGas + } + } + if evm.Config.Tracer != nil { if evm.depth == 0 { evm.Config.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value) @@ -494,7 +494,10 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } } - ret, err := evm.interpreter.Run(contract, nil, false) + var ret []byte + if err == nil { + ret, err = evm.interpreter.Run(contract, nil, false) + } // Check whether the max code size has been exceeded, assign err if the case. if err == nil && evm.chainRules.IsEIP158 && len(ret) > params.MaxCodeSize { @@ -511,20 +514,24 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, // be stored due to not enough gas set an error and let it be handled // by the error checking condition below. if err == nil { - createDataGas := uint64(len(ret)) * params.CreateDataGas - if contract.UseGas(createDataGas) { - evm.StateDB.SetCode(address, ret) + if !evm.chainRules.IsEIP4762 { + createDataGas := uint64(len(ret)) * params.CreateDataGas + if !contract.UseGas(createDataGas) { + err = ErrCodeStoreOutOfGas + } } else { - err = ErrCodeStoreOutOfGas - } - } + // Contract creation completed, touch the missing fields in the contract + if !contract.UseGas(evm.Accesses.TouchFullAccount(address.Bytes()[:], true)) { + err = ErrCodeStoreOutOfGas + } - if err == nil && evm.chainRules.IsPrague { - if len(ret) > 0 { - touchCodeChunksRangeOnReadAndChargeGas(address.Bytes(), 0, uint64(len(ret)), uint64(len(ret)), evm.Accesses) + if err == nil && len(ret) > 0 && !contract.UseGas(evm.Accesses.TouchCodeChunksRangeAndChargeGas(address.Bytes(), 0, uint64(len(ret)), uint64(len(ret)), true)) { + err = ErrCodeStoreOutOfGas + } } - if !contract.UseGas(evm.Accesses.TouchAndChargeContractCreateCompleted(address.Bytes()[:])) { - err = ErrOutOfGas + + if err == nil { + evm.StateDB.SetCode(address, ret) } } diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 50ebf8c05b64..5f8183801f1c 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -23,8 +23,6 @@ import ( "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" - trieUtils "github.com/ethereum/go-ethereum/trie/utils" - "github.com/holiman/uint256" ) // memoryGasCost calculates the quadratic gas for memory expansion. 
It does so @@ -100,24 +98,12 @@ var ( func gasExtCodeSize(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { usedGas := uint64(0) - slot := stack.Back(0) - address := slot.Bytes20() - if evm.chainRules.IsPrague { - usedGas += evm.TxContext.Accesses.TouchAddressOnReadAndComputeGas(address[:], uint256.Int{}, trieUtils.CodeSizeLeafKey) - } - return usedGas, nil } func gasSLoad(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { usedGas := uint64(0) - if evm.chainRules.IsPrague { - where := stack.Back(0) - treeIndex, subIndex := trieUtils.GetTreeKeyStorageSlotTreeIndexes(where.Bytes()) - usedGas += evm.Accesses.TouchAddressOnReadAndComputeGas(contract.Address().Bytes(), *treeIndex, subIndex) - } - return usedGas, nil } @@ -405,7 +391,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize } else if !evm.StateDB.Exist(address) { gas += params.CallNewAccountGas } - if transfersValue { + if transfersValue && !evm.chainRules.IsEIP4762 { gas += params.CallValueTransferGas } memoryGas, err := memoryGasCost(mem, memorySize) @@ -424,13 +410,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } - if evm.chainRules.IsPrague { - if _, isPrecompile := evm.precompile(address); !isPrecompile { - gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes()[:])) - if overflow { - return 0, ErrGasUintOverflow - } - } + if evm.chainRules.IsEIP4762 { if transfersValue { gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeValueTransfer(contract.Address().Bytes()[:], address.Bytes()[:])) if overflow { @@ -451,7 +431,7 @@ func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memory gas uint64 overflow bool ) - if stack.Back(2).Sign() != 0 { + if stack.Back(2).Sign() != 0 && !evm.chainRules.IsEIP4762 { gas += params.CallValueTransferGas } if gas, overflow = math.SafeAdd(gas, memoryGas); overflow { @@ -464,10 +444,11 @@ func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memory if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } - if evm.chainRules.IsPrague { + if evm.chainRules.IsEIP4762 { address := common.Address(stack.Back(1).Bytes20()) - if _, isPrecompile := evm.precompile(address); !isPrecompile { - gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) + transfersValue := !stack.Back(2).IsZero() + if transfersValue { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeValueTransfer(contract.Address().Bytes()[:], address.Bytes()[:])) if overflow { return 0, ErrGasUintOverflow } @@ -489,15 +470,6 @@ func gasDelegateCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } - if evm.chainRules.IsPrague { - address := common.Address(stack.Back(1).Bytes20()) - if _, isPrecompile := evm.precompile(address); !isPrecompile { - gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) - if overflow { - return 0, ErrGasUintOverflow - } - } - } return gas, nil } @@ -514,15 +486,6 @@ func gasStaticCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memo if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } - if 
evm.chainRules.IsPrague { - address := common.Address(stack.Back(1).Bytes20()) - if _, isPrecompile := evm.precompile(address); !isPrecompile { - gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) - if overflow { - return 0, ErrGasUintOverflow - } - } - } return gas, nil } diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 7073f2d7c297..7a6db2e8eaaa 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -20,12 +20,10 @@ import ( "encoding/binary" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie/utils" "github.com/holiman/uint256" ) @@ -264,13 +262,6 @@ func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] func opBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) - if interpreter.evm.chainRules.IsPrague { - statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(address[:], uint256.Int{}, utils.BalanceLeafKey) - if !scope.Contract.UseGas(statelessGas) { - scope.Contract.Gas = 0 - return nil, ErrOutOfGas - } - } slot.SetFromBig(interpreter.evm.StateDB.GetBalance(address)) return nil, nil } @@ -355,13 +346,6 @@ func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) slot := scope.Stack.peek() address := slot.Bytes20() cs := uint64(interpreter.evm.StateDB.GetCodeSize(address)) - if interpreter.evm.chainRules.IsPrague { - statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(address[:], uint256.Int{}, utils.CodeSizeLeafKey) - if !scope.Contract.UseGas(statelessGas) { - scope.Contract.Gas = 0 - return nil, ErrOutOfGas - } - } slot.SetUint64(cs) return nil, nil } @@ -386,8 +370,8 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ contractAddr := scope.Contract.Address() paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(scope.Contract.Code, uint64CodeOffset, length.Uint64()) - if interpreter.evm.chainRules.IsPrague && !scope.Contract.IsDeployment { - statelessGas := touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], copyOffset, nonPaddedCopyLength, uint64(len(scope.Contract.Code)), interpreter.evm.Accesses) + if interpreter.evm.chainRules.IsEIP4762 && !scope.Contract.IsDeployment { + statelessGas := interpreter.evm.Accesses.TouchCodeChunksRangeAndChargeGas(contractAddr[:], copyOffset, nonPaddedCopyLength, uint64(len(scope.Contract.Code)), false) if !scope.Contract.UseGas(statelessGas) { scope.Contract.Gas = 0 return nil, ErrOutOfGas @@ -397,41 +381,6 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ return nil, nil } -// touchCodeChunksRangeOnReadAndChargeGas is a helper function to touch every chunk in a code range and charge witness gas costs -func touchCodeChunksRangeOnReadAndChargeGas(contractAddr []byte, startPC, size uint64, codeLen uint64, accesses *state.AccessWitness) uint64 { - // note that in the case where the copied code is outside the range of the - // contract code but touches the last leaf with contract code in it, - // we don't include the last leaf of code in the AccessWitness. 
The - // reason that we do not need the last leaf is the account's code size - // is already in the AccessWitness so a stateless verifier can see that - // the code from the last leaf is not needed. - if (codeLen == 0 && size == 0) || startPC > codeLen { - return 0 - } - - endPC := startPC + size - if endPC > codeLen { - endPC = codeLen - } - if endPC > 0 { - endPC -= 1 // endPC is the last bytecode that will be touched. - } - - var statelessGasCharged uint64 - for chunkNumber := startPC / 31; chunkNumber <= endPC/31; chunkNumber++ { - treeIndex := *uint256.NewInt((chunkNumber + 128) / 256) - subIndex := byte((chunkNumber + 128) % 256) - gas := accesses.TouchAddressOnReadAndComputeGas(contractAddr, treeIndex, subIndex) - var overflow bool - statelessGasCharged, overflow = math.SafeAdd(statelessGasCharged, gas) - if overflow { - panic("overflow when adding gas") - } - } - - return statelessGasCharged -} - func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( stack = scope.Stack @@ -445,14 +394,14 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) uint64CodeOffset = 0xffffffffffffffff } addr := common.Address(a.Bytes20()) - if interpreter.evm.chainRules.IsPrague { + if interpreter.evm.chainRules.IsEIP4762 { code := interpreter.evm.StateDB.GetCode(addr) contract := &Contract{ Code: code, self: AccountRef(addr), } paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(code, uint64CodeOffset, length.Uint64()) - statelessGas := touchCodeChunksRangeOnReadAndChargeGas(addr[:], copyOffset, nonPaddedCopyLength, uint64(len(contract.Code)), interpreter.evm.Accesses) + statelessGas := interpreter.evm.Accesses.TouchCodeChunksRangeAndChargeGas(addr[:], copyOffset, nonPaddedCopyLength, uint64(len(contract.Code)), false) if !scope.Contract.UseGas(statelessGas) { scope.Contract.Gas = 0 return nil, ErrOutOfGas @@ -495,13 +444,6 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) - if interpreter.evm.chainRules.IsPrague { - statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(address[:], uint256.Int{}, utils.CodeKeccakLeafKey) - if !scope.Contract.UseGas(statelessGas) { - scope.Contract.Gas = 0 - return nil, ErrOutOfGas - } - } if interpreter.evm.StateDB.Empty(address) { slot.Clear() } else { @@ -516,13 +458,12 @@ func opGasprice(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ return nil, nil } -func getBlockHashFromContract(number uint64, statedb StateDB, witness *state.AccessWitness) common.Hash { +func getBlockHashFromContract(number uint64, statedb StateDB, witness *state.AccessWitness) (common.Hash, uint64) { ringIndex := number % params.Eip2935BlockHashHistorySize var pnum common.Hash binary.BigEndian.PutUint64(pnum[24:], ringIndex) - treeIndex, suffix := utils.GetTreeKeyStorageSlotTreeIndexes(pnum.Bytes()) - witness.TouchAddressOnReadAndComputeGas(params.HistoryStorageAddress[:], *treeIndex, suffix) - return statedb.GetState(params.HistoryStorageAddress, pnum) + statelessGas := witness.TouchSlotAndChargeGas(params.HistoryStorageAddress[:], pnum, false) + return statedb.GetState(params.HistoryStorageAddress, pnum), statelessGas } func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { @@ -545,7 +486,13 @@ func opBlockhash(pc 
*uint64, interpreter *EVMInterpreter, scope *ScopeContext) ( if num64 >= lower && num64 < upper { // if Prague is active, read it from the history contract (EIP 2935). if evm.chainRules.IsPrague { - num.SetBytes(getBlockHashFromContract(num64, evm.StateDB, evm.Accesses).Bytes()) + blockHash, statelessGas := getBlockHashFromContract(num64, evm.StateDB, evm.Accesses) + if interpreter.evm.chainRules.IsEIP4762 { + if !scope.Contract.UseGas(statelessGas) { + return nil, ErrExecutionReverted + } + } + num.SetBytes(blockHash.Bytes()) } else { num.SetBytes(interpreter.evm.Context.GetHash(num64).Bytes()) } @@ -681,22 +628,25 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b if interpreter.readOnly { return nil, ErrWriteProtection } - var ( - value = scope.Stack.pop() - offset, size = scope.Stack.pop(), scope.Stack.pop() - input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) - gas = scope.Contract.Gas - ) - if interpreter.evm.chainRules.IsPrague { + + value := scope.Stack.pop() + if interpreter.evm.chainRules.IsEIP4762 { contractAddress := crypto.CreateAddress(scope.Contract.Address(), interpreter.evm.StateDB.GetNonce(scope.Contract.Address())) statelessGas := interpreter.evm.Accesses.TouchAndChargeContractCreateInit(contractAddress.Bytes()[:], value.Sign() != 0) - if !tryConsumeGas(&gas, statelessGas) { + if !scope.Contract.UseGas(statelessGas) { return nil, ErrExecutionReverted } } + + var ( + offset, size = scope.Stack.pop(), scope.Stack.pop() + input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) + gas = scope.Contract.Gas + ) if interpreter.evm.chainRules.IsEIP150 { gas -= gas / 64 } + // reuse size int for stackvalue stackvalue := size @@ -739,17 +689,17 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] offset, size = scope.Stack.pop(), scope.Stack.pop() salt = scope.Stack.pop() input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) - gas = scope.Contract.Gas ) - if interpreter.evm.chainRules.IsPrague { + if interpreter.evm.chainRules.IsEIP4762 { codeAndHash := &codeAndHash{code: input} contractAddress := crypto.CreateAddress2(scope.Contract.Address(), salt.Bytes32(), codeAndHash.Hash().Bytes()) statelessGas := interpreter.evm.Accesses.TouchAndChargeContractCreateInit(contractAddress.Bytes()[:], endowment.Sign() != 0) - if !tryConsumeGas(&gas, statelessGas) { + if !scope.Contract.UseGas(statelessGas) { return nil, ErrExecutionReverted } } + var gas = scope.Contract.Gas // Apply EIP150 gas -= gas / 64 scope.Contract.UseGas(gas) @@ -962,22 +912,6 @@ func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeCon tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) tracer.CaptureExit([]byte{}, 0, nil) } - if interpreter.evm.chainRules.IsPrague { - contractAddr := scope.Contract.Address() - beneficiaryAddr := beneficiary.Bytes20() - // If the beneficiary isn't the contract, we need to touch the beneficiary's balance. - // If the beneficiary is the contract itself, there're two possibilities: - // 1. The contract was created in the same transaction: the balance is already touched (no need to touch again) - // 2. The contract wasn't created in the same transaction: there's no net change in balance, - // and SELFDESTRUCT will perform no action on the account header. 
(we touch since we did SubBalance+AddBalance above) - if contractAddr != beneficiaryAddr || interpreter.evm.StateDB.WasCreatedInCurrentTx(contractAddr) { - statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(beneficiaryAddr[:], uint256.Int{}, utils.BalanceLeafKey) - if !scope.Contract.UseGas(statelessGas) { - scope.Contract.Gas = 0 - return nil, ErrOutOfGas - } - } - } return nil, errStopToken } @@ -1025,7 +959,7 @@ func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by // touch next chunk if PUSH1 is at the boundary. if so, *pc has // advanced past this boundary. contractAddr := scope.Contract.Address() - statelessGas := touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], *pc+1, uint64(1), uint64(len(scope.Contract.Code)), interpreter.evm.Accesses) + statelessGas := interpreter.evm.Accesses.TouchCodeChunksRangeAndChargeGas(contractAddr[:], *pc+1, uint64(1), uint64(len(scope.Contract.Code)), false) if !scope.Contract.UseGas(statelessGas) { scope.Contract.Gas = 0 return nil, ErrOutOfGas @@ -1054,7 +988,7 @@ func makePush(size uint64, pushByteSize int) executionFunc { if !scope.Contract.IsDeployment && interpreter.evm.chainRules.IsPrague { contractAddr := scope.Contract.Address() - statelessGas := touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], uint64(startMin), uint64(pushByteSize), uint64(len(scope.Contract.Code)), interpreter.evm.Accesses) + statelessGas := interpreter.evm.Accesses.TouchCodeChunksRangeAndChargeGas(contractAddr[:], uint64(startMin), uint64(pushByteSize), uint64(len(scope.Contract.Code)), false) if !scope.Contract.UseGas(statelessGas) { scope.Contract.Gas = 0 return nil, ErrOutOfGas diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 17b30fae1203..427ac4e44fc5 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -179,11 +179,11 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( logged, pcCopy, gasCopy = false, pc, contract.Gas } - if in.evm.chainRules.IsPrague && !contract.IsDeployment { + if in.evm.chainRules.IsEIP4762 && !contract.IsDeployment { // if the PC ends up in a new "chunk" of verkleized code, charge the // associated costs. 
contractAddr := contract.Address() - contract.Gas -= touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], pc, 1, uint64(len(contract.Code)), in.evm.TxContext.Accesses) + contract.Gas -= in.evm.TxContext.Accesses.TouchCodeChunksRangeAndChargeGas(contractAddr[:], pc, 1, uint64(len(contract.Code)), false) } // Get the operation from the jump table and validate the stack to ensure there are diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 5dcabe387d6f..11a04a201317 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -84,6 +84,7 @@ func validate(jt JumpTable) JumpTable { func newPragueInstructionSet() JumpTable { instructionSet := newShanghaiInstructionSet() enable6780(&instructionSet) + enable4762(&instructionSet) return validate(instructionSet) } diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 4d4fe8aed3e6..dde3338ec18d 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -22,7 +22,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie/utils" ) func makeGasSStoreFunc(clearingRefund uint64) gasFunc { @@ -52,11 +51,6 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc { } value := common.Hash(y.Bytes32()) - if evm.chainRules.IsPrague { - treeIndex, subIndex := utils.GetTreeKeyStorageSlotTreeIndexes(x.Bytes()) - cost += evm.Accesses.TouchAddressOnWriteAndComputeGas(contract.Address().Bytes(), *treeIndex, subIndex) - } - if current == value { // noop (1) // EIP 2200 original clause: // return params.SloadGasEIP2200, nil @@ -111,13 +105,6 @@ func gasSLoadEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me slot := common.Hash(loc.Bytes32()) var gasUsed uint64 - if evm.chainRules.IsPrague { - where := stack.Back(0) - treeIndex, subIndex := utils.GetTreeKeyStorageSlotTreeIndexes(where.Bytes()) - addr := contract.Address() - gasUsed += evm.Accesses.TouchAddressOnReadAndComputeGas(addr.Bytes(), *treeIndex, subIndex) - } - // Check slot presence in the access list if _, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent { // If the caller cannot afford the cost, this change will be rolled back diff --git a/core/vm/operations_verkle.go b/core/vm/operations_verkle.go new file mode 100644 index 000000000000..8b55b83bb21f --- /dev/null +++ b/core/vm/operations_verkle.go @@ -0,0 +1,147 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package vm + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/params" +) + +func gasSStore4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + gas := evm.Accesses.TouchSlotAndChargeGas(contract.Address().Bytes(), common.Hash(stack.peek().Bytes32()), true) + if gas == 0 { + gas = params.WarmStorageReadCostEIP2929 + } + return gas, nil +} + +func gasSLoad4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + gas := evm.Accesses.TouchSlotAndChargeGas(contract.Address().Bytes(), common.Hash(stack.peek().Bytes32()), false) + if gas == 0 { + gas = params.WarmStorageReadCostEIP2929 + } + return gas, nil +} + +func gasBalance4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + address := stack.peek().Bytes20() + gas := evm.Accesses.TouchBalance(address[:], false) + if gas == 0 { + gas = params.WarmStorageReadCostEIP2929 + } + return gas, nil +} + +func gasExtCodeSize4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + address := stack.peek().Bytes20() + if _, isPrecompile := evm.precompile(address); isPrecompile { + return 0, nil + } + wgas := evm.Accesses.TouchVersion(address[:], false) + wgas += evm.Accesses.TouchCodeSize(address[:], false) + if wgas == 0 { + wgas = params.WarmStorageReadCostEIP2929 + } + return wgas, nil +} + +func gasExtCodeHash4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + address := stack.peek().Bytes20() + if _, isPrecompile := evm.precompile(address); isPrecompile { + return 0, nil + } + codehashgas := evm.Accesses.TouchCodeHash(address[:], false) + if codehashgas == 0 { + codehashgas = params.WarmStorageReadCostEIP2929 + } + return codehashgas, nil +} + +func makeCallVariantGasEIP4762(oldCalculator gasFunc) gasFunc { + return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + gas, err := oldCalculator(evm, contract, stack, mem, memorySize) + if err != nil { + return 0, err + } + if _, isPrecompile := evm.precompile(contract.Address()); isPrecompile { + return gas, nil + } + wgas := evm.Accesses.TouchAndChargeMessageCall(contract.Address().Bytes()) + if wgas == 0 { + wgas = params.WarmStorageReadCostEIP2929 + } + return wgas + gas, nil + } +} + +var ( + gasCallEIP4762 = makeCallVariantGasEIP4762(gasCall) + gasCallCodeEIP4762 = makeCallVariantGasEIP4762(gasCallCode) + gasStaticCallEIP4762 = makeCallVariantGasEIP4762(gasStaticCall) + gasDelegateCallEIP4762 = makeCallVariantGasEIP4762(gasDelegateCall) +) + +func gasSelfdestructEIP4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + beneficiaryAddr := common.Address(stack.peek().Bytes20()) + if _, isPrecompile := evm.precompile(beneficiaryAddr); isPrecompile { + return 0, nil + } + + contractAddr := contract.Address() + statelessGas := evm.Accesses.TouchVersion(contractAddr[:], false) + statelessGas += evm.Accesses.TouchCodeSize(contractAddr[:], false) + statelessGas += evm.Accesses.TouchBalance(contractAddr[:], false) + if contractAddr != beneficiaryAddr { + statelessGas += evm.Accesses.TouchBalance(beneficiaryAddr[:], false) + } + // Charge write costs if it transfers value + if evm.StateDB.GetBalance(contractAddr).Sign() != 0 { + statelessGas += evm.Accesses.TouchBalance(contractAddr[:], true) + if 
contractAddr != beneficiaryAddr { + statelessGas += evm.Accesses.TouchBalance(beneficiaryAddr[:], true) + } + + // Case when the beneficiary does not exist: touch the account + // but leave code hash and size alone. + if evm.StateDB.Empty(beneficiaryAddr) { + statelessGas += evm.Accesses.TouchVersion(beneficiaryAddr[:], true) + statelessGas += evm.Accesses.TouchNonce(beneficiaryAddr[:], true) + } + } + return statelessGas, nil +} + +func gasExtCodeCopyEIP4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + // memory expansion first (dynamic part of pre-2929 implementation) + gas, err := gasExtCodeCopy(evm, contract, stack, mem, memorySize) + if err != nil { + return 0, err + } + addr := common.Address(stack.peek().Bytes20()) + wgas := evm.Accesses.TouchVersion(addr[:], false) + wgas += evm.Accesses.TouchCodeSize(addr[:], false) + if wgas == 0 { + wgas = params.WarmStorageReadCostEIP2929 + } + var overflow bool + // We charge (cold-warm), since 'warm' is already charged as constantGas + if gas, overflow = math.SafeAdd(gas, wgas); overflow { + return 0, ErrGasUintOverflow + } + return gas, nil +} diff --git a/params/config.go b/params/config.go index 19e633a71def..94dcb57b2fe2 100644 --- a/params/config.go +++ b/params/config.go @@ -806,6 +806,7 @@ func (err *ConfigCompatError) Error() string { type Rules struct { ChainID *big.Int IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool + IsEIP2929, IsEIP4762 bool IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool IsBerlin, IsLondon bool IsMerge, IsShanghai, IsCancun, IsPrague bool @@ -828,6 +829,8 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules IsPetersburg: c.IsPetersburg(num), IsIstanbul: c.IsIstanbul(num), IsBerlin: c.IsBerlin(num), + IsEIP2929: c.IsBerlin(num) && !c.IsPrague(num, timestamp), + IsEIP4762: c.IsPrague(num, timestamp), IsLondon: c.IsLondon(num), IsMerge: isMerge, IsShanghai: c.IsShanghai(num, timestamp), diff --git a/params/protocol_params.go b/params/protocol_params.go index 8aad52103979..e64836b31417 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -86,6 +86,7 @@ const ( LogTopicGas uint64 = 375 // Multiplied by the * of the LOG*, per LOG transaction. e.g. LOG0 incurs 0 * c_txLogTopicGas, LOG4 incurs 4 * c_txLogTopicGas. CreateGas uint64 = 32000 // Once per CREATE operation & contract-creation transaction. Create2Gas uint64 = 32000 // Once per CREATE2 operation + CreateNGasEip4762 uint64 = 1000 // Once per CREATEn operations post-verkle SelfdestructRefundGas uint64 = 24000 // Refunded following a selfdestruct operation. MemoryGas uint64 = 3 // Times the address of the (highest referenced byte in memory + 1). NOTE: referencing happens on read, write and in instructions such as RETURN and CALL. 
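
Two conventions recur in the operations_verkle.go gas functions above: when the access witness reports no new cost (the location was already part of this block's witness), the charge falls back to the EIP-2929 warm-read price, and the call-family opcodes keep their legacy gas calculator with the witness charge layered on top (makeCallVariantGasEIP4762). Below is a self-contained sketch of that wrapper shape under simplified signatures; the real functions take the EVM, contract, stack and memory, and add with math.SafeAdd rather than plain +.

package main

import "fmt"

// gasFunc is a simplified stand-in for the interpreter's gas-calculator signature.
type gasFunc func() (uint64, error)

const warmStorageReadCost = 100 // stand-in for params.WarmStorageReadCostEIP2929

// makeWitnessCallVariant mirrors the shape of makeCallVariantGasEIP4762: keep the
// legacy calculation and add the witness charge, substituting the warm-read floor
// when nothing new was added to the witness.
func makeWitnessCallVariant(oldCalculator gasFunc, witnessGas func() uint64) gasFunc {
	return func() (uint64, error) {
		gas, err := oldCalculator()
		if err != nil {
			return 0, err
		}
		wgas := witnessGas()
		if wgas == 0 {
			wgas = warmStorageReadCost
		}
		return gas + wgas, nil
	}
}

func main() {
	legacy := func() (uint64, error) { return 700, nil }                  // illustrative pre-verkle CALL cost
	cold := makeWitnessCallVariant(legacy, func() uint64 { return 1900 }) // target not yet in the witness
	warm := makeWitnessCallVariant(legacy, func() uint64 { return 0 })    // already touched this block

	g1, _ := cold()
	g2, _ := warm()
	fmt.Println("cold call gas:", g1, "warm call gas:", g2)
}
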
diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index d1aa9dec57fa..af1240cb16bc 100644 --- a/trie/utils/verkle.go +++ b/trie/utils/verkle.go @@ -26,11 +26,11 @@ import ( ) const ( - VersionLeafKey = 0 - BalanceLeafKey = 1 - NonceLeafKey = 2 - CodeKeccakLeafKey = 3 - CodeSizeLeafKey = 4 + VersionLeafKey = 0 + BalanceLeafKey = 1 + NonceLeafKey = 2 + CodeHashLeafKey = 3 + CodeSizeLeafKey = 4 maxPointCacheByteSize = 100 << 20 ) @@ -152,7 +152,7 @@ func GetTreeKeyNonce(address []byte) []byte { } func GetTreeKeyCodeKeccak(address []byte) []byte { - return GetTreeKey(address, zero, CodeKeccakLeafKey) + return GetTreeKey(address, zero, CodeHashLeafKey) } func GetTreeKeyCodeSize(address []byte) []byte { diff --git a/trie/utils/verkle_test.go b/trie/utils/verkle_test.go index 02de69c9b712..a5aec55a0660 100644 --- a/trie/utils/verkle_test.go +++ b/trie/utils/verkle_test.go @@ -17,10 +17,11 @@ package utils import ( + "bytes" + "crypto/rand" "crypto/sha256" "encoding/hex" "math/big" - "math/rand" "testing" "github.com/ethereum/go-verkle" @@ -77,7 +78,7 @@ func sha256GetTreeKeyCodeSize(addr []byte) []byte { copy(payload[:len(treeIndexBytes)], treeIndexBytes) digest.Write(payload[:]) h := digest.Sum(nil) - h[31] = CodeKeccakLeafKey + h[31] = CodeHashLeafKey return h } @@ -93,3 +94,22 @@ func BenchmarkSha256Hash(b *testing.B) { sha256GetTreeKeyCodeSize(addr[:]) } } + +func TestCompareGetTreeKeyWithEvaluated(t *testing.T) { + var addr [32]byte + rand.Read(addr[:]) + addrpoint := EvaluateAddressPoint(addr[:]) + for i := 0; i < 100; i++ { + var val [32]byte + rand.Read(val[:]) + n := uint256.NewInt(0).SetBytes(val[:]) + n.Lsh(n, 8) + subindex := val[0] + tk1 := GetTreeKey(addr[:], n, subindex) + tk2 := GetTreeKeyWithEvaluatedAddess(addrpoint, n, subindex) + + if !bytes.Equal(tk1, tk2) { + t.Fatalf("differing key: slot=%x, addr=%x", val, addr) + } + } +} diff --git a/trie/verkle.go b/trie/verkle.go index 760e30c8cdaa..ee953c232318 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -125,7 +125,7 @@ func (t *VerkleTrie) GetAccount(addr common.Address) (*types.StateAccount, error // been recreated after that, then its code keccak will NOT be 0. So return `nil` if // the nonce, and values[10], and code keccak is 0. 
- if acc.Nonce == 0 && len(values) > 10 && len(values[10]) > 0 && bytes.Equal(values[utils.CodeKeccakLeafKey], zero[:]) { + if acc.Nonce == 0 && len(values) > 10 && len(values[10]) > 0 && bytes.Equal(values[utils.CodeHashLeafKey], zero[:]) { if !t.ended { return nil, errDeletedAccount } else { @@ -144,7 +144,7 @@ func (t *VerkleTrie) GetAccount(addr common.Address) (*types.StateAccount, error // } // } acc.Balance = new(big.Int).SetBytes(balance[:]) - acc.CodeHash = values[utils.CodeKeccakLeafKey] + acc.CodeHash = values[utils.CodeHashLeafKey] // TODO fix the code size as well return acc, nil @@ -164,7 +164,7 @@ func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount) values[utils.VersionLeafKey] = zero[:] values[utils.NonceLeafKey] = nonce[:] values[utils.BalanceLeafKey] = balance[:] - values[utils.CodeKeccakLeafKey] = acc.CodeHash[:] + values[utils.CodeHashLeafKey] = acc.CodeHash[:] binary.LittleEndian.PutUint64(nonce[:], acc.Nonce) bbytes := acc.Balance.Bytes() From 6051fd01e6da1839d1c7a09fb53bf6eadcf45127 Mon Sep 17 00:00:00 2001 From: Ignacio Hagopian Date: Wed, 24 Apr 2024 05:08:29 -0300 Subject: [PATCH 94/99] trie/utils: simplify GetTreeKeyWithEvaluatedAddress (#421) Signed-off-by: Ignacio Hagopian --- trie/utils/verkle.go | 12 +++--------- trie/utils/verkle_test.go | 17 +++++++++++++++++ 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index af1240cb16bc..ad6184baa99a 100644 --- a/trie/utils/verkle.go +++ b/trie/utils/verkle.go @@ -17,8 +17,6 @@ package utils import ( - "encoding/binary" - "github.com/crate-crypto/go-ipa/bandersnatch/fr" "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-verkle" @@ -199,13 +197,9 @@ func GetTreeKeyWithEvaluatedAddess(evaluated *verkle.Point, treeIndex *uint256.I poly[1].SetZero() poly[2].SetZero() - // little-endian, 32-byte aligned treeIndex - var index [32]byte - for i := 0; i < len(treeIndex); i++ { - binary.LittleEndian.PutUint64(index[i*8:(i+1)*8], treeIndex[i]) - } - verkle.FromLEBytes(&poly[3], index[:16]) - verkle.FromLEBytes(&poly[4], index[16:]) + trieIndexBytes := treeIndex.Bytes32() + verkle.FromBytes(&poly[3], trieIndexBytes[16:]) + verkle.FromBytes(&poly[4], trieIndexBytes[:16]) cfg := verkle.GetConfig() ret := cfg.CommitToPoly(poly[:], 0) diff --git a/trie/utils/verkle_test.go b/trie/utils/verkle_test.go index a5aec55a0660..2cb9d2ad8690 100644 --- a/trie/utils/verkle_test.go +++ b/trie/utils/verkle_test.go @@ -113,3 +113,20 @@ func TestCompareGetTreeKeyWithEvaluated(t *testing.T) { } } } + +func BenchmarkGetTreeKeyWithEvaluatedAddress(b *testing.B) { + var buf [32]byte + rand.Read(buf[:]) + addrpoint := EvaluateAddressPoint(buf[:]) + + rand.Read(buf[:]) + n := uint256.NewInt(0).SetBytes32(buf[:]) + + _ = verkle.GetConfig() + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = GetTreeKeyWithEvaluatedAddess(addrpoint, n, 0) + } +} From f50db1e15a75c10b52c23d28fba44c8ef920a7f5 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 29 Apr 2024 14:31:25 +0200 Subject: [PATCH 95/99] remove unused code (#423) --- core/vm/evm.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/core/vm/evm.go b/core/vm/evm.go index 3278856f39a8..e036d2661768 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -195,7 +195,6 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas p, isPrecompile := evm.precompile(addr) debug := evm.Config.Tracer != nil - var creation 
bool if !evm.StateDB.Exist(addr) { if !isPrecompile && evm.chainRules.IsEIP4762 { // add proof of absence to witness @@ -221,7 +220,6 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas return nil, gas, nil } evm.StateDB.CreateAccount(addr) - creation = true } evm.Context.Transfer(evm.StateDB, caller.Address(), addr, value) @@ -256,7 +254,6 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas // The depth-check is already done, and precompiles handled above contract := NewContract(caller, AccountRef(addrCopy), value, gas) contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), code) - contract.IsDeployment = creation ret, err = evm.interpreter.Run(contract, input, false) gas = contract.Gas } From 1599b8f71c657954f814514ab906a436170870be Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Sat, 4 May 2024 11:45:09 +0200 Subject: [PATCH 96/99] post-genesis transition (#426) Support post-genesis transition Reactivate a post-genesis transition for a kaustinen-like testnet that goes through the conversion. It adds the ability: * To undergo the verkle transition "verge" past the genesis block, which so far was impossible with kaustinen-with-shapella. * To perform a shadow fork of holesky in the special kurtosis setup defined by the devops team. It is also serving as the basis for multiple other branches, to be merged at a later time: * The branch to replay historical blocks in verkle * The branch to execute transition tests Co-authored-by: Ignacio Hagopian --- cmd/geth/config.go | 11 + cmd/geth/main.go | 3 + cmd/utils/flags.go | 17 ++ consensus/beacon/consensus.go | 96 ++++--- core/block_validator.go | 4 + core/blockchain.go | 41 ++- core/chain_makers.go | 11 +- core/genesis.go | 27 +- .../conversion.go} | 208 ++++++++++++++- core/rawdb/accessors_overlay.go | 30 +++ core/rawdb/schema.go | 7 + core/state/database.go | 246 ++++++++++++++---- core/state/statedb.go | 11 +- core/state/trie_prefetcher.go | 2 +- core/state_processor.go | 191 -------------- eth/api_debug.go | 40 +++ eth/backend.go | 6 + eth/catalyst/api.go | 6 +- eth/ethconfig/config.go | 6 + light/trie.go | 33 ++- miner/worker.go | 7 +- params/config.go | 3 +- trie/transition.go | 8 +- 23 files changed, 682 insertions(+), 332 deletions(-) rename core/{overlay_transition.go => overlay/conversion.go} (54%) create mode 100644 core/rawdb/accessors_overlay.go diff --git a/cmd/geth/config.go b/cmd/geth/config.go index bf01c6f91857..4184290e86c6 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -175,6 +175,17 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { v := ctx.Uint64(utils.OverridePrague.Name) cfg.Eth.OverridePrague = &v } + if ctx.IsSet(utils.OverrideProofInBlock.Name) { + v := ctx.Bool(utils.OverrideProofInBlock.Name) + cfg.Eth.OverrideProofInBlock = &v + } + if ctx.IsSet(utils.OverrideOverlayStride.Name) { + v := ctx.Uint64(utils.OverrideOverlayStride.Name) + cfg.Eth.OverrideOverlayStride = &v + } + if ctx.IsSet(utils.ClearVerkleCosts.Name) { + params.ClearVerkleWitnessCosts() + } backend, eth := utils.RegisterEthService(stack, &cfg.Eth) // Configure log filter RPC API. 
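
The new flags above follow the same plumbing as the existing Cancun/Prague overrides: the CLI value is copied into an Override* pointer on the eth config, and it is only applied to the chain config when the pointer is non-nil. Here is a minimal sketch of that pattern; chainConfig, chainOverrides and applyOverrides below are illustrative stand-ins, not geth's own types.

package main

import "fmt"

// Stand-ins for the real configuration types (illustrative only).
type chainConfig struct {
	OverlayStride uint64
}

type chainOverrides struct {
	// nil means "keep the bundled default"; non-nil means the operator set the flag.
	OverrideOverlayStride *uint64
}

// applyOverrides mirrors how NewBlockChain folds ChainOverrides into the chain
// config before block import starts.
func applyOverrides(cfg *chainConfig, ov *chainOverrides) {
	if ov != nil && ov.OverrideOverlayStride != nil {
		cfg.OverlayStride = *ov.OverrideOverlayStride
	}
}

func main() {
	cfg := &chainConfig{OverlayStride: 10000} // bundled default, matching the flag definition

	// Equivalent of passing --override.overlay-stride=500 on the command line.
	v := uint64(500)
	applyOverrides(cfg, &chainOverrides{OverrideOverlayStride: &v})

	fmt.Println("effective overlay stride:", cfg.OverlayStride)
}
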
diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 38fb755b4b5a..5cb1580df3d1 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -67,8 +67,11 @@ var ( utils.NoUSBFlag, utils.USBFlag, utils.SmartCardDaemonPathFlag, + utils.OverrideOverlayStride, utils.OverrideCancun, utils.OverridePrague, + utils.OverrideProofInBlock, + utils.ClearVerkleCosts, utils.EnablePersonal, utils.TxPoolLocalsFlag, utils.TxPoolNoLocalsFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index b927d0f94f83..18c7c396e4b5 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -263,6 +263,12 @@ var ( Value: 2048, Category: flags.EthCategory, } + OverrideOverlayStride = &cli.Uint64Flag{ + Name: "override.overlay-stride", + Usage: "Manually specify the stride of the overlay transition, overriding the bundled setting", + Value: 10000, + Category: flags.EthCategory, + } OverrideCancun = &cli.Uint64Flag{ Name: "override.cancun", Usage: "Manually specify the Cancun fork timestamp, overriding the bundled setting", @@ -273,6 +279,17 @@ var ( Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting", Category: flags.EthCategory, } + OverrideProofInBlock = &cli.BoolFlag{ + Name: "override.blockproof", + Usage: "Manually specify the proof-in-block setting", + Value: true, + Category: flags.EthCategory, + } + ClearVerkleCosts = &cli.BoolFlag{ + Name: "clear.verkle.costs", + Usage: "Clear verkle costs (for shadow forks)", + Category: flags.EthCategory, + } // Light server and client settings LightServeFlag = &cli.IntFlag{ Name: "light.serve", diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index ad8894cf4db0..e40c180aa421 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -25,8 +25,10 @@ import ( "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/consensus/misc/eip4844" + "github.com/ethereum/go-ethereum/core/overlay" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" @@ -358,6 +360,15 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types. // The returned gas is not charged state.Witness().TouchFullAccount(w.Address[:], true) } + + if chain.Config().IsPrague(header.Number, header.Time) { + // uncomment when debugging + // fmt.Println("at block", header.Number, "performing transition?", state.Database().InTransition()) + parent := chain.GetHeaderByHash(header.ParentHash) + if err := overlay.OverlayVerkleTransition(state, parent.Root, chain.Config().OverlayStride); err != nil { + log.Error("error performing the transition", "err", err) + } + } } // FinalizeAndAssemble implements consensus.Engine, setting the final state and @@ -382,51 +393,72 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea // Assign the final state root to header. header.Root = state.IntermediateRoot(true) + // Associate current conversion state to computed state + // root and store it in the database for later recovery. 
+ state.Database().SaveTransitionState(header.Root) var ( p *verkle.VerkleProof k verkle.StateDiff keys = state.Witness().Keys() ) - if chain.Config().IsPrague(header.Number, header.Time) && chain.Config().ProofInBlocks { + if chain.Config().IsPrague(header.Number, header.Time) { // Open the pre-tree to prove the pre-state against parent := chain.GetHeaderByNumber(header.Number.Uint64() - 1) if parent == nil { return nil, fmt.Errorf("nil parent header for block %d", header.Number) } - preTrie, err := state.Database().OpenTrie(parent.Root) - if err != nil { - return nil, fmt.Errorf("error opening pre-state tree root: %w", err) - } + // Load transition state at beginning of block, because + // OpenTrie needs to know what the conversion status is. + state.Database().LoadTransitionState(parent.Root) - var okpre, okpost bool - var vtrpre, vtrpost *trie.VerkleTrie - switch pre := preTrie.(type) { - case *trie.VerkleTrie: - vtrpre, okpre = preTrie.(*trie.VerkleTrie) - vtrpost, okpost = state.GetTrie().(*trie.VerkleTrie) - case *trie.TransitionTrie: - vtrpre = pre.Overlay() - okpre = true - post, _ := state.GetTrie().(*trie.TransitionTrie) - vtrpost = post.Overlay() - okpost = true - default: - // This should only happen for the first block of the - // conversion, when the previous tree is a merkle tree. - // Logically, the "previous" verkle tree is an empty tree. - okpre = true - vtrpre = trie.NewVerkleTrie(verkle.New(), state.Database().TrieDB(), utils.NewPointCache(), false) - post := state.GetTrie().(*trie.TransitionTrie) - vtrpost = post.Overlay() - okpost = true - } - if okpre && okpost { - if len(keys) > 0 { - p, k, err = trie.ProveAndSerialize(vtrpre, vtrpost, keys, vtrpre.FlatdbNodeResolver) - if err != nil { - return nil, fmt.Errorf("error generating verkle proof for block %d: %w", header.Number, err) + if chain.Config().ProofInBlocks { + preTrie, err := state.Database().OpenTrie(parent.Root) + if err != nil { + return nil, fmt.Errorf("error opening pre-state tree root: %w", err) + } + + var okpre, okpost bool + var vtrpre, vtrpost *trie.VerkleTrie + switch pre := preTrie.(type) { + case *trie.VerkleTrie: + vtrpre, okpre = preTrie.(*trie.VerkleTrie) + switch tr := state.GetTrie().(type) { + case *trie.VerkleTrie: + vtrpost = tr + okpost = true + // This is to handle a situation right at the start of the conversion: + // the post trie is a transition tree when the pre tree is an empty + // verkle tree. + case *trie.TransitionTrie: + vtrpost = tr.Overlay() + okpost = true + default: + okpost = false + } + case *trie.TransitionTrie: + vtrpre = pre.Overlay() + okpre = true + post, _ := state.GetTrie().(*trie.TransitionTrie) + vtrpost = post.Overlay() + okpost = true + default: + // This should only happen for the first block of the + // conversion, when the previous tree is a merkle tree. + // Logically, the "previous" verkle tree is an empty tree. 
+ okpre = true + vtrpre = trie.NewVerkleTrie(verkle.New(), state.Database().TrieDB(), utils.NewPointCache(), false) + post := state.GetTrie().(*trie.TransitionTrie) + vtrpost = post.Overlay() + okpost = true + } + if okpre && okpost { + if len(keys) > 0 { + p, k, err = trie.ProveAndSerialize(vtrpre, vtrpost, keys, vtrpre.FlatdbNodeResolver) + if err != nil { + return nil, fmt.Errorf("error generating verkle proof for block %d: %w", header.Number, err) + } } } } diff --git a/core/block_validator.go b/core/block_validator.go index b1ceab9d5c6c..337b61ac3396 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -131,6 +131,10 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root { return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error()) } + // Verify that the advertised root is correct before + // it can be used as an identifier for the conversion + // status. + statedb.Database().SaveTransitionState(header.Root) return nil } diff --git a/core/blockchain.go b/core/blockchain.go index 797d31388476..79ac4df147b9 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -251,6 +251,14 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok { return nil, genesisErr } + if overrides != nil { + if overrides.OverrideProofInBlock != nil { + chainConfig.ProofInBlocks = *overrides.OverrideProofInBlock + } + if overrides.OverrideOverlayStride != nil { + chainConfig.OverlayStride = *overrides.OverrideOverlayStride + } + } log.Info("") log.Info(strings.Repeat("-", 153)) for _, line := range strings.Split(chainConfig.Description(), "\n") { @@ -312,7 +320,13 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis head := bc.CurrentBlock() // Declare the end of the verkle transition if need be - if bc.chainConfig.Rules(head.Number, false /* XXX */, head.Time).IsPrague { + if bc.chainConfig.IsPrague(head.Number, head.Time) { + // TODO this only works when resuming a chain that has already gone + // through the conversion. All pointers should be saved to the DB + // for it to be able to recover if interrupted during the transition + // but that's left out to a later PR since there's not really a need + // right now. + bc.stateCache.InitTransitionStatus(true, true) bc.stateCache.EndVerkleTransition() } @@ -1746,8 +1760,22 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) } + if bc.Config().IsPrague(block.Number(), block.Time()) { + bc.stateCache.LoadTransitionState(parent.Root) + + // pragueTime has been reached. If the transition isn't active, it means this + // is the fork block and that the conversion needs to be marked at started. + if !bc.stateCache.InTransition() && !bc.stateCache.Transitioned() { + bc.stateCache.StartVerkleTransition(parent.Root, emptyVerkleRoot, bc.Config(), bc.Config().PragueTime, parent.Root) + } + } else { + // If the verkle activation time hasn't started, declare it as "not started". + // This is so that if the miner activates the conversion, the insertion happens + // in the correct mode. 
+ bc.stateCache.InitTransitionStatus(false, false) + } if parent.Number.Uint64() == conversionBlock { - bc.StartVerkleTransition(parent.Root, emptyVerkleRoot, bc.Config(), &parent.Time) + bc.StartVerkleTransition(parent.Root, emptyVerkleRoot, bc.Config(), &parent.Time, parent.Root) bc.stateCache.SetLastMerkleRoot(parent.Root) } statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps) @@ -1989,7 +2017,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1) } if parent == nil { - return it.index, errors.New("missing parent") + return it.index, fmt.Errorf("missing parent: hash=%x, number=%d", current.Hash(), current.Number) } // Import all the pruned blocks to make the state available var ( @@ -2050,7 +2078,7 @@ func (bc *BlockChain) recoverAncestors(block *types.Block) (common.Hash, error) } } if parent == nil { - return common.Hash{}, errors.New("missing parent") + return common.Hash{}, fmt.Errorf("missing parent during ancestor recovery: hash=%x, number=%d", block.ParentHash(), block.Number()) } // Import all the pruned blocks to make the state available for i := len(hashes) - 1; i >= 0; i-- { @@ -2288,6 +2316,7 @@ func (bc *BlockChain) SetCanonical(head *types.Block) (common.Hash, error) { defer bc.chainmu.Unlock() // Re-execute the reorged chain in case the head state is missing. + log.Trace("looking for state", "root", head.Root(), "has state", bc.HasState(head.Root())) if !bc.HasState(head.Root()) { if latestValidHash, err := bc.recoverAncestors(head); err != nil { return latestValidHash, err @@ -2533,8 +2562,8 @@ func (bc *BlockChain) GetTrieFlushInterval() time.Duration { return time.Duration(bc.flushInterval.Load()) } -func (bc *BlockChain) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, pragueTime *uint64) { - bc.stateCache.StartVerkleTransition(originalRoot, translatedRoot, chainConfig, pragueTime) +func (bc *BlockChain) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, pragueTime *uint64, root common.Hash) { + bc.stateCache.StartVerkleTransition(originalRoot, translatedRoot, chainConfig, pragueTime, root) } func (bc *BlockChain) ReorgThroughVerkleTransition() { bc.stateCache.ReorgThroughVerkleTransition() diff --git a/core/chain_makers.go b/core/chain_makers.go index 5b9dc0c6ff08..1c232e6b6d92 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -365,7 +365,7 @@ func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, return db, blocks, receipts } -func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts, []*verkle.VerkleProof, []verkle.StateDiff) { +func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, diskdb ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts, []*verkle.VerkleProof, []verkle.StateDiff) { if config == nil { config = params.TestChainConfig } @@ -434,13 +434,16 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine return nil, nil } var snaps *snapshot.Tree - triedb := state.NewDatabaseWithConfig(db, nil) - triedb.EndVerkleTransition() + db := state.NewDatabaseWithConfig(diskdb, nil) + db.StartVerkleTransition(common.Hash{}, common.Hash{}, config, config.PragueTime, common.Hash{}) + 
db.EndVerkleTransition() + db.SaveTransitionState(parent.Root()) for i := 0; i < n; i++ { - statedb, err := state.New(parent.Root(), triedb, snaps) + statedb, err := state.New(parent.Root(), db, snaps) if err != nil { panic(fmt.Sprintf("could not find state for block %d: err=%v, parent root=%x", i, err, parent.Root())) } + statedb.NewAccessWitness() block, receipt := genblock(i, parent, statedb) blocks[i] = block receipts[i] = receipt diff --git a/core/genesis.go b/core/genesis.go index c8a4bc5952d9..a2a331d1fe33 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -126,6 +126,7 @@ func (ga *GenesisAlloc) deriveHash(cfg *params.ChainConfig, timestamp uint64) (c // all the derived states will be discarded to not pollute disk. db := state.NewDatabase(rawdb.NewMemoryDatabase()) if cfg.IsPrague(big.NewInt(int64(0)), timestamp) { + db.StartVerkleTransition(common.Hash{}, common.Hash{}, cfg, ×tamp, common.Hash{}) db.EndVerkleTransition() } statedb, err := state.New(types.EmptyRootHash, db, nil) @@ -146,15 +147,17 @@ func (ga *GenesisAlloc) deriveHash(cfg *params.ChainConfig, timestamp uint64) (c // flush is very similar with deriveHash, but the main difference is // all the generated states will be persisted into the given database. // Also, the genesis state specification will be flushed as well. -func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash, cfg *params.ChainConfig) error { - statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil) - if err != nil { - return err +func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash, cfg *params.ChainConfig, timestamp *uint64) error { + database := state.NewDatabaseWithNodeDB(db, triedb) + // End the verkle conversion at genesis if the fork block is 0 + if timestamp != nil && cfg.IsPrague(big.NewInt(int64(0)), *timestamp) { + database.StartVerkleTransition(common.Hash{}, common.Hash{}, cfg, timestamp, common.Hash{}) + database.EndVerkleTransition() } - // End the verkle conversion at genesis if the fork block is 0 - if triedb.IsVerkle() { - statedb.Database().EndVerkleTransition() + statedb, err := state.New(types.EmptyRootHash, database, nil) + if err != nil { + return err } for addr, account := range *ga { @@ -221,7 +224,7 @@ func CommitGenesisState(db ethdb.Database, triedb *trie.Database, blockhash comm return errors.New("not found") } } - return alloc.flush(db, triedb, blockhash, config) + return alloc.flush(db, triedb, blockhash, config, nil) } // GenesisAccount is an account in the state of the genesis block. @@ -288,8 +291,10 @@ func (e *GenesisMismatchError) Error() string { // ChainOverrides contains the changes to chain config. type ChainOverrides struct { - OverrideCancun *uint64 - OverridePrague *uint64 + OverrideCancun *uint64 + OverridePrague *uint64 + OverrideProofInBlock *bool + OverrideOverlayStride *uint64 } // SetupGenesisBlock writes or updates the genesis block in db. @@ -536,7 +541,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block // All the checks has passed, flush the states derived from the genesis // specification as well as the specification itself into the provided // database. 
- if err := g.Alloc.flush(db, triedb, block.Hash(), g.Config); err != nil { + if err := g.Alloc.flush(db, triedb, block.Hash(), g.Config, &g.Timestamp); err != nil { return nil, err } rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty()) diff --git a/core/overlay_transition.go b/core/overlay/conversion.go similarity index 54% rename from core/overlay_transition.go rename to core/overlay/conversion.go index 35c09d22d938..0e83e2066353 100644 --- a/core/overlay_transition.go +++ b/core/overlay/conversion.go @@ -14,14 +14,17 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package core +package overlay import ( "bufio" "bytes" + "encoding/binary" "fmt" "io" "os" + "runtime" + "sync" "time" "github.com/ethereum/go-ethereum/common" @@ -32,15 +35,199 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/ethereum/go-verkle" + "github.com/holiman/uint256" ) +var zeroTreeIndex uint256.Int + +// keyValueMigrator is a helper module that collects key-values from the overlay-tree migration for Verkle Trees. +// It assumes that the walk of the base tree is done in address-order, so it exploit that fact to +// collect the key-values in a way that is efficient. +type keyValueMigrator struct { + // leafData contains the values for the future leaf for a particular VKT branch. + leafData map[branchKey]*migratedKeyValue + + // When prepare() is called, it will start a background routine that will process the leafData + // saving the result in newLeaves to be used by migrateCollectedKeyValues(). The background + // routine signals that it is done by closing processingReady. + processingReady chan struct{} + newLeaves []verkle.LeafNode + prepareErr error +} + +func newKeyValueMigrator() *keyValueMigrator { + // We do initialize the VKT config since prepare() might indirectly make multiple GetConfig() calls + // in different goroutines when we never called GetConfig() before, causing a race considering the way + // that `config` is designed in go-verkle. + // TODO: jsign as a fix for this in the PR where we move to a file-less precomp, since it allows safe + // concurrent calls to GetConfig(). When that gets merged, we can remove this line. 
+ _ = verkle.GetConfig() + return &keyValueMigrator{ + processingReady: make(chan struct{}), + leafData: make(map[branchKey]*migratedKeyValue, 10_000), + } +} + +type migratedKeyValue struct { + branchKey branchKey + leafNodeData verkle.BatchNewLeafNodeData +} +type branchKey struct { + addr common.Address + treeIndex uint256.Int +} + +func newBranchKey(addr []byte, treeIndex *uint256.Int) branchKey { + var sk branchKey + copy(sk.addr[:], addr) + sk.treeIndex = *treeIndex + return sk +} + +func (kvm *keyValueMigrator) addStorageSlot(addr []byte, slotNumber []byte, slotValue []byte) { + treeIndex, subIndex := utils.GetTreeKeyStorageSlotTreeIndexes(slotNumber) + leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex)) + leafNodeData.Values[subIndex] = slotValue +} + +func (kvm *keyValueMigrator) addAccount(addr []byte, acc *types.StateAccount) { + leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex)) + + var version [verkle.LeafValueSize]byte + leafNodeData.Values[utils.VersionLeafKey] = version[:] + + var balance [verkle.LeafValueSize]byte + for i, b := range acc.Balance.Bytes() { + balance[len(acc.Balance.Bytes())-1-i] = b + } + leafNodeData.Values[utils.BalanceLeafKey] = balance[:] + + var nonce [verkle.LeafValueSize]byte + binary.LittleEndian.PutUint64(nonce[:8], acc.Nonce) + leafNodeData.Values[utils.NonceLeafKey] = nonce[:] + + leafNodeData.Values[utils.CodeHashLeafKey] = acc.CodeHash[:] +} + +func (kvm *keyValueMigrator) addAccountCode(addr []byte, codeSize uint64, chunks []byte) { + leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex)) + + // Save the code size. + var codeSizeBytes [verkle.LeafValueSize]byte + binary.LittleEndian.PutUint64(codeSizeBytes[:8], codeSize) + leafNodeData.Values[utils.CodeSizeLeafKey] = codeSizeBytes[:] + + // The first 128 chunks are stored in the account header leaf. + for i := 0; i < 128 && i < len(chunks)/32; i++ { + leafNodeData.Values[byte(128+i)] = chunks[32*i : 32*(i+1)] + } + + // Potential further chunks, have their own leaf nodes. + for i := 128; i < len(chunks)/32; { + treeIndex, _ := utils.GetTreeKeyCodeChunkIndices(uint256.NewInt(uint64(i))) + leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex)) + + j := i + for ; (j-i) < 256 && j < len(chunks)/32; j++ { + leafNodeData.Values[byte((j-128)%256)] = chunks[32*j : 32*(j+1)] + } + i = j + } +} + +func (kvm *keyValueMigrator) getOrInitLeafNodeData(bk branchKey) *verkle.BatchNewLeafNodeData { + if ld, ok := kvm.leafData[bk]; ok { + return &ld.leafNodeData + } + kvm.leafData[bk] = &migratedKeyValue{ + branchKey: bk, + leafNodeData: verkle.BatchNewLeafNodeData{ + Stem: nil, // It will be calculated in the prepare() phase, since it's CPU heavy. + Values: make(map[byte][]byte, 256), + }, + } + return &kvm.leafData[bk].leafNodeData +} + +func (kvm *keyValueMigrator) prepare() { + // We fire a background routine to process the leafData and save the result in newLeaves. + // The background routine signals that it is done by closing processingReady. + go func() { + // Step 1: We split kvm.leafData in numBatches batches, and we process each batch in a separate goroutine. + // This fills each leafNodeData.Stem with the correct value. 
+ leafData := make([]migratedKeyValue, 0, len(kvm.leafData)) + for _, v := range kvm.leafData { + leafData = append(leafData, *v) + } + var wg sync.WaitGroup + batchNum := runtime.NumCPU() + batchSize := (len(kvm.leafData) + batchNum - 1) / batchNum + for i := 0; i < len(kvm.leafData); i += batchSize { + start := i + end := i + batchSize + if end > len(kvm.leafData) { + end = len(kvm.leafData) + } + wg.Add(1) + + batch := leafData[start:end] + go func() { + defer wg.Done() + var currAddr common.Address + var currPoint *verkle.Point + for i := range batch { + if batch[i].branchKey.addr != currAddr || currAddr == (common.Address{}) { + currAddr = batch[i].branchKey.addr + currPoint = utils.EvaluateAddressPoint(currAddr[:]) + } + stem := utils.GetTreeKeyWithEvaluatedAddess(currPoint, &batch[i].branchKey.treeIndex, 0) + stem = stem[:verkle.StemSize] + batch[i].leafNodeData.Stem = stem + } + }() + } + wg.Wait() + + // Step 2: Now that we have all stems (i.e: tree keys) calculated, we can create the new leaves. + nodeValues := make([]verkle.BatchNewLeafNodeData, len(kvm.leafData)) + for i := range leafData { + nodeValues[i] = leafData[i].leafNodeData + } + + // Create all leaves in batch mode so we can optimize cryptography operations. + kvm.newLeaves, kvm.prepareErr = verkle.BatchNewLeafNode(nodeValues) + close(kvm.processingReady) + }() +} + +func (kvm *keyValueMigrator) migrateCollectedKeyValues(tree *trie.VerkleTrie) error { + now := time.Now() + <-kvm.processingReady + if kvm.prepareErr != nil { + return fmt.Errorf("failed to prepare key values: %w", kvm.prepareErr) + } + log.Info("Prepared key values from base tree", "duration", time.Since(now)) + + // Insert into the tree. + if err := tree.InsertMigratedLeaves(kvm.newLeaves); err != nil { + return fmt.Errorf("failed to insert migrated leaves: %w", err) + } + + return nil +} + // OverlayVerkleTransition contains the overlay conversion logic -func OverlayVerkleTransition(statedb *state.StateDB) error { +func OverlayVerkleTransition(statedb *state.StateDB, root common.Hash, maxMovedCount uint64) error { migrdb := statedb.Database() + migrdb.LockCurrentTransitionState() + defer migrdb.UnLockCurrentTransitionState() // verkle transition: if the conversion process is in progress, move // N values from the MPT into the verkle tree. if migrdb.InTransition() { + log.Debug("Processing verkle conversion starting", "account hash", migrdb.GetCurrentAccountHash(), "slot hash", migrdb.GetCurrentSlotHash(), "state root", root) var ( now = time.Now() tt = statedb.GetTrie().(*trie.TransitionTrie) @@ -92,14 +279,13 @@ func OverlayVerkleTransition(statedb *state.StateDB) error { preimageSeek += int64(len(addr)) } - const maxMovedCount = 10000 // mkv will be assiting in the collection of up to maxMovedCount key values to be migrated to the VKT. // It has internal caches to do efficient MPT->VKT key calculations, which will be discarded after // this function. mkv := newKeyValueMigrator() // move maxCount accounts into the verkle tree, starting with the // slots from the previous account. 
- count := 0 + count := uint64(0) // if less than maxCount slots were moved, move to the next account for count < maxMovedCount { @@ -124,7 +310,12 @@ func OverlayVerkleTransition(statedb *state.StateDB) error { if err != nil { return err } - stIt.Next() + processed := stIt.Next() + if processed { + log.Debug("account has storage and a next item") + } else { + log.Debug("account has storage and NO next item") + } // fdb.StorageProcessed will be initialized to `true` if the // entire storage for an account was not entirely processed @@ -133,6 +324,7 @@ func OverlayVerkleTransition(statedb *state.StateDB) error { // If the entire storage was processed, then the iterator was // created in vain, but it's ok as this will not happen often. for ; !migrdb.GetStorageProcessed() && count < maxMovedCount; count++ { + log.Trace("Processing storage", "count", count, "slot", stIt.Slot(), "storage processed", migrdb.GetStorageProcessed(), "current account", migrdb.GetCurrentAccountAddress(), "current account hash", migrdb.GetCurrentAccountHash()) var ( value []byte // slot value after RLP decoding safeValue [32]byte // 32-byte aligned value @@ -155,6 +347,7 @@ func OverlayVerkleTransition(statedb *state.StateDB) error { return fmt.Errorf("slotnr len is zero is not 32: %d", len(slotnr)) } } + log.Trace("found slot number", "number", slotnr) if crypto.Keccak256Hash(slotnr[:]) != stIt.Hash() { return fmt.Errorf("preimage file does not match storage hash: %s!=%s", crypto.Keccak256Hash(slotnr), stIt.Hash()) } @@ -196,6 +389,7 @@ func OverlayVerkleTransition(statedb *state.StateDB) error { // Move to the next account, if available - or end // the transition otherwise. if accIt.Next() { + log.Trace("Found another account to convert", "hash", accIt.Hash()) var addr common.Address if hasPreimagesBin { if _, err := io.ReadFull(fpreimages, addr[:]); err != nil { @@ -207,10 +401,10 @@ func OverlayVerkleTransition(statedb *state.StateDB) error { return fmt.Errorf("account address len is zero is not 20: %d", len(addr)) } } - // fmt.Printf("account switch: %s != %s\n", crypto.Keccak256Hash(addr[:]), accIt.Hash()) if crypto.Keccak256Hash(addr[:]) != accIt.Hash() { return fmt.Errorf("preimage file does not match account hash: %s != %s", crypto.Keccak256Hash(addr[:]), accIt.Hash()) } + log.Trace("Converting account address", "hash", accIt.Hash(), "addr", addr) preimageSeek += int64(len(addr)) migrdb.SetCurrentAccountAddress(addr) } else { @@ -223,7 +417,7 @@ func OverlayVerkleTransition(statedb *state.StateDB) error { } migrdb.SetCurrentPreimageOffset(preimageSeek) - log.Info("Collected key values from base tree", "count", count, "duration", time.Since(now), "last account", statedb.Database().GetCurrentAccountHash()) + log.Info("Collected key values from base tree", "count", count, "duration", time.Since(now), "last account hash", statedb.Database().GetCurrentAccountHash(), "last account address", statedb.Database().GetCurrentAccountAddress(), "storage processed", statedb.Database().GetStorageProcessed(), "last storage", statedb.Database().GetCurrentSlotHash()) // Take all the collected key-values and prepare the new leaf values. // This fires a background routine that will start doing the work that diff --git a/core/rawdb/accessors_overlay.go b/core/rawdb/accessors_overlay.go new file mode 100644 index 000000000000..5a371b9d307f --- /dev/null +++ b/core/rawdb/accessors_overlay.go @@ -0,0 +1,30 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" +) + +func ReadVerkleTransitionState(db ethdb.KeyValueReader, hash common.Hash) ([]byte, error) { + return db.Get(transitionStateKey(hash)) +} + +func WriteVerkleTransitionState(db ethdb.KeyValueWriter, hash common.Hash, state []byte) error { + return db.Put(transitionStateKey(hash), state) +} diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 940ce01549cd..029c09aec370 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -129,6 +129,8 @@ var ( CliqueSnapshotPrefix = []byte("clique-") + VerkleTransitionStatePrefix = []byte("verkle-transition-state-") + preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil) preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil) ) @@ -262,6 +264,11 @@ func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte { return append(append(trieNodeStoragePrefix, accountHash.Bytes()...), path...) } +// transitionStateKey = transitionStatusKey + hash +func transitionStateKey(hash common.Hash) []byte { + return append(VerkleTransitionStatePrefix, hash.Bytes()...) +} + // IsLegacyTrieNode reports whether a provided database entry is a legacy trie // node. The characteristics of legacy trie node are: // - the key length is 32 bytes diff --git a/core/state/database.go b/core/state/database.go index 5707e2c88b60..826c03cd9f05 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -17,8 +17,11 @@ package state import ( + "bytes" + "encoding/gob" "errors" "fmt" + "sync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/lru" @@ -26,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" @@ -64,7 +68,7 @@ type Database interface { // TrieDB retrieves the low level trie database used for data storage. 
TrieDB() *trie.Database - StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, cancunTime *uint64) + StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, cancunTime *uint64, root common.Hash) ReorgThroughVerkleTransition() @@ -74,7 +78,9 @@ type Database interface { Transitioned() bool - SetCurrentSlotHash(hash common.Hash) + InitTransitionStatus(bool, bool) + + SetCurrentSlotHash(common.Hash) GetCurrentAccountAddress() *common.Address @@ -94,7 +100,15 @@ type Database interface { AddRootTranslation(originalRoot, translatedRoot common.Hash) - SetLastMerkleRoot(root common.Hash) + SetLastMerkleRoot(common.Hash) + + SaveTransitionState(common.Hash) + + LoadTransitionState(common.Hash) + + LockCurrentTransitionState() + + UnLockCurrentTransitionState() } // Trie is a Ethereum Merkle Patricia trie. @@ -182,36 +196,37 @@ func NewDatabase(db ethdb.Database) Database { // large memory cache. func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { return &cachingDB{ - disk: db, - codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), - codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), - triedb: trie.NewDatabaseWithConfig(db, config), - addrToPoint: utils.NewPointCache(), + disk: db, + codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), + codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), + triedb: trie.NewDatabaseWithConfig(db, config), + addrToPoint: utils.NewPointCache(), + TransitionStatePerRoot: lru.NewBasicLRU[common.Hash, *TransitionState](100), } } // NewDatabaseWithNodeDB creates a state database with an already initialized node database. func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database { return &cachingDB{ - disk: db, - codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), - codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), - triedb: triedb, - addrToPoint: utils.NewPointCache(), - ended: triedb.IsVerkle(), + disk: db, + codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), + codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), + triedb: triedb, + addrToPoint: utils.NewPointCache(), + TransitionStatePerRoot: lru.NewBasicLRU[common.Hash, *TransitionState](100), } } func (db *cachingDB) InTransition() bool { - return db.started && !db.ended + return db.CurrentTransitionState != nil && db.CurrentTransitionState.Started && !db.CurrentTransitionState.Ended } func (db *cachingDB) Transitioned() bool { - return db.ended + return db.CurrentTransitionState != nil && db.CurrentTransitionState.Ended } // Fork implements the fork -func (db *cachingDB) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, pragueTime *uint64) { +func (db *cachingDB) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, pragueTime *uint64, root common.Hash) { fmt.Println(` __________.__ .__ .__ __ .__ .__ ____ \__ ___| |__ ____ ____ | | ____ ______ | |__ _____ _____/ |_ | |__ _____ ______ __ _ _|__| ____ / ___\ ______ @@ -219,24 +234,35 @@ func (db *cachingDB) StartVerkleTransition(originalRoot, translatedRoot common.H | | | Y \ ___/ \ ___/| |_\ ___/| |_> | Y \/ __ \| | | | | Y \/ __ \_\___ \ \ /| | | \\___ /\___ \ |____| |___| /\___ \___ |____/\___ | __/|___| (____ |___| |__| |___| (____ /_____/ \/\_/ |__|___| /_____//_____/ |__|`) - db.started = true - 
db.ended = false + db.CurrentTransitionState = &TransitionState{ + Started: true, + // initialize so that the first storage-less accounts are processed + StorageProcessed: true, + } // db.AddTranslation(originalRoot, translatedRoot) db.baseRoot = originalRoot - // initialize so that the first storage-less accounts are processed - db.StorageProcessed = true + + // Reinitialize values in case of a reorg if pragueTime != nil { chainConfig.PragueTime = pragueTime } } func (db *cachingDB) ReorgThroughVerkleTransition() { - db.ended, db.started = false, false + log.Warn("trying to reorg through the transition, which makes no sense at this point") +} + +func (db *cachingDB) InitTransitionStatus(started, ended bool) { + db.CurrentTransitionState = &TransitionState{ + Ended: ended, + Started: started, + // TODO add other fields when we handle mid-transition interrupts + } } func (db *cachingDB) EndVerkleTransition() { - if !db.started { - db.started = true + if !db.CurrentTransitionState.Started { + db.CurrentTransitionState.Started = true } fmt.Println(` @@ -246,7 +272,36 @@ func (db *cachingDB) EndVerkleTransition() { | | | Y \ ___/ \ ___/| |_\ ___/| |_> | Y \/ __ \| | | | | Y \/ __ \_\___ \ | |__/ __ \| | / /_/ \ ___// /_/ | |____| |___| /\___ \___ |____/\___ | __/|___| (____ |___| |__| |___| (____ /_____/ |____(____ |___| \____ |\___ \____ | |__|`) - db.ended = true + db.CurrentTransitionState.Ended = true +} + +type TransitionState struct { + CurrentAccountAddress *common.Address // addresss of the last translated account + CurrentSlotHash common.Hash // hash of the last translated storage slot + CurrentPreimageOffset int64 // next byte to read from the preimage file + Started, Ended bool + + // Mark whether the storage for an account has been processed. This is useful if the + // maximum number of leaves of the conversion is reached before the whole storage is + // processed. + StorageProcessed bool +} + +func (ts *TransitionState) Copy() *TransitionState { + ret := &TransitionState{ + Started: ts.Started, + Ended: ts.Ended, + CurrentSlotHash: ts.CurrentSlotHash, + CurrentPreimageOffset: ts.CurrentPreimageOffset, + StorageProcessed: ts.StorageProcessed, + } + + if ts.CurrentAccountAddress != nil { + ret.CurrentAccountAddress = &common.Address{} + copy(ret.CurrentAccountAddress[:], ts.CurrentAccountAddress[:]) + } + + return ret } type cachingDB struct { @@ -255,22 +310,16 @@ type cachingDB struct { codeCache *lru.SizeConstrainedCache[common.Hash, []byte] triedb *trie.Database - // Verkle specific fields + // Transition-specific fields // TODO ensure that this info is in the DB - started, ended bool - LastMerkleRoot common.Hash // root hash of the read-only base tree + LastMerkleRoot common.Hash // root hash of the read-only base tree + CurrentTransitionState *TransitionState + TransitionStatePerRoot lru.BasicLRU[common.Hash, *TransitionState] + transitionStateLock sync.Mutex addrToPoint *utils.PointCache - baseRoot common.Hash // hash of the read-only base tree - CurrentAccountAddress *common.Address // addresss of the last translated account - CurrentSlotHash common.Hash // hash of the last translated storage slot - CurrentPreimageOffset int64 // next byte to read from the preimage file - - // Mark whether the storage for an account has been processed. This is useful if the - // maximum number of leaves of the conversion is reached before the whole storage is - // processed. 
- StorageProcessed bool + baseRoot common.Hash // hash of the read-only base tree } func (db *cachingDB) openMPTTrie(root common.Hash) (Trie, error) { @@ -284,14 +333,14 @@ func (db *cachingDB) openMPTTrie(root common.Hash) (Trie, error) { func (db *cachingDB) openVKTrie(root common.Hash) (Trie, error) { payload, err := db.DiskDB().Get(trie.FlatDBVerkleNodeKeyPrefix) if err != nil { - return trie.NewVerkleTrie(verkle.New(), db.triedb, db.addrToPoint, db.ended), nil + return trie.NewVerkleTrie(verkle.New(), db.triedb, db.addrToPoint, db.CurrentTransitionState.Ended), nil } r, err := verkle.ParseNode(payload, 0) if err != nil { panic(err) } - return trie.NewVerkleTrie(r, db.triedb, db.addrToPoint, db.ended), err + return trie.NewVerkleTrie(r, db.triedb, db.addrToPoint, db.CurrentTransitionState.Ended), err } // OpenTrie opens the main account trie at a specific root hash. @@ -300,19 +349,22 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { mpt Trie err error ) + fmt.Printf("opening trie with root %x, %v %v\n", root, db.InTransition(), db.Transitioned()) // TODO separate both cases when I can be certain that it won't // find a Verkle trie where is expects a Transitoion trie. - if db.started || db.ended { + if db.InTransition() || db.Transitioned() { // NOTE this is a kaustinen-only change, it will break replay vkt, err := db.openVKTrie(root) if err != nil { + log.Error("failed to open the vkt", "err", err) return nil, err } // If the verkle conversion has ended, return a single // verkle trie. - if db.ended { + if db.CurrentTransitionState.Ended { + log.Debug("transition ended, returning a simple verkle tree") return vkt, nil } @@ -320,6 +372,7 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { // trie and an overlay, verkle trie. 
mpt, err = db.openMPTTrie(db.baseRoot) if err != nil { + log.Error("failed to open the mpt", "err", err, "root", db.baseRoot) return nil, err } @@ -345,7 +398,7 @@ func (db *cachingDB) openStorageMPTrie(stateRoot common.Hash, address common.Add // OpenStorageTrie opens the storage trie of an account func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) { // TODO this should only return a verkle tree - if db.ended { + if db.Transitioned() { mpt, err := db.openStorageMPTrie(types.EmptyRootHash, address, common.Hash{}, self) if err != nil { return nil, err @@ -361,7 +414,8 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre panic("unexpected trie type") } } - if db.started { + if db.InTransition() { + fmt.Printf("OpenStorageTrie during transition, state root=%x root=%x\n", stateRoot, root) mpt, err := db.openStorageMPTrie(db.LastMerkleRoot, address, root, nil) if err != nil { return nil, err @@ -374,7 +428,7 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre case *trie.TransitionTrie: return trie.NewTransitionTree(mpt.(*trie.SecureTrie), self.Overlay(), true), nil default: - panic("unexpected trie type") + return nil, errors.New("expected a verkle account tree, and found another type") } } mpt, err := db.openStorageMPTrie(stateRoot, address, root, nil) @@ -451,48 +505,128 @@ func (db *cachingDB) GetTreeKeyHeader(addr []byte) *verkle.Point { } func (db *cachingDB) SetCurrentAccountAddress(addr common.Address) { - db.CurrentAccountAddress = &addr + db.CurrentTransitionState.CurrentAccountAddress = &addr } func (db *cachingDB) GetCurrentAccountHash() common.Hash { var addrHash common.Hash - if db.CurrentAccountAddress != nil { - addrHash = crypto.Keccak256Hash(db.CurrentAccountAddress[:]) + if db.CurrentTransitionState.CurrentAccountAddress != nil { + addrHash = crypto.Keccak256Hash(db.CurrentTransitionState.CurrentAccountAddress[:]) } return addrHash } func (db *cachingDB) GetCurrentAccountAddress() *common.Address { - return db.CurrentAccountAddress + return db.CurrentTransitionState.CurrentAccountAddress } func (db *cachingDB) GetCurrentPreimageOffset() int64 { - return db.CurrentPreimageOffset + return db.CurrentTransitionState.CurrentPreimageOffset } func (db *cachingDB) SetCurrentPreimageOffset(offset int64) { - db.CurrentPreimageOffset = offset + db.CurrentTransitionState.CurrentPreimageOffset = offset } func (db *cachingDB) SetCurrentSlotHash(hash common.Hash) { - db.CurrentSlotHash = hash + db.CurrentTransitionState.CurrentSlotHash = hash } func (db *cachingDB) GetCurrentSlotHash() common.Hash { - return db.CurrentSlotHash + return db.CurrentTransitionState.CurrentSlotHash } func (db *cachingDB) SetStorageProcessed(processed bool) { - db.StorageProcessed = processed + db.CurrentTransitionState.StorageProcessed = processed } func (db *cachingDB) GetStorageProcessed() bool { - return db.StorageProcessed + return db.CurrentTransitionState.StorageProcessed } func (db *cachingDB) AddRootTranslation(originalRoot, translatedRoot common.Hash) { } -func (db *cachingDB) SetLastMerkleRoot(root common.Hash) { - db.LastMerkleRoot = root +func (db *cachingDB) SetLastMerkleRoot(merkleRoot common.Hash) { + db.LastMerkleRoot = merkleRoot +} + +func (db *cachingDB) SaveTransitionState(root common.Hash) { + db.transitionStateLock.Lock() + defer db.transitionStateLock.Unlock() + if db.CurrentTransitionState != nil { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + 
err := enc.Encode(db.CurrentTransitionState) + if err != nil { + log.Error("failed to encode transition state", "err", err) + return + } + + if !db.TransitionStatePerRoot.Contains(root) { + // Copy so that the address pointer isn't updated after + // it has been saved. + db.TransitionStatePerRoot.Add(root, db.CurrentTransitionState.Copy()) + + rawdb.WriteVerkleTransitionState(db.DiskDB(), root, buf.Bytes()) + } + + log.Debug("saving transition state", "storage processed", db.CurrentTransitionState.StorageProcessed, "addr", db.CurrentTransitionState.CurrentAccountAddress, "slot hash", db.CurrentTransitionState.CurrentSlotHash, "root", root, "ended", db.CurrentTransitionState.Ended, "started", db.CurrentTransitionState.Started) + } +} + +func (db *cachingDB) LoadTransitionState(root common.Hash) { + db.transitionStateLock.Lock() + defer db.transitionStateLock.Unlock() + // Try to get the transition state from the cache and + // the DB if it's not there. + ts, ok := db.TransitionStatePerRoot.Get(root) + if !ok { + // Not in the cache, try getting it from the DB + data, err := rawdb.ReadVerkleTransitionState(db.DiskDB(), root) + if err != nil { + log.Error("failed to read transition state", "err", err) + return + } + + // if a state could be read from the db, attempt to decode it + if len(data) > 0 { + var ( + newts TransitionState + buf = bytes.NewBuffer(data[:]) + dec = gob.NewDecoder(buf) + ) + // Decode transition state + err = dec.Decode(&newts) + if err != nil { + log.Error("failed to decode transition state", "err", err) + return + } + ts = &newts + } + + // Fallback that should only happen before the transition + if ts == nil { + // Initialize the first transition state, with the "ended" + // field set to true if the database was created + // as a verkle database. + log.Debug("no transition state found, starting fresh", "is verkle", db.triedb.IsVerkle()) + // Start with a fresh state + ts = &TransitionState{Ended: db.triedb.IsVerkle()} + } + } + + // Copy so that the CurrentAddress pointer in the map + // doesn't get overwritten. 
+ db.CurrentTransitionState = ts.Copy() + + log.Debug("loaded transition state", "storage processed", db.CurrentTransitionState.StorageProcessed, "addr", db.CurrentTransitionState.CurrentAccountAddress, "slot hash", db.CurrentTransitionState.CurrentSlotHash, "root", root, "ended", db.CurrentTransitionState.Ended, "started", db.CurrentTransitionState.Started) +} + +func (db *cachingDB) LockCurrentTransitionState() { + db.transitionStateLock.Lock() +} + +func (db *cachingDB) UnLockCurrentTransitionState() { + db.transitionStateLock.Unlock() } diff --git a/core/state/statedb.go b/core/state/statedb.go index ab1065eb4cf5..48bcca154343 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -176,10 +176,11 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) if tr.IsVerkle() { sdb.witness = sdb.NewAccessWitness() } - // if sdb.snaps != nil { - // if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap == nil { - // } - // } + if sdb.snaps != nil { + // if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap == nil { + // } + sdb.snap = sdb.snaps.Snapshot(root) + } return sdb, nil } @@ -1317,7 +1318,7 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er // - head layer is paired with HEAD state // - head-1 layer is paired with HEAD-1 state // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state - if err := s.snaps.Cap(root, 128); err != nil { + if err := s.snaps.Cap(root, 8192); err != nil { log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err) } } diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 6c5c158cc239..4521736c7beb 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -302,7 +302,7 @@ func (sf *subfetcher) loop() { } sf.trie = trie } else { - trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, nil /* safe to set to nil for now, as there is no prefetcher for verkle */) + trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, sf.trie) if err != nil { log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) return diff --git a/core/state_processor.go b/core/state_processor.go index fbc6beda4a08..d6a01673c6ab 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -21,9 +21,6 @@ import ( "errors" "fmt" "math/big" - "runtime" - "sync" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" @@ -34,10 +31,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" - tutils "github.com/ethereum/go-ethereum/trie/utils" - "github.com/ethereum/go-verkle" - "github.com/holiman/uint256" ) // StateProcessor is a basic Processor, which takes care of transitioning @@ -113,11 +106,6 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg return nil, nil, 0, errors.New("withdrawals before shanghai") } - // Perform the overlay transition, if relevant - if err := OverlayVerkleTransition(statedb); err != nil { - return nil, nil, 0, fmt.Errorf("error performing verkle overlay transition: %w", err) - } - // Finalize the block, applying any consensus engine specific extras (e.g. 
block rewards) p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles(), withdrawals) @@ -192,185 +180,6 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo return applyTransaction(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv) } -var zeroTreeIndex uint256.Int - -// keyValueMigrator is a helper module that collects key-values from the overlay-tree migration for Verkle Trees. -// It assumes that the walk of the base tree is done in address-order, so it exploit that fact to -// collect the key-values in a way that is efficient. -type keyValueMigrator struct { - // leafData contains the values for the future leaf for a particular VKT branch. - leafData []migratedKeyValue - - // When prepare() is called, it will start a background routine that will process the leafData - // saving the result in newLeaves to be used by migrateCollectedKeyValues(). The background - // routine signals that it is done by closing processingReady. - processingReady chan struct{} - newLeaves []verkle.LeafNode - prepareErr error -} - -func newKeyValueMigrator() *keyValueMigrator { - // We do initialize the VKT config since prepare() might indirectly make multiple GetConfig() calls - // in different goroutines when we never called GetConfig() before, causing a race considering the way - // that `config` is designed in go-verkle. - // TODO: jsign as a fix for this in the PR where we move to a file-less precomp, since it allows safe - // concurrent calls to GetConfig(). When that gets merged, we can remove this line. - _ = verkle.GetConfig() - return &keyValueMigrator{ - processingReady: make(chan struct{}), - leafData: make([]migratedKeyValue, 0, 10_000), - } -} - -type migratedKeyValue struct { - branchKey branchKey - leafNodeData verkle.BatchNewLeafNodeData -} -type branchKey struct { - addr common.Address - treeIndex uint256.Int -} - -func newBranchKey(addr []byte, treeIndex *uint256.Int) branchKey { - var sk branchKey - copy(sk.addr[:], addr) - sk.treeIndex = *treeIndex - return sk -} - -func (kvm *keyValueMigrator) addStorageSlot(addr []byte, slotNumber []byte, slotValue []byte) { - treeIndex, subIndex := tutils.GetTreeKeyStorageSlotTreeIndexes(slotNumber) - leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex)) - leafNodeData.Values[subIndex] = slotValue -} - -func (kvm *keyValueMigrator) addAccount(addr []byte, acc *types.StateAccount) { - leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex)) - - var version [verkle.LeafValueSize]byte - leafNodeData.Values[tutils.VersionLeafKey] = version[:] - - var balance [verkle.LeafValueSize]byte - for i, b := range acc.Balance.Bytes() { - balance[len(acc.Balance.Bytes())-1-i] = b - } - leafNodeData.Values[tutils.BalanceLeafKey] = balance[:] - - var nonce [verkle.LeafValueSize]byte - binary.LittleEndian.PutUint64(nonce[:8], acc.Nonce) - leafNodeData.Values[tutils.NonceLeafKey] = nonce[:] - - leafNodeData.Values[tutils.CodeHashLeafKey] = acc.CodeHash[:] -} - -func (kvm *keyValueMigrator) addAccountCode(addr []byte, codeSize uint64, chunks []byte) { - leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex)) - - // Save the code size. - var codeSizeBytes [verkle.LeafValueSize]byte - binary.LittleEndian.PutUint64(codeSizeBytes[:8], codeSize) - leafNodeData.Values[tutils.CodeSizeLeafKey] = codeSizeBytes[:] - - // The first 128 chunks are stored in the account header leaf. 
- for i := 0; i < 128 && i < len(chunks)/32; i++ { - leafNodeData.Values[byte(128+i)] = chunks[32*i : 32*(i+1)] - } - - // Potential further chunks, have their own leaf nodes. - for i := 128; i < len(chunks)/32; { - treeIndex, _ := tutils.GetTreeKeyCodeChunkIndices(uint256.NewInt(uint64(i))) - leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex)) - - j := i - for ; (j-i) < 256 && j < len(chunks)/32; j++ { - leafNodeData.Values[byte((j-128)%256)] = chunks[32*j : 32*(j+1)] - } - i = j - } -} - -func (kvm *keyValueMigrator) getOrInitLeafNodeData(bk branchKey) *verkle.BatchNewLeafNodeData { - // Remember that keyValueMigration receives actions ordered by (address, subtreeIndex). - // This means that we can assume that the last element of leafData is the one that we - // are looking for, or that we need to create a new one. - if len(kvm.leafData) == 0 || kvm.leafData[len(kvm.leafData)-1].branchKey != bk { - kvm.leafData = append(kvm.leafData, migratedKeyValue{ - branchKey: bk, - leafNodeData: verkle.BatchNewLeafNodeData{ - Stem: nil, // It will be calculated in the prepare() phase, since it's CPU heavy. - Values: make(map[byte][]byte), - }, - }) - } - return &kvm.leafData[len(kvm.leafData)-1].leafNodeData -} - -func (kvm *keyValueMigrator) prepare() { - // We fire a background routine to process the leafData and save the result in newLeaves. - // The background routine signals that it is done by closing processingReady. - go func() { - // Step 1: We split kvm.leafData in numBatches batches, and we process each batch in a separate goroutine. - // This fills each leafNodeData.Stem with the correct value. - var wg sync.WaitGroup - batchNum := runtime.NumCPU() - batchSize := (len(kvm.leafData) + batchNum - 1) / batchNum - for i := 0; i < len(kvm.leafData); i += batchSize { - start := i - end := i + batchSize - if end > len(kvm.leafData) { - end = len(kvm.leafData) - } - wg.Add(1) - - batch := kvm.leafData[start:end] - go func() { - defer wg.Done() - var currAddr common.Address - var currPoint *verkle.Point - for i := range batch { - if batch[i].branchKey.addr != currAddr { - currAddr = batch[i].branchKey.addr - currPoint = tutils.EvaluateAddressPoint(currAddr[:]) - } - stem := tutils.GetTreeKeyWithEvaluatedAddess(currPoint, &batch[i].branchKey.treeIndex, 0) - stem = stem[:verkle.StemSize] - batch[i].leafNodeData.Stem = stem - } - }() - } - wg.Wait() - - // Step 2: Now that we have all stems (i.e: tree keys) calculated, we can create the new leaves. - nodeValues := make([]verkle.BatchNewLeafNodeData, len(kvm.leafData)) - for i := range kvm.leafData { - nodeValues[i] = kvm.leafData[i].leafNodeData - } - - // Create all leaves in batch mode so we can optimize cryptography operations. - kvm.newLeaves, kvm.prepareErr = verkle.BatchNewLeafNode(nodeValues) - close(kvm.processingReady) - }() -} - -func (kvm *keyValueMigrator) migrateCollectedKeyValues(tree *trie.VerkleTrie) error { - now := time.Now() - <-kvm.processingReady - if kvm.prepareErr != nil { - return fmt.Errorf("failed to prepare key values: %w", kvm.prepareErr) - } - log.Info("Prepared key values from base tree", "duration", time.Since(now)) - - // Insert into the tree. - if err := tree.InsertMigratedLeaves(kvm.newLeaves); err != nil { - return fmt.Errorf("failed to insert migrated leaves: %w", err) - } - - return nil -} - -// InsertBlockHashHistoryAtEip2935Fork handles the insertion of all previous 256 -// blocks on the eip2935 activation block. It also adds the account header of the -// history contract to the witness. 
func InsertBlockHashHistoryAtEip2935Fork(statedb *state.StateDB, prevNumber uint64, prevHash common.Hash, chain consensus.ChainHeaderReader) { // Make sure that the historical contract is added to the witness statedb.Witness().TouchFullAccount(params.HistoryStorageAddress[:], true) diff --git a/eth/api_debug.go b/eth/api_debug.go index 9cfa9103fb58..3e0daac1b5b0 100644 --- a/eth/api_debug.go +++ b/eth/api_debug.go @@ -17,7 +17,9 @@ package eth import ( + "bytes" "context" + "encoding/gob" "errors" "fmt" "time" @@ -432,3 +434,41 @@ func (api *DebugAPI) SetTrieFlushInterval(interval string) error { func (api *DebugAPI) GetTrieFlushInterval() string { return api.eth.blockchain.GetTrieFlushInterval().String() } + +type ConversionStatusResult struct { + Started bool `json:"started"` + Ended bool `json:"ended"` +} + +func (api *DebugAPI) ConversionStatus(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*ConversionStatusResult, error) { + block, err := api.eth.APIBackend.BlockByNumberOrHash(ctx, blockNrOrHash) + if err != nil { + return nil, err + } + data, err := rawdb.ReadVerkleTransitionState(api.eth.ChainDb(), block.Root()) + if err != nil { + if err.Error() == "pebble: not found" { + return &ConversionStatusResult{}, nil + } + return nil, err + } + log.Info("found entry", "data", data) + if len(data) == 0 { + log.Info("found no data") + // started and ended will be false as no conversion has started + return &ConversionStatusResult{}, nil + } + + var ( + ts state.TransitionState + buf = bytes.NewBuffer(data[:]) + dec = gob.NewDecoder(buf) + ) + // Decode transition state + err = dec.Decode(&ts) + if err != nil { + return nil, fmt.Errorf("failed to decode transition state, err=%v", err) + } + + return &ConversionStatusResult{Started: ts.Started, Ended: ts.Ended}, nil +} diff --git a/eth/backend.go b/eth/backend.go index a6c80159077d..c47bc6b5bb35 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -201,6 +201,12 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if config.OverridePrague != nil { overrides.OverridePrague = config.OverridePrague } + if config.OverrideProofInBlock != nil { + overrides.OverrideProofInBlock = config.OverrideProofInBlock + } + if config.OverrideOverlayStride != nil { + overrides.OverrideOverlayStride = config.OverrideOverlayStride + } eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit) if err != nil { return nil, err diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 63079415fc14..925494a74d5f 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -532,13 +532,9 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe if api.eth.BlockChain().Config().IsPrague(block.Number(), block.Time()) && !api.eth.BlockChain().Config().IsPrague(parent.Number(), parent.Time()) { parent := api.eth.BlockChain().GetHeaderByNumber(block.NumberU64() - 1) if !api.eth.BlockChain().Config().IsPrague(parent.Number, parent.Time) { - api.eth.BlockChain().StartVerkleTransition(parent.Root, common.Hash{}, api.eth.BlockChain().Config(), nil) + api.eth.BlockChain().StartVerkleTransition(parent.Root, common.Hash{}, api.eth.BlockChain().Config(), nil, parent.Root) } } - // Reset db merge state in case of a reorg - if !api.eth.BlockChain().Config().IsPrague(block.Number(), block.Time()) { - api.eth.BlockChain().ReorgThroughVerkleTransition() - } // Another cornercase: if the node is in snap sync mode, but the CL 
client // tries to make it import a block. That should be denied as pushing something // into the database directly will conflict with the assumptions of snap sync diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 4606b60408dd..fc9550147bcc 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -158,6 +158,12 @@ type Config struct { // OverrideVerkle (TODO: remove after the fork) OverridePrague *uint64 `toml:",omitempty"` + + // OverrideProofInBlock + OverrideProofInBlock *bool `toml:",omitempty"` + + // OverrideOverlayStride + OverrideOverlayStride *uint64 `toml:",omitempty"` } // CreateConsensusEngine creates a consensus engine for the given chain config. diff --git a/light/trie.go b/light/trie.go index 53d54615d909..7e7c03bc16c1 100644 --- a/light/trie.go +++ b/light/trie.go @@ -101,7 +101,7 @@ func (db *odrDatabase) DiskDB() ethdb.KeyValueStore { panic("not implemented") } -func (db *odrDatabase) StartVerkleTransition(originalRoot common.Hash, translatedRoot common.Hash, chainConfig *params.ChainConfig, _ *uint64) { +func (db *odrDatabase) StartVerkleTransition(originalRoot common.Hash, translatedRoot common.Hash, chainConfig *params.ChainConfig, _ *uint64, _ common.Hash) { panic("not implemented") // TODO: Implement } @@ -121,7 +121,11 @@ func (db *odrDatabase) Transitioned() bool { panic("not implemented") // TODO: Implement } -func (db *odrDatabase) SetCurrentSlotHash(hash common.Hash) { +func (db *odrDatabase) InitTransitionStatus(bool, bool) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) SetCurrentSlotHash(common.Hash) { panic("not implemented") // TODO: Implement } @@ -129,7 +133,7 @@ func (db *odrDatabase) GetCurrentAccountAddress() *common.Address { panic("not implemented") // TODO: Implement } -func (db *odrDatabase) SetCurrentAccountAddress(_ common.Address) { +func (db *odrDatabase) SetCurrentAccountAddress(common.Address) { panic("not implemented") // TODO: Implement } @@ -141,7 +145,7 @@ func (db *odrDatabase) GetCurrentSlotHash() common.Hash { panic("not implemented") // TODO: Implement } -func (db *odrDatabase) SetStorageProcessed(_ bool) { +func (db *odrDatabase) SetStorageProcessed(bool) { panic("not implemented") // TODO: Implement } @@ -153,15 +157,30 @@ func (db *odrDatabase) GetCurrentPreimageOffset() int64 { panic("not implemented") // TODO: Implement } -func (db *odrDatabase) SetCurrentPreimageOffset(_ int64) { +func (db *odrDatabase) SetCurrentPreimageOffset(int64) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) AddRootTranslation(common.Hash, common.Hash) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) SetLastMerkleRoot(common.Hash) { panic("not implemented") // TODO: Implement } -func (db *odrDatabase) AddRootTranslation(originalRoot common.Hash, translatedRoot common.Hash) { +func (db *odrDatabase) SaveTransitionState(common.Hash) { panic("not implemented") // TODO: Implement } -func (db *odrDatabase) SetLastMerkleRoot(root common.Hash) { +func (db *odrDatabase) LoadTransitionState(common.Hash) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) LockCurrentTransitionState() { + panic("not implemented") // TODO: Implement +} +func (db *odrDatabase) UnLockCurrentTransitionState() { panic("not implemented") // TODO: Implement } diff --git a/miner/worker.go b/miner/worker.go index aae4fe8b6454..3fb4a3fa43e5 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -852,7 +852,7 @@ func (w *worker) prepareWork(genParams 
*generateParams) (*environment, error) { if genParams.parentHash != (common.Hash{}) { block := w.chain.GetBlockByHash(genParams.parentHash) if block == nil { - return nil, fmt.Errorf("missing parent") + return nil, fmt.Errorf("missing parent: %x", genParams.parentHash) } parent = block.Header() } @@ -894,7 +894,7 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) { if w.chain.Config().IsPrague(header.Number, header.Time) { parent := w.chain.GetHeaderByNumber(header.Number.Uint64() - 1) if !w.chain.Config().IsPrague(parent.Number, parent.Time) { - w.chain.StartVerkleTransition(parent.Root, common.Hash{}, w.chain.Config(), nil) + w.chain.StartVerkleTransition(parent.Root, common.Hash{}, w.chain.Config(), w.chain.Config().PragueTime, parent.Root) } } @@ -904,9 +904,6 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) { if err != nil { return nil, err } - if w.chain.Config().IsPrague(header.Number, header.Time) { - core.OverlayVerkleTransition(state) - } // Run the consensus preparation with the default or customized consensus engine. if err := w.engine.Prepare(w.chain, header); err != nil { log.Error("Failed to prepare header for sealing", "err", err) diff --git a/params/config.go b/params/config.go index 94dcb57b2fe2..5b55c5197700 100644 --- a/params/config.go +++ b/params/config.go @@ -301,7 +301,8 @@ type ChainConfig struct { IsDevMode bool `json:"isDev,omitempty"` // Proof in block - ProofInBlocks bool `json:"proofInBlocks,omitempty"` + ProofInBlocks bool `json:"proofInBlocks,omitempty"` + OverlayStride uint64 `json:"overlayStride,omitempty"` } // EthashConfig is the consensus engine configs for proof-of-work based sealing. diff --git a/trie/transition.go b/trie/transition.go index 24daf436ed8a..0fe197336524 100644 --- a/trie/transition.go +++ b/trie/transition.go @@ -17,6 +17,8 @@ package trie import ( + "fmt" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" @@ -62,7 +64,11 @@ func (t *TransitionTrie) GetKey(key []byte) []byte { // not be modified by the caller. If a node was not found in the database, a // trie.MissingNodeError is returned. 
func (t *TransitionTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) { - if val, err := t.overlay.GetStorage(addr, key); len(val) != 0 || err != nil { + val, err := t.overlay.GetStorage(addr, key) + if err != nil { + return nil, fmt.Errorf("get storage from overlay: %s", err) + } + if len(val) != 0 { return val, nil } // TODO also insert value into overlay From 8cbdd335ae867517144fe7609f157755f1e3cf07 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Mon, 6 May 2024 09:36:02 +0200 Subject: [PATCH 97/99] add quick conversion test (#429) * add quick conversion test * add debug traces * activate debug rpc * where I discover I have an outdated version of geth on my desktop --- .github/workflows/conversion.yml | 75 ++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 .github/workflows/conversion.yml diff --git a/.github/workflows/conversion.yml b/.github/workflows/conversion.yml new file mode 100644 index 000000000000..b8441ffccc8a --- /dev/null +++ b/.github/workflows/conversion.yml @@ -0,0 +1,75 @@ +name: Overlay conversion + +on: + push: + branches: [ master, transition-post-genesis, store-transition-state-in-db ] + pull_request: + branches: [ master, kaustinen-with-shapella, transition-post-genesis, store-transition-state-in-db, lock-overlay-transition ] + workflow_dispatch: + +jobs: + conversion: + runs-on: self-hosted + steps: + - uses: actions/checkout@v2 + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.22.2 + + - name: Cleanup from previous runs + run: | + rm -f log.txt + rm -rf .shadowfork + rm -f genesis.json + - name: Download genesis file + run: wget https://gist.githubusercontent.com/gballet/0b02a025428aa0e7b67941864d54716c/raw/bfb4e158bca5217b356a19b2ec55c4a45a7b2bad/genesis.json + + - name: Init data + run: go run ./cmd/geth --dev --cache.preimages init genesis.json + + - name: Run geth in devmode + run: go run ./cmd/geth --dev --dev.period=5 --cache.preimages --http --datadir=.shadowfork --override.overlay-stride=10 --override.prague=$(($(date +%s) + 45)) --http.api=debug & + + - name: Wait for the transition to start + run: | + start_time=$(date +%s) + while true; do + sleep 5 + current_time=$(date +%s) + elapsed_time=$((current_time - start_time)) + # 2 minute timeout + if [ $elapsed_time -ge 120 ]; then + kill -9 $(pgrep -f geth) + exit 1 + fi + pgrep -l geth + # Check for signs that the conversion has started + started=`curl -X POST -H "Content-Type: application/json" -d '{ "id": 7, "jsonrpc": "2.0", "method": "debug_conversionStatus", "params": ["latest"]}' http://localhost:8545 -s | jq '.result.started'` + echo $? 
+ echo $started + if [ "$started" == "true" ]; then + break + fi + echo "looping" + done + - name: Wait for the transition to end + run: | + start_time=$(date +%s) + while true; do + sleep 5 + current_time=$(date +%s) + elapsed_time=$((current_time - start_time)) + # 10 minute timeout + if [ $elapsed_time -ge 300 ]; then + cat log.txt + kill -9 $(pgrep -f geth) + exit 1 + fi + # Check for signs that the conversion has ended + ended=`curl -X POST -H "Content-Type: application/json" -d '{ "id": 7, "jsonrpc": "2.0", "method": "debug_conversionStatus", "params": ["latest"]}' http://localhost:8545 -s | jq '.result.started'` + if [ "$ended" == "true" ]; then + kill -9 $(pgrep -f geth) + break + fi + done From 5e02d05155b6575d1acd9ecc0ed0df4f8acfe89a Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 7 May 2024 13:29:05 +0200 Subject: [PATCH 98/99] Fix iterator from t8n (#434) * various verkle iterator fixes * remove unused nodeToDBKey --- trie/verkle.go | 5 ----- trie/verkle_iterator.go | 10 +++++----- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/trie/verkle.go b/trie/verkle.go index ee953c232318..f9a5f912f2ea 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -231,11 +231,6 @@ func (trie *VerkleTrie) Hash() common.Hash { return trie.root.Commit().Bytes() } -func nodeToDBKey(n verkle.VerkleNode) []byte { - ret := n.Commitment().Bytes() - return ret[:] -} - // Commit writes all nodes to the trie's memory database, tracking the internal // and external (for account tries) references. func (trie *VerkleTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet, error) { diff --git a/trie/verkle_iterator.go b/trie/verkle_iterator.go index 5f5fc725ed46..16de8746b6d5 100644 --- a/trie/verkle_iterator.go +++ b/trie/verkle_iterator.go @@ -24,7 +24,7 @@ import ( type verkleNodeIteratorState struct { Node verkle.VerkleNode - Index int + Index int // points to _next_ value } type verkleNodeIterator struct { @@ -97,9 +97,9 @@ func (it *verkleNodeIterator) Next(descend bool) bool { it.current = it.stack[len(it.stack)-1].Node it.stack[len(it.stack)-1].Index++ return it.Next(descend) - case *verkle.HashedNode: + case verkle.HashedNode: // resolve the node - data, err := it.trie.db.diskdb.Get(nodeToDBKey(node)) + data, err := it.trie.FlatdbNodeResolver(it.Path()) if err != nil { panic(err) } @@ -112,7 +112,7 @@ func (it *verkleNodeIterator) Next(descend bool) bool { it.stack[len(it.stack)-1].Node = it.current parent := &it.stack[len(it.stack)-2] parent.Node.(*verkle.InternalNode).SetChild(parent.Index, it.current) - return true + return it.Next(true) default: panic("invalid node type") } @@ -147,7 +147,7 @@ func (it *verkleNodeIterator) Path() []byte { var path []byte for i, state := range it.stack { // skip the last byte - if i <= len(it.stack)-1 { + if i >= len(it.stack)-1 { break } path = append(path, byte(state.Index)) From 537d5c08cb6d5567fad11cefe5c684823a22d397 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Wed, 8 May 2024 13:34:25 +0200 Subject: [PATCH 99/99] update branches that CI runs on --- .github/workflows/conversion.yml | 4 ++-- .github/workflows/go.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/conversion.yml b/.github/workflows/conversion.yml index b8441ffccc8a..cf4e9a4b8e48 100644 --- a/.github/workflows/conversion.yml +++ b/.github/workflows/conversion.yml @@ -2,9 +2,9 @@ name: Overlay conversion on: push: - branches: [ master, 
transition-post-genesis, store-transition-state-in-db ] + branches: [ kaustinen-with-shapella, kaustinen-with-shapella-rebase-pathdb ] pull_request: - branches: [ master, kaustinen-with-shapella, transition-post-genesis, store-transition-state-in-db, lock-overlay-transition ] + branches: [ kaustinen-with-shapella ] workflow_dispatch: jobs: diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index b349f23d6617..564a41e8bdb1 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -2,9 +2,9 @@ name: Go lint and test on: push: - branches: [ master ] + branches: [ kaustinen-with-shapella, kaustinen-with-shapella-rebase-pathdb ] pull_request: - branches: [ master, verkle-trie-proof-in-block-rebased, verkle-trie-post-merge, beverly-hills-head, 'verkle/replay-change-with-tree-group-tryupdate', beverly-hills-just-before-pbss, kaustinen-with-shapella ] + branches: [ kaustinen-with-shapella, kaustinen-with-shapella-rebase-pathdb ] workflow_dispatch: jobs:
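
Note on the TransitionTrie.GetStorage change in the trie/transition.go hunk above: the old compound one-liner returned a nil error even when the overlay lookup itself failed, whereas the patched version surfaces the overlay error and only falls back on a genuine miss. The following toy read-through sketch illustrates that corrected flow; the plain maps standing in for the overlay verkle trie and the base trie are purely an illustration assumption, not the real types.

package main

import (
	"errors"
	"fmt"
)

// store is a stand-in for either the overlay (verkle) or the base trie;
// the real TransitionTrie wraps two trie implementations instead of maps.
type store map[string][]byte

func (s store) Get(key string) ([]byte, error) {
	if s == nil {
		return nil, errors.New("store not initialised")
	}
	return s[key], nil // an absent key yields a zero-length value, not an error
}

// transitionRead mirrors the corrected GetStorage flow: an overlay error is
// surfaced instead of being silently treated as a hit, a non-empty overlay
// value wins, and only a genuine miss falls through to the base store.
func transitionRead(overlay, base store, key string) ([]byte, error) {
	val, err := overlay.Get(key)
	if err != nil {
		return nil, fmt.Errorf("get storage from overlay: %w", err)
	}
	if len(val) != 0 {
		return val, nil
	}
	return base.Get(key)
}

func main() {
	base := store{"slot1": {0x01}}
	overlay := store{"slot2": {0x02}}

	for _, key := range []string{"slot1", "slot2", "slot3"} {
		val, err := transitionRead(overlay, base, key)
		fmt.Printf("%s -> %x (err: %v)\n", key, val, err)
	}
}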
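
Note on the conversion workflow added in PATCH 97/99: it drives the node entirely through the debug_conversionStatus JSON-RPC method and polls result.started with curl and jq. A minimal Go sketch of that polling step is shown below; it assumes only what the jq filter implies about the response shape (a boolean started field under result) and is not part of the patch series.

package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"time"
)

// conversionStatus mirrors the field the workflow reads via jq ('.result.started');
// the exact response shape is an assumption based on that usage. The workflow's
// second wait loop reuses this same flag to decide the conversion has ended.
type conversionStatus struct {
	Started bool `json:"started"`
}

// queryConversionStatus issues the same JSON-RPC request the workflow sends with curl.
func queryConversionStatus(url string) (*conversionStatus, error) {
	reqBody, _ := json.Marshal(map[string]interface{}{
		"id":      7,
		"jsonrpc": "2.0",
		"method":  "debug_conversionStatus",
		"params":  []string{"latest"},
	})
	resp, err := http.Post(url, "application/json", bytes.NewReader(reqBody))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var rpcResp struct {
		Result conversionStatus `json:"result"`
		Error  *struct {
			Message string `json:"message"`
		} `json:"error"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&rpcResp); err != nil {
		return nil, err
	}
	if rpcResp.Error != nil {
		return nil, errors.New(rpcResp.Error.Message)
	}
	return &rpcResp.Result, nil
}

func main() {
	const url = "http://localhost:8545" // same endpoint the workflow polls
	deadline := time.Now().Add(2 * time.Minute) // matches the workflow's 2 minute start timeout
	for time.Now().Before(deadline) {
		status, err := queryConversionStatus(url)
		if err == nil && status.Started {
			fmt.Println("conversion has started")
			return
		}
		time.Sleep(5 * time.Second) // same 5 second poll interval as the workflow
	}
	fmt.Println("timed out waiting for the conversion to start")
}

The request body, endpoint, five-second poll interval and two-minute deadline all mirror the workflow's first wait loop; only the Go types around them are assumed for the sake of the sketch.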