From 3564bf0f59d82863cf0bae3ea64b41e032437274 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Nowosielski?= Date: Fri, 13 Sep 2024 13:28:35 +0200 Subject: [PATCH] Fix: Trie iteration --- adapters/p2p2core/felt.go | 4 +- core/trie/snap_support.go | 43 +++++++ core/trie/snap_support_test.go | 220 +++++++++++++++++++++++++++++++++ p2p/snap_server.go | 73 +++-------- p2p/snap_server_test.go | 113 ++++++++++++++++- p2p/snap_syncer.go | 4 +- 6 files changed, 395 insertions(+), 62 deletions(-) diff --git a/adapters/p2p2core/felt.go b/adapters/p2p2core/felt.go index 62c5f64cd7..103e57b399 100644 --- a/adapters/p2p2core/felt.go +++ b/adapters/p2p2core/felt.go @@ -4,6 +4,7 @@ import ( "github.com/NethermindEth/juno/core/felt" "github.com/NethermindEth/juno/p2p/starknet/spec" "github.com/ethereum/go-ethereum/common" + "reflect" ) func AdaptHash(h *spec.Hash) *felt.Felt { @@ -23,10 +24,9 @@ func AdaptFelt(f *spec.Felt252) *felt.Felt { } func adapt(v interface{ GetElements() []byte }) *felt.Felt { - if v == nil { + if v == nil || reflect.ValueOf(v).IsNil() { return nil } - return new(felt.Felt).SetBytes(v.GetElements()) } diff --git a/core/trie/snap_support.go b/core/trie/snap_support.go index 84f3f1e2c4..4f6fdb2061 100644 --- a/core/trie/snap_support.go +++ b/core/trie/snap_support.go @@ -2,6 +2,7 @@ package trie import ( "github.com/NethermindEth/juno/core/felt" + "github.com/NethermindEth/juno/utils" ) func (t *Trie) IterateAndGenerateProof(startValue *felt.Felt, consumer func(key, value *felt.Felt) (bool, error), @@ -56,6 +57,48 @@ func (t *Trie) IterateAndGenerateProof(startValue *felt.Felt, consumer func(key, return proofs, finished, nil } +func (t *Trie) IterateWithLimit( + startAddr *felt.Felt, + limitAddr *felt.Felt, + maxNodes uint32, + // TODO: remove the logger - and move to the tree + logger utils.SimpleLogger, + consumer func(key, value *felt.Felt) error, +) ([]ProofNode, bool, error) { + pathes := make([]*felt.Felt, 0) + hashes := make([]*felt.Felt, 0) + + count := uint32(0) + proof, finished, err := t.IterateAndGenerateProof(startAddr, func(key *felt.Felt, value *felt.Felt) (bool, error) { + // Need at least one. 
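+		// Keys above limitAddr are skipped rather than stopping the walk early,
+		// so `finished` still reports whether the end of the trie was reached.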
+ if limitAddr != nil && key.Cmp(limitAddr) > 0 { + return true, nil + } + + pathes = append(pathes, key) + hashes = append(hashes, value) + + err := consumer(key, value) + if err != nil { + logger.Errorw("error from consumer function", "err", err) + return false, err + } + + count++ + if count >= maxNodes { + logger.Infow("Max nodes reached", "count", count) + return false, nil + } + return true, nil + }) + if err != nil { + logger.Errorw("IterateAndGenerateProof", "err", err, "finished", finished) + return nil, finished, err + } + + return proof, finished, err +} + func VerifyRange(root, startKey *felt.Felt, keys, values []*felt.Felt, proofs []ProofNode, hash hashFunc, treeHeight uint8, ) (hasMore, valid bool, oerr error) { diff --git a/core/trie/snap_support_test.go b/core/trie/snap_support_test.go index 9a3501bb6d..e77e974a02 100644 --- a/core/trie/snap_support_test.go +++ b/core/trie/snap_support_test.go @@ -1,6 +1,11 @@ package trie_test import ( + "fmt" + "github.com/NethermindEth/juno/db/pebble" + "github.com/NethermindEth/juno/utils" + "github.com/stretchr/testify/require" + "math" "testing" "github.com/NethermindEth/juno/core/crypto" @@ -276,3 +281,218 @@ func TestRangeAndVerifyReject(t *testing.T) { }) } } + +func TestIterateOverTrie(t *testing.T) { + memdb := pebble.NewMemTest(t) + txn, err := memdb.NewTransaction(true) + require.NoError(t, err) + logger := utils.NewNopZapLogger() + + tempTrie, err := trie.NewTriePedersen(trie.NewStorage(txn, []byte{0}), 251) + require.NoError(t, err) + + // key ranges + var ( + bigPrefix = uint64(1000 * 1000 * 1000 * 1000) + count = 100 + ranges = 5 + fstInt, lstInt uint64 + fstKey, lstKey *felt.Felt + ) + for i := range ranges { + for j := range count { + lstInt = bigPrefix*uint64(i) + uint64(count+j) + lstKey = new(felt.Felt).SetUint64(lstInt) + value := new(felt.Felt).SetUint64(uint64(10*count + j + i)) + + if fstKey == nil { + fstKey = lstKey + fstInt = lstInt + } + + _, err := tempTrie.Put(lstKey, value) + require.NoError(t, err) + } + } + + maxNodes := uint32(ranges*count + 1) + startZero := felt.Zero.Clone() + + visitor := func(start, limit *felt.Felt, max uint32) (int, bool, *felt.Felt, *felt.Felt) { + visited := 0 + var fst, lst *felt.Felt + _, finish, err := tempTrie.IterateWithLimit( + start, + limit, + max, + logger, + func(key, value *felt.Felt) error { + if fst == nil { + fst = key + } + lst = key + visited++ + return nil + }) + require.NoError(t, err) + return visited, finish, fst, lst + } + + t.Run("iterate without limit", func(t *testing.T) { + expectedLeaves := ranges * count + visited, finish, fst, lst := visitor(nil, nil, maxNodes) + require.Equal(t, expectedLeaves, visited) + require.True(t, finish) + require.Equal(t, fstKey, fst) + require.Equal(t, lstKey, lst) + fmt.Println("Visited:", visited, "\tFinish:", finish, "\tRange:", fst.Uint64(), "-", lst.Uint64()) + }) + + t.Run("iterate over trie im chunks", func(t *testing.T) { + chunkSize := 77 + lstChunkSize := int(math.Mod(float64(ranges*count), float64(chunkSize))) + startKey := startZero + for { + visited, finish, fst, lst := visitor(startKey, nil, uint32(chunkSize)) + fmt.Println("Finish:", finish, "\tstart:", startKey.Uint64(), "\trange:", fst.Uint64(), "-", lst.Uint64()) + if finish { + require.Equal(t, lstChunkSize, visited) + break + } + require.Equal(t, chunkSize, visited) + require.False(t, finish) + startKey = new(felt.Felt).SetUint64(lst.Uint64() + 1) + } + }) + + t.Run("iterate over trie im groups", func(t *testing.T) { + startKey := startZero + for { + 
visited, finish, fst, lst := visitor(startKey, nil, uint32(count)) + if finish { + require.Equal(t, 0, visited) + fmt.Println("Finish:", finish, "\tstart:", startKey.Uint64(), "\trange: ") + break + } + fmt.Println("Finish:", finish, "\tstart:", startKey.Uint64(), "\trange:", fst.Uint64(), "-", lst.Uint64()) + require.Equal(t, count, visited) + require.False(t, finish) + if lst != nil { + startKey = new(felt.Felt).SetUint64(lst.Uint64() + 1) + } + } + }) + + t.Run("stop before first key", func(t *testing.T) { + lowerBound := new(felt.Felt).SetUint64(fstInt - 1) + visited, finish, _, _ := visitor(startZero, lowerBound, maxNodes) + require.True(t, finish) + require.Equal(t, 0, visited) + }) + + t.Run("first key is a limit", func(t *testing.T) { + visited, finish, fst, lst := visitor(startZero, fstKey, maxNodes) + require.Equal(t, 1, visited) + require.True(t, finish) + require.Equal(t, fstKey, fst) + require.Equal(t, fstKey, lst) + }) + + t.Run("start is the last key", func(t *testing.T) { + visited, finish, fst, lst := visitor(lstKey, nil, maxNodes) + require.Equal(t, 1, visited) + require.True(t, finish) + require.Equal(t, lstKey, fst) + require.Equal(t, lstKey, lst) + }) + + t.Run("start and limit are the last key", func(t *testing.T) { + visited, finish, fst, lst := visitor(lstKey, lstKey, maxNodes) + require.Equal(t, 1, visited) + require.True(t, finish) + require.Equal(t, lstKey, fst) + require.Equal(t, lstKey, lst) + }) + + t.Run("iterate after last key yields no key", func(t *testing.T) { + upperBound := new(felt.Felt).SetUint64(lstInt + 1) + visited, finish, fst, _ := visitor(upperBound, nil, maxNodes) + require.Equal(t, 0, visited) + require.True(t, finish) + require.Nil(t, fst) + }) + + t.Run("iterate with reversed bounds yields no key", func(t *testing.T) { + visited, finish, fst, _ := visitor(lstKey, fstKey, maxNodes) + require.Equal(t, 0, visited) + require.True(t, finish) + require.Nil(t, fst) + }) + + t.Run("iterate over the first group", func(t *testing.T) { + fstGrpBound := new(felt.Felt).SetUint64(fstInt + uint64(count-1)) + visited, finish, fst, lst := visitor(fstKey, fstGrpBound, maxNodes) + require.Equal(t, count, visited) + require.True(t, finish) + require.Equal(t, fstKey, fst) + require.Equal(t, fstGrpBound, lst) + }) + + t.Run("iterate over the first group no lower bound", func(t *testing.T) { + fstGrpBound := new(felt.Felt).SetUint64(fstInt + uint64(count-1)) + visited, finish, fst, lst := visitor(nil, fstGrpBound, maxNodes) + require.Equal(t, count, visited) + require.True(t, finish) + require.Equal(t, fstKey, fst) + require.Equal(t, fstGrpBound, lst) + }) + + t.Run("iterate over the first group by max nodes", func(t *testing.T) { + fstGrpBound := new(felt.Felt).SetUint64(fstInt + uint64(count-1)) + visited, finish, fst, lst := visitor(fstKey, nil, uint32(count)) + require.Equal(t, count, visited) + require.False(t, finish) + require.Equal(t, fstKey, fst) + require.Equal(t, fstGrpBound, lst) + }) + + t.Run("iterate over the last group, start before group bound", func(t *testing.T) { + lstGrpStartInt := lstInt - uint64(count-1) + lstGrpFstKey := new(felt.Felt).SetUint64(lstGrpStartInt) + startKey := new(felt.Felt).SetUint64(lstGrpStartInt - uint64(count)) + + visited, finish, fst, lst := visitor(startKey, nil, maxNodes) + require.Equal(t, count, visited) + require.True(t, finish) + require.Equal(t, lstGrpFstKey, fst) + require.Equal(t, lstKey, lst) + }) + + sndGrpFstKey := new(felt.Felt).SetUint64(bigPrefix + uint64(count)) + sndGrpLstKey := 
new(felt.Felt).SetUint64(bigPrefix + uint64(2*count-1)) + t.Run("second group key selection", func(t *testing.T) { + visited, _, _, lst := visitor(fstKey, nil, uint32(count+1)) + require.Equal(t, count+1, visited) + require.Equal(t, sndGrpFstKey, lst) + + visited, finish, fst, lst := visitor(sndGrpFstKey, sndGrpLstKey, maxNodes) + require.Equal(t, count, visited) + require.True(t, finish) + require.Equal(t, sndGrpFstKey, fst) + require.Equal(t, sndGrpLstKey, lst) + }) + + t.Run("second group key selection 2", func(t *testing.T) { + nodeAfterFstGrp := new(felt.Felt).SetUint64(fstInt + uint64(count+1)) + visited, _, fst, lst := visitor(nodeAfterFstGrp, nil, 1) + require.Equal(t, 1, visited) + require.Equal(t, sndGrpFstKey, fst) + require.Equal(t, fst, lst) + + visited, finish, fst, lst := visitor(sndGrpFstKey, nil, uint32(count)) + require.Equal(t, count, visited) + require.False(t, finish) + require.Equal(t, sndGrpFstKey, fst) + require.Equal(t, sndGrpLstKey, lst) + }) +} diff --git a/p2p/snap_server.go b/p2p/snap_server.go index 08bf0b3def..a6e9ec7fe5 100644 --- a/p2p/snap_server.go +++ b/p2p/snap_server.go @@ -97,7 +97,7 @@ func (b *snapServer) GetClassRange(request *spec.ClassRangeRequest) (iter.Seq[pr stateRoot := p2p2core.AdaptHash(request.Root) startAddr := p2p2core.AdaptHash(request.Start) - b.log.Debugw("GetClassRange", "root", stateRoot, "start", startAddr, "chunks", request.ChunksPerProof) + b.log.Debugw("GetClassRange", "start", startAddr, "chunks", request.ChunksPerProof) return func(yield yieldFunc) { s, err := b.blockchain.GetStateForStateRoot(stateRoot) @@ -121,7 +121,7 @@ func (b *snapServer) GetClassRange(request *spec.ClassRangeRequest) (iter.Seq[pr startAddr := p2p2core.AdaptHash(request.Start) limitAddr := p2p2core.AdaptHash(request.End) - if limitAddr.IsZero() { + if limitAddr != nil && limitAddr.IsZero() { limitAddr = nil } @@ -131,7 +131,7 @@ func (b *snapServer) GetClassRange(request *spec.ClassRangeRequest) (iter.Seq[pr } classkeys := []*felt.Felt{} - proofs, finished, err := iterateWithLimit(ctrie, startAddr, limitAddr, determineMaxNodes(request.ChunksPerProof), b.log, + proofs, finished, err := ctrie.IterateWithLimit(startAddr, limitAddr, determineMaxNodes(request.ChunksPerProof), b.log, func(key, value *felt.Felt) error { classkeys = append(classkeys, key) return nil @@ -164,7 +164,9 @@ func (b *snapServer) GetClassRange(request *spec.ClassRangeRequest) (iter.Seq[pr RangeProof: Core2P2pProof(proofs), } - b.log.Infow("sending class range response", "len(classes)", len(classkeys)) + first := classkeys[0] + last := classkeys[len(classkeys)-1] + b.log.Infow("sending class range response", "len(classes)", len(classkeys), "first", first, "last", last) if !yield(clsMsg) { // we should not send `FinMsg` when the client explicitly asks to stop return @@ -176,7 +178,7 @@ func (b *snapServer) GetClassRange(request *spec.ClassRangeRequest) (iter.Seq[pr } yield(finMsg) - b.log.Infow("class range iteration completed") + b.log.Infow("GetClassRange iteration completed") }, nil } @@ -213,7 +215,7 @@ func (b *snapServer) GetContractRange(request *spec.ContractRangeRequest) (iter. 
states := []*spec.ContractState{} for { - proofs, finished, err := iterateWithLimit(strie, startAddr, limitAddr, determineMaxNodes(request.ChunksPerProof), b.log, + proofs, finished, err := strie.IterateWithLimit(startAddr, limitAddr, determineMaxNodes(request.ChunksPerProof), b.log, func(key, value *felt.Felt) error { classHash, err := s.ContractClassHash(key) if err != nil { @@ -244,6 +246,7 @@ func (b *snapServer) GetContractRange(request *spec.ContractRangeRequest) (iter. }) return nil }) + if err != nil { log.Error("error iterating storage trie", "err", err) return @@ -261,7 +264,12 @@ func (b *snapServer) GetContractRange(request *spec.ContractRangeRequest) (iter. }, } - b.log.Infow("sending contract range response", "len(states)", len(states)) + var first, last *felt.Felt + if len(states) > 0 { + first = p2p2core.AdaptAddress(states[0].Address) + last = p2p2core.AdaptAddress(states[len(states)-1].Address) + } + b.log.Infow("sending contract range response", "len(states)", len(states), "first", first, "last", last) if !yield(cntrMsg) { // we should not send `FinMsg` when the client explicitly asks to stop return @@ -269,6 +277,8 @@ func (b *snapServer) GetContractRange(request *spec.ContractRangeRequest) (iter. if finished { break } + + states = states[:0] } yield(finMsg) @@ -302,7 +312,8 @@ func (b *snapServer) GetStorageRange(request *spec.ContractStorageRequest) (iter strie, err := s.StorageTrieForAddr(p2p2core.AdaptAddress(query.Address)) if err != nil { - log.Error("error getting storage trie for address", "addr", query.Address.String(), "err", err) + addr := p2p2core.AdaptAddress(query.Address) + log.Error("error getting storage trie for address", "addr", addr, "err", err) return } @@ -399,7 +410,7 @@ func (b *snapServer) handleStorageRangeRequest( limit = nodeLimit } - proofs, finish, err := iterateWithLimit(stTrie, startAddr, endAddr, limit, logger, + proofs, finish, err := stTrie.IterateWithLimit(startAddr, endAddr, limit, logger, func(key, value *felt.Felt) error { response = append(response, &spec.ContractStoredValue{ Key: core2p2p.AdaptFelt(key), @@ -434,50 +445,6 @@ func (b *snapServer) handleStorageRangeRequest( return uint32(totalSent), nil } -func iterateWithLimit( - srcTrie *trie.Trie, - startAddr *felt.Felt, - limitAddr *felt.Felt, - maxNodes uint32, - logger utils.SimpleLogger, - consumer func(key, value *felt.Felt) error, -) ([]trie.ProofNode, bool, error) { - pathes := make([]*felt.Felt, 0) - hashes := make([]*felt.Felt, 0) - - logger.Infow("entering IterateAndGenerateProof", "startAddr", startAddr, "endAddr", limitAddr, "maxNodes", maxNodes) - count := uint32(0) - proof, finished, err := srcTrie.IterateAndGenerateProof(startAddr, func(key *felt.Felt, value *felt.Felt) (bool, error) { - // Need at least one. 
- if limitAddr != nil && key.Cmp(limitAddr) > 1 && count > 0 { - return false, nil - } - - pathes = append(pathes, key) - hashes = append(hashes, value) - - err := consumer(key, value) - if err != nil { - logger.Errorw("error from consumer function", "err", err) - return false, err - } - - count++ - if count >= maxNodes { - logger.Infow("Max nodes reached", "count", count) - return false, nil - } - return true, nil - }) - if err != nil { - logger.Errorw("IterateAndGenerateProof", "err", err, "finished", finished) - return nil, finished, err - } - - logger.Infow("exiting IterateAndGenerateProof", "len(proof)", len(proof), "finished", finished, "err", err) - return proof, finished, err -} - func Core2P2pProof(proofs []trie.ProofNode) *spec.PatriciaRangeProof { nodes := make([]*spec.PatriciaNode, len(proofs)) diff --git a/p2p/snap_server_test.go b/p2p/snap_server_test.go index 227b1b65c1..f315d3cdb1 100644 --- a/p2p/snap_server_test.go +++ b/p2p/snap_server_test.go @@ -19,9 +19,9 @@ import ( func TestClassRange(t *testing.T) { // Note: set to true to make test super long to complete - shouldFetchAllClasses := true + shouldFetchAllClasses := false var d db.DB - //t.Skip("DB snapshot is needed for this test") + t.Skip("DB snapshot is needed for this test") d, _ = pebble.NewWithOptions("/Users/pnowosie/juno/snapshots/juno-sepolia", 128000000, 128, false) defer func() { _ = d.Close() }() bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered @@ -91,7 +91,7 @@ func TestClassRange(t *testing.T) { func TestContractRange(t *testing.T) { var d db.DB - //t.Skip("DB snapshot is needed for this test") + t.Skip("DB snapshot is needed for this test") d, _ = pebble.NewWithOptions("/Users/pnowosie/juno/snapshots/juno-sepolia", 128000000, 128, false) defer func() { _ = d.Close() }() bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered @@ -180,7 +180,7 @@ func TestContractRange_FinMsg_Received(t *testing.T) { func TestContractStorageRange(t *testing.T) { var d db.DB - //t.Skip("DB snapshot is needed for this test") + t.Skip("DB snapshot is needed for this test") d, _ = pebble.NewWithOptions("/Users/pnowosie/juno/snapshots/juno-sepolia", 128000000, 128, false) defer func() { _ = d.Close() }() bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered @@ -205,6 +205,16 @@ func TestContractStorageRange(t *testing.T) { storageRoot *felt.Felt expectedLeaves int }{ + { + address: feltFromString("0x5eb8d1bc5aaf2f323f2a807d429686ac012ca16f90740071d2f3a160dc231"), + storageRoot: feltFromString("0x0"), + expectedLeaves: 0, + }, + { + address: feltFromString("0x614a5e0519963324acb5640321240827c0cd6a9f7cf5f17a80c1596e607d0"), + storageRoot: feltFromString("0x55ee7fd57d0aa3da8b89ea2feda16f9435186988a8b00b6f22f5ba39f3cf172"), + expectedLeaves: 1, + }, { address: feltFromString("0x3deecdb26a60e4c062d5bd98ab37f72ea2acc37f28dae6923359627ebde9"), storageRoot: feltFromString("0x276edbc91a945d11645ba0b8298c7d657e554d06ab2bb765cbc44d61fa01fd5"), @@ -277,7 +287,7 @@ func TestContractStorageRange(t *testing.T) { func TestGetClassesByHash(t *testing.T) { var d db.DB - //t.Skip("DB snapshot is needed for this test") + t.Skip("DB snapshot is needed for this test") d, _ = pebble.NewWithOptions("/Users/pnowosie/juno/snapshots/juno-sepolia", 128000000, 128, false) defer func() { _ = d.Close() }() bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered @@ -333,6 
+343,99 @@ func TestGetClassesByHash(t *testing.T) { assert.True(t, finMsgReceived) } +func TestGetContractStorageRoot(t *testing.T) { + var d db.DB + t.Skip("DB snapshot is needed for this test") + d, _ = pebble.NewWithOptions("/Users/pnowosie/juno/snapshots/juno-sepolia", 128000000, 128, false) + defer func() { _ = d.Close() }() + bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered + + b, err := bc.Head() + assert.NoError(t, err) + + fmt.Printf("headblock %d\n", b.Number) + + stateRoot := b.GlobalStateRoot + + logger, _ := utils.NewZapLogger(utils.DEBUG, false) + server := &snapServer{ + log: logger, + blockchain: bc, + } + + tests := []struct { + address *felt.Felt + storageRoot *felt.Felt + }{ + { + address: feltFromString("0x5eb8d1bc5aaf2f323f2a807d429686ac012ca16f90740071d2f3a160dc231"), + storageRoot: feltFromString("0x0"), + }, + { + address: feltFromString("0x5ec87443bcb74e1e58762be15c3c513926a91a5d5b4a204e9e7b5ca884fb7"), + storageRoot: feltFromString("0x36fc3942926334a24b739065f26ffe547044af7466a6f8d391e0750603ffa8c"), + }, + { + address: feltFromString("0x614a5e0519963324acb5640321240827c0cd6a9f7cf5f17a80c1596e607d0"), + storageRoot: feltFromString("0x55ee7fd57d0aa3da8b89ea2feda16f9435186988a8b00b6f22f5ba39f3cf172"), + }, + { + address: feltFromString("0x6b7d60ec8176d8a1c77afdca05191dad1e1a20fef2e5e3aceccee0b3cbd6a"), + storageRoot: feltFromString("0x726d42240f103a32ce1b6acc7498f52fdf83e308cf70e0a6394591cee1886c8"), + }, + { + address: feltFromString("0x3deecdb26a60e4c062d5bd98ab37f72ea2acc37f28dae6923359627ebde9"), + storageRoot: feltFromString("0x276edbc91a945d11645ba0b8298c7d657e554d06ab2bb765cbc44d61fa01fd5"), + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%.7s...", test.address), func(t *testing.T) { + request := &spec.ContractRangeRequest{ + ChunksPerProof: 10, + Start: core2p2p.AdaptAddress(test.address), + End: core2p2p.AdaptAddress(test.address), + StateRoot: core2p2p.AdaptHash(stateRoot), + } + + iter, err := server.GetContractRange(request) + assert.NoError(t, err) + + finMsgReceived := false + for res := range iter { + assert.NotNil(t, res) + resT, ok := res.(*spec.ContractRangeResponse) + assert.True(t, ok) + assert.NotNil(t, resT) + + i := 0 + switch v := resT.GetResponses().(type) { + case *spec.ContractRangeResponse_Range: + assert.False(t, finMsgReceived) + assert.Len(t, v.Range.State, 1) + contract := v.Range.State[0] + fmt.Println("Contract:", p2p2core.AdaptAddress(contract.Address), "StorageRoot:", p2p2core.AdaptHash(contract.Storage)) + assert.Equal(t, test.address, p2p2core.AdaptAddress(contract.Address)) + assert.Equal(t, test.storageRoot, p2p2core.AdaptHash(contract.Storage)) + + for j, s := range v.Range.State { + fmt.Println("[", j, "] Contract:", p2p2core.AdaptAddress(s.Address), "StorageRoot:", p2p2core.AdaptHash(s.Storage)) + } + i++ + if i > 5 { + t.Fatal("Too many contracts received") + } + case *spec.ContractRangeResponse_Fin: + finMsgReceived = true + default: + // we expect no any other message only just one range because we break the iteration + t.Fatal("received unexpected message", "type", v) + } + } + }) + } +} + func feltFromString(str string) *felt.Felt { f, err := (&felt.Felt{}).SetString(str) if err != nil { diff --git a/p2p/snap_syncer.go b/p2p/snap_syncer.go index 141961ec01..7998b3c360 100644 --- a/p2p/snap_syncer.go +++ b/p2p/snap_syncer.go @@ -116,8 +116,8 @@ var ( // For some reason, the trie throughput is higher if the batch size is small. 
classRangeChunksPerProof = 500 - contractRangeChunkPerProof = 500 - storageRangeChunkPerProof = 500 + contractRangeChunkPerProof = 501 + storageRangeChunkPerProof = 502 maxStorageBatchSize = 500 maxMaxPerStorageSize = 500
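
A minimal usage sketch of the Trie.IterateWithLimit helper added in core/trie/snap_support.go above, assuming only the signature introduced by this patch (startAddr, limitAddr, maxNodes, logger, consumer) and its ([]trie.ProofNode, bool, error) return. The function name iterateInChunks and the uint64 key arithmetic are illustrative rather than part of the patch; the resume trick mirrors the chunked-iteration test, whose keys fit in a uint64 (real trie keys generally do not).

package trie_example

import (
	"github.com/NethermindEth/juno/core/felt"
	"github.com/NethermindEth/juno/core/trie"
	"github.com/NethermindEth/juno/utils"
)

// iterateInChunks walks tr in chunks of at most chunkSize leaves, resuming each
// round from the last visited key + 1, until IterateWithLimit reports finished.
// It collects every visited key; a real caller would normally stream them instead.
func iterateInChunks(tr *trie.Trie, log utils.SimpleLogger, chunkSize uint32) ([]*felt.Felt, error) {
	var (
		visited  []*felt.Felt
		startKey *felt.Felt // nil starts from the lowest key in the trie
	)
	for {
		var lastKey *felt.Felt
		// The returned proof nodes cover the visited range and could be checked
		// with trie.VerifyRange against the trie root; they are ignored here.
		_, finished, err := tr.IterateWithLimit(startKey, nil, chunkSize, log,
			func(key, value *felt.Felt) error {
				lastKey = key
				visited = append(visited, key)
				return nil // a non-nil error would abort the iteration
			})
		if err != nil {
			return nil, err
		}
		if finished || lastKey == nil {
			return visited, nil // reached the end of the trie
		}
		// Resume just past the last key of this chunk. Mirrors the test's
		// SetUint64(lst.Uint64()+1) step and assumes keys fit in a uint64.
		startKey = new(felt.Felt).SetUint64(lastKey.Uint64() + 1)
	}
}

The server-side range handlers follow a similar chunked loop, emitting one response message per IterateWithLimit call and stopping once finished is reported.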