diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go
index e821a9f7c1..36c98946c4 100644
--- a/blockchain/blockchain.go
+++ b/blockchain/blockchain.go
@@ -829,13 +829,6 @@ func (b *Blockchain) HeadStateFreakingState() (*core.State, StateCloser, error)
 		return nil, nil, err
 	}
 
-	/*
-		_, err = chainHeight(txn)
-		if err != nil {
-			return nil, nil, utils.RunAndWrapOnError(txn.Discard, err)
-		}
-	*/
-
 	return core.NewState(txn), txn.Discard, nil
 }
diff --git a/blockchain/snap_server_interface.go b/blockchain/snap_server_interface.go
index deda7b8f81..e06e3b6c7a 100644
--- a/blockchain/snap_server_interface.go
+++ b/blockchain/snap_server_interface.go
@@ -9,6 +9,8 @@ import (
 	"github.com/NethermindEth/juno/db"
 )
 
+const MaxSnapshots = 128
+
 type snapshotRecord struct {
 	stateRoot     *felt.Felt
 	contractsRoot *felt.Felt
@@ -108,7 +110,7 @@ func (b *Blockchain) seedSnapshot() error {
 		return err
 	}
 
-	defer srCloser()
+	defer func() { _ = srCloser() }()
 
 	state := stateR.(*core.State)
 	contractsRoot, classRoot, err := state.StateAndClassRoot()
@@ -139,7 +141,7 @@ func (b *Blockchain) seedSnapshot() error {
 	// TODO: Reorgs
 	b.snapshots = append(b.snapshots, &dbsnap)
 
-	if len(b.snapshots) > 128 {
+	if len(b.snapshots) > MaxSnapshots {
 		toremove := b.snapshots[0]
 		err = toremove.closer()
 		if err != nil {
@@ -156,6 +158,7 @@ func (b *Blockchain) Close() {
 	for _, snapshot := range b.snapshots {
-		snapshot.closer()
+		// ignore the errors here as it's most likely called on shutdown
+		_ = snapshot.closer()
 	}
 }
diff --git a/core/trie/trie.go b/core/trie/trie.go
index ffa45cab6a..5ab0f2c481 100644
--- a/core/trie/trie.go
+++ b/core/trie/trie.go
@@ -21,11 +21,6 @@ type IterableStorage interface {
 	IterateLeaf(startKey *Key, consumer func(key, value *felt.Felt) (bool, error)) (bool, error)
 }
 
-var usingIterableStorage = promauto.NewCounterVec(prometheus.CounterOpts{
-	Name: "juno_trie_iterable_storage",
-	Help: "Time in address get",
-}, []string{"iterable"})
-
 type HashFunc func(*felt.Felt, *felt.Felt) *felt.Felt
 
 // Trie is a dense Merkle Patricia Trie (i.e., all internal nodes have two children).
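Aside: the MaxSnapshots constant introduced above caps how many state snapshots seedSnapshot keeps open, closing and evicting the oldest entry once the cap is exceeded. A minimal, self-contained sketch of that FIFO eviction pattern, with illustrative types rather than the real blockchain ones:

package main

import "fmt"

const maxSnapshots = 128

// snapshot stands in for the real snapshotRecord; the only part that
// matters here is the closer that releases the underlying DB view.
type snapshot struct {
	closer func() error
}

// appendBounded mirrors the seedSnapshot logic: append the new entry,
// then close and drop the oldest one once the cap is exceeded.
func appendBounded(snaps []*snapshot, snap *snapshot) ([]*snapshot, error) {
	snaps = append(snaps, snap)
	if len(snaps) > maxSnapshots {
		oldest := snaps[0]
		if err := oldest.closer(); err != nil {
			return snaps, err
		}
		snaps = snaps[1:]
	}
	return snaps, nil
}

func main() {
	var snaps []*snapshot
	var err error
	for i := 0; i < 200; i++ {
		if snaps, err = appendBounded(snaps, &snapshot{closer: func() error { return nil }}); err != nil {
			panic(err)
		}
	}
	fmt.Println(len(snaps)) // 128: older snapshots were closed and dropped
}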
diff --git a/sync/snap_server.go b/sync/snap_server.go
index e24b226c38..fe372df3d0 100644
--- a/sync/snap_server.go
+++ b/sync/snap_server.go
@@ -61,10 +61,14 @@ type snapServer struct {
 
 var _ SnapServerBlockchain = &blockchain.Blockchain{}
 
-const maxNodePerRequest = 1024 * 1024 // I just want it to process faster
+const (
+	_1024             = 1024
+	maxNodePerRequest = _1024 * _1024 // I just want it to process faster
+)
+
 func determineMaxNodes(specifiedMaxNodes uint64) uint64 {
 	if specifiedMaxNodes == 0 {
-		return 1024 * 16
+		return _1024 * 16
 	}
 
 	if specifiedMaxNodes < maxNodePerRequest {
@@ -94,7 +98,7 @@ func (b *snapServer) GetClassRange(ctx context.Context, request *spec.ClassRange
 			yield(nil, err)
 			return
 		}
-		defer classCloser()
+		defer func() { _ = classCloser() }()
 
 		startAddr := p2p2core.AdaptHash(request.Start)
 		limitAddr := p2p2core.AdaptHash(request.End)
@@ -108,10 +112,11 @@ func (b *snapServer) GetClassRange(ctx context.Context, request *spec.ClassRange
 		}
 
 		classkeys := []*felt.Felt{}
-		proofs, finished, err := iterateWithLimit(ctrie, startAddr, limitAddr, determineMaxNodes(uint64(request.ChunksPerProof)), func(key, value *felt.Felt) error {
-			classkeys = append(classkeys, key)
-			return nil
-		})
+		proofs, finished, err := iterateWithLimit(ctrie, startAddr, limitAddr, determineMaxNodes(uint64(request.ChunksPerProof)),
+			func(key, value *felt.Felt) error {
+				classkeys = append(classkeys, key)
+				return nil
+			})
 
 		coreclasses, err := b.blockchain.GetClasses(classkeys)
 		if err != nil {
@@ -145,7 +150,7 @@ func (b *snapServer) GetClassRange(ctx context.Context, request *spec.ClassRange
 			startAddr = classkeys[len(classkeys)-1]
 		}
 
-		// will this send a `Fin` as in https://github.com/starknet-io/starknet-p2p-specs/blob/e335372d39b728372c0ff393bef78763deeb3fcb/p2p/proto/snapshot.proto#L77
+		// TODO: not needed? - just stop the loop
 		yield(nil, nil)
 	}
 }
@@ -171,43 +176,44 @@ func (b *snapServer) GetContractRange(ctx context.Context, request *spec.Contrac
 			yield(nil, err)
 			return
 		}
-		defer scloser()
+		defer func() { _ = scloser() }()
 
 		startAddr := p2p2core.AdaptAddress(request.Start)
 		limitAddr := p2p2core.AdaptAddress(request.End)
 
 		states := []*spec.ContractState{}
 
 		for {
-			proofs, finished, err := iterateWithLimit(strie, startAddr, limitAddr, determineMaxNodes(uint64(request.ChunksPerProof)), func(key, value *felt.Felt) error {
-				classHash, err := s.ContractClassHash(key)
-				if err != nil {
-					return err
-				}
-
-				nonce, err := s.ContractNonce(key)
-				if err != nil {
-					return err
-				}
-
-				ctr, err := s.StorageTrieForAddr(key)
-				if err != nil {
-					return err
-				}
-
-				croot, err := ctr.Root()
-				if err != nil {
-					return err
-				}
-
-				startAddr = key
-				states = append(states, &spec.ContractState{
-					Address: core2p2p.AdaptAddress(key),
-					Class:   core2p2p.AdaptHash(classHash),
-					Storage: core2p2p.AdaptHash(croot),
-					Nonce:   nonce.Uint64(),
+			proofs, finished, err := iterateWithLimit(strie, startAddr, limitAddr, determineMaxNodes(uint64(request.ChunksPerProof)),
+				func(key, value *felt.Felt) error {
+					classHash, err := s.ContractClassHash(key)
+					if err != nil {
+						return err
+					}
+
+					nonce, err := s.ContractNonce(key)
+					if err != nil {
+						return err
+					}
+
+					ctr, err := s.StorageTrieForAddr(key)
+					if err != nil {
+						return err
+					}
+
+					croot, err := ctr.Root()
+					if err != nil {
+						return err
+					}
+
+					startAddr = key
+					states = append(states, &spec.ContractState{
+						Address: core2p2p.AdaptAddress(key),
+						Class:   core2p2p.AdaptHash(classHash),
+						Storage: core2p2p.AdaptHash(croot),
+						Nonce:   nonce.Uint64(),
+					})
+					return nil
+				})
-				return nil
-			})
 			if err != nil {
 				yield(nil, err)
 				return
@@ -260,15 +266,16 @@ func (b *snapServer) GetStorageRange(ctx context.Context, request *StorageRangeR
 				return
 			}
 
-			handled, err := b.handleStorageRangeRequest(ctx, strie, query, request.ChunkPerProof, contractLimit, func(values []*spec.ContractStoredValue, proofs []trie.ProofNode) {
-				yield(&StorageRangeStreamingResult{
-					ContractsRoot: contractRoot,
-					ClassesRoot:   classRoot,
-					StorageAddr:   p2p2core.AdaptAddress(query.Address),
-					Range:         values,
-					RangeProof:    Core2P2pProof(proofs),
-				}, nil)
-			})
+			handled, err := b.handleStorageRangeRequest(ctx, strie, query, request.ChunkPerProof, contractLimit,
+				func(values []*spec.ContractStoredValue, proofs []trie.ProofNode) {
+					yield(&StorageRangeStreamingResult{
+						ContractsRoot: contractRoot,
+						ClassesRoot:   classRoot,
+						StorageAddr:   p2p2core.AdaptAddress(query.Address),
+						Range:         values,
+						RangeProof:    Core2P2pProof(proofs),
+					}, nil)
+				})
 			if err != nil {
 				yield(nil, err)
 				return
@@ -299,7 +306,7 @@ func (b *snapServer) GetClasses(ctx context.Context, felts []*felt.Felt) ([]*spe
 
 func (b *snapServer) handleStorageRangeRequest(
 	ctx context.Context,
-	trie *trie.Trie,
+	stTrie *trie.Trie,
 	request *spec.StorageRangeQuery,
 	maxChunkPerProof uint64,
 	nodeLimit uint64,
@@ -325,7 +332,7 @@ func (b *snapServer) handleStorageRangeRequest(
 			limit = nodeLimit
 		}
 
-		proofs, finish, err := iterateWithLimit(trie, startAddr, endAddr, limit, func(key, value *felt.Felt) error {
+		proofs, finish, err := iterateWithLimit(stTrie, startAddr, endAddr, limit, func(key, value *felt.Felt) error {
 			response = append(response, &spec.ContractStoredValue{
 				Key:   core2p2p.AdaptFelt(key),
 				Value: core2p2p.AdaptFelt(value),
@@ -345,11 +352,8 @@ func (b *snapServer) handleStorageRangeRequest(
 		}
 
 		yield(response, proofs)
 
-		if finished {
-			return totalSent, nil
-		}
-		totalSent += totalSent
+		totalSent += int64(len(response))
 		nodeLimit -= limit
 
 		asBint := startAddr.BigInt(big.NewInt(0))
diff --git a/sync/snap_server_test.go b/sync/snap_server_test.go
index 80046fbd08..8c846999b3 100644
--- a/sync/snap_server_test.go
+++ b/sync/snap_server_test.go
@@ -24,8 +24,6 @@ func TestClassRange(t *testing.T) {
 	d, _ = pebble.NewWithOptions("/Users/pnowosie/juno/snapshots/juno-sepolia", 128000000, 128, false)
 	defer func() { _ = d.Close() }()
 	bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered
-	_, err := utils.NewZapLogger(utils.DEBUG, false)
-	assert.NoError(t, err)
 
 	b, err := bc.Head()
 	assert.NoError(t, err)
@@ -73,9 +71,6 @@ func TestContractRange(t *testing.T) {
 	defer func() { _ = d.Close() }()
 	bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered
 
-	_, err := utils.NewZapLogger(utils.DEBUG, false)
-	assert.NoError(t, err)
-
 	b, err := bc.Head()
 	assert.NoError(t, err)
@@ -150,9 +145,6 @@ func TestContractStorageRange(t *testing.T) {
 	defer func() { _ = d.Close() }()
 	bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered
 
-	_, err := utils.NewZapLogger(utils.DEBUG, false)
-	assert.NoError(t, err)
-
 	b, err := bc.Head()
 	assert.NoError(t, err)
diff --git a/sync/snapsyncer.go b/sync/snapsyncer.go
index 21d4204042..988ea68068 100644
--- a/sync/snapsyncer.go
+++ b/sync/snapsyncer.go
@@ -26,6 +26,8 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
+const JOB_DURATION = time.Second * 10
+
 type Blockchain interface {
 	GetClasses(felts []*felt.Felt) ([]core.Class, error)
 	PutClasses(blockNumber uint64, v1CompiledHashes map[felt.Felt]*felt.Felt, newClasses map[felt.Felt]core.Class) error
@@ -71,36 +73,23 @@ func NewSnapSyncer(
 	baseSyncher service.Service,
 	consensus starknetdata.StarknetData,
 	server SnapServer,
-	blockchain *blockchain.Blockchain,
+	bc *blockchain.Blockchain,
 	log utils.Logger,
 ) *SnapSyncher {
 	return &SnapSyncher{
 		baseSync:     baseSyncher,
 		starknetData: consensus,
 		snapServer:   server,
-		blockchain:   blockchain,
+		blockchain:   bc,
 		log:          log,
 	}
 }
 
 var (
-	addressDurations = promauto.NewHistogramVec(prometheus.HistogramOpts{
-		Name:    "juno_address_durations",
-		Help:    "Time in address get",
-		Buckets: prometheus.ExponentialBuckets(1.0, 1.7, 30),
-	}, []string{"phase"})
-	storageDurations = promauto.NewCounterVec(prometheus.CounterOpts{
-		Name: "juno_storage_durations",
-		Help: "Time in address get",
-	}, []string{"phase"})
-	storageStoreSize = promauto.NewGauge(prometheus.GaugeOpts{
-		Name: "juno_storage_store_size",
-		Help: "Time in address get",
-	})
-	storageStoreSizeTotal = promauto.NewCounter(prometheus.CounterOpts{
-		Name: "juno_storage_store_size_total",
-		Help: "Time in address get",
-	})
+	// magic values linter does not like
+	start  = float64(1.0)
+	factor = float64(1.5)
+	count  = 30
 
 	rangeProgress = promauto.NewGauge(prometheus.GaugeOpts{
 		Name: "juno_range_progress",
@@ -112,25 +101,10 @@ var (
 		Help: "Time in address get",
 	})
 
-	updateContractTotal = promauto.NewCounterVec(prometheus.CounterOpts{
-		Name: "juno_updated_contract_totals",
-		Help: "Time in address get",
-	}, []string{"location"})
-
-	storageLeafSize = promauto.NewHistogram(prometheus.HistogramOpts{
-		Name:    "juno_storage_leaf_size",
-		Help:    "Time in address get",
-		Buckets: prometheus.ExponentialBuckets(1.0, 1.5, 30),
-	})
 	storageAddressCount = promauto.NewHistogram(prometheus.HistogramOpts{
 		Name:    "juno_storage_address_count",
 		Help:    "Time in address get",
-		Buckets: prometheus.ExponentialBuckets(1.0, 1.5, 30),
-	})
-	storageLargeLeafSize = promauto.NewHistogram(prometheus.HistogramOpts{
-		Name:    "juno_storage_large_leaf_size",
-		Help:    "Time in address get",
-		Buckets: prometheus.ExponentialBuckets(1.0, 1.5, 30),
+		Buckets: prometheus.ExponentialBuckets(start, factor, count),
 	})
 )
@@ -172,27 +146,17 @@ func (s *SnapSyncher) Run(ctx context.Context) error {
 		return err
 	}
 
-	/*
-		for i := s.startingBlock.Number; i <= s.lastBlock.Number; i++ {
-			s.log.Infow("applying block", "blockNumber", i, "lastBlock", s.lastBlock.Number)
-
-			err = s.ApplyStateUpdate(uint64(i))
-			if err != nil {
-				return errors.Join(err, errors.New("error applying state update"))
-			}
-		}
-
-		err = s.verifyTrie(ctx)
-		if err != nil {
-			return err
-		}
-	*/
-
 	s.log.Infow("delegating to standard synchronizer")
 	return s.baseSync.Run(ctx)
 }
 
-func VerifyTrie(expectedRoot *felt.Felt, paths, hashes []*felt.Felt, proofs []trie.ProofNode, height uint8, hash func(*felt.Felt, *felt.Felt) *felt.Felt) (bool, error) {
+func VerifyTrie(
+	expectedRoot *felt.Felt,
+	paths, hashes []*felt.Felt,
+	proofs []trie.ProofNode,
+	height uint8,
+	hash func(*felt.Felt, *felt.Felt) *felt.Felt,
+) (bool, error) {
 	hasMore, valid, err := trie.VerifyRange(expectedRoot, nil, paths, hashes, proofs, hash, height)
 	if err != nil {
 		return false, err
@@ -204,12 +168,13 @@ func VerifyTrie(expectedRoot *felt.Felt, paths, hashes []*felt.Felt, proofs []tr
 	return hasMore, nil
 }
 
+// nolint
 func (s *SnapSyncher) runPhase1(ctx context.Context) error {
 	starttime := time.Now()
 
 	err := s.initState(ctx)
 	if err != nil {
-		return errors.Join(err, errors.New("error initializing snap syncer state"))
+		return errors.Join(err, errors.New("error initialising snap syncer state"))
 	}
 
 	eg, ectx := errgroup.WithContext(ctx)
@@ -218,7 +183,7 @@ func (s *SnapSyncher) runPhase1(ctx context.Context) error {
 	eg.Go(func() error {
 		defer func() {
 			s.log.Infow("pool latest block done")
 			if err := recover(); err != nil {
-				s.log.Errorw("latest block pool paniced", "err", err)
+				s.log.Errorw("latest block pool panicked", "err", err)
 			}
 		}()
@@ -228,7 +193,7 @@ func (s *SnapSyncher) runPhase1(ctx context.Context) error {
 
 	eg.Go(func() error {
 		defer func() {
 			if err := recover(); err != nil {
-				s.log.Errorw("class range paniced", "err", err)
+				s.log.Errorw("class range panicked", "err", err)
 			}
 		}()
@@ -322,7 +287,7 @@ func (s *SnapSyncher) runPhase1(ctx context.Context) error {
 		return err
 	}
 
-	s.log.Infow("first phase completed", "duration", time.Now().Sub(starttime).String())
+	s.log.Infow("first phase completed", "duration", time.Since(starttime))
 
 	return nil
 }
@@ -377,16 +342,18 @@ func (s *SnapSyncher) initState(ctx context.Context) error {
 }
 
 func calculatePercentage(f *felt.Felt) uint64 {
+	const MAX_PERCENT = 100
 	maxint := big.NewInt(1)
-	maxint.Lsh(maxint, 251)
+	maxint.Lsh(maxint, core.GlobalTrieHeight)
 
-	theint := f.BigInt(big.NewInt(0))
-	theint.Mul(theint, big.NewInt(100))
-	theint.Div(theint, maxint)
+	percent := f.BigInt(big.NewInt(0))
+	percent.Mul(percent, big.NewInt(MAX_PERCENT))
+	percent.Div(percent, maxint)
 
-	return theint.Uint64()
+	return percent.Uint64()
 }
 
+// nolint
 func (s *SnapSyncher) runClassRangeWorker(ctx context.Context) error {
 	totaladded := 0
 	completed := false
@@ -417,7 +384,7 @@ func (s *SnapSyncher) runClassRangeWorker(ctx context.Context) error {
 			}
 
 			s.log.Infow("got", "res", len(response.Range.Classes), "err", reqErr, "startAdr", startAddr)
-			err := VerifyGlobalStateRoot(stateRoot, response.ClassesRoot, response.ContractsRoot)
+			err = VerifyGlobalStateRoot(stateRoot, response.ClassesRoot, response.ContractsRoot)
 			if err != nil {
 				s.log.Infow("global state root verification failure")
 				// Root verification failed
@@ -532,7 +499,7 @@ func P2pProofToTrieProofs(proof *spec.PatriciaRangeProof) []trie.ProofNode {
 
 var stateVersion = new(felt.Felt).SetBytes([]byte(`STARKNET_STATE_V0`))
 
-func VerifyGlobalStateRoot(globalStateRoot *felt.Felt, classRoot *felt.Felt, storageRoot *felt.Felt) error {
+func VerifyGlobalStateRoot(globalStateRoot, classRoot, storageRoot *felt.Felt) error {
 	if classRoot.IsZero() {
 		if globalStateRoot.Equal(storageRoot) {
 			return nil
@@ -556,6 +523,7 @@ func CalculateClassHash(cls core.Class) *felt.Felt {
 	return hash
 }
 
+// nolint
 func (s *SnapSyncher) runContractRangeWorker(ctx context.Context) error {
 	startAddr := &felt.Zero
 	completed := false
@@ -570,7 +538,7 @@ func (s *SnapSyncher) runContractRangeWorker(ctx context.Context) error {
 		Start:          core2p2p.AdaptAddress(startAddr),
 		End:            nil, // No need for now.
 		ChunksPerProof: uint32(contractRangeChunkPerProof),
-	})(func(response *ContractRangeStreamingResult, err error) bool {
+	})(func(response *ContractRangeStreamingResult, _err error) bool {
 		s.log.Infow("snap range progress", "progress", calculatePercentage(startAddr), "addr", startAddr)
 		rangeProgress.Set(float64(calculatePercentage(startAddr)))
@@ -595,10 +563,9 @@ func (s *SnapSyncher) runContractRangeWorker(ctx context.Context) error {
 		}
 
 		proofs := P2pProofToTrieProofs(response.RangeProof)
-		hasNext, ierr := VerifyTrie(response.ContractsRoot, paths, values, proofs, core.GlobalTrieHeight, crypto.Pedersen)
-		if ierr != nil {
-			err = ierr
-			// The peer should get penalized in this case
+		hasNext, err := VerifyTrie(response.ContractsRoot, paths, values, proofs, core.GlobalTrieHeight, crypto.Pedersen)
+		if err != nil {
+			// The peer should get penalised in this case
 			return false
 		}
@@ -673,17 +640,6 @@ func calculateContractCommitment(storageRoot, classHash, nonce *felt.Felt) *felt
 	return crypto.Pedersen(crypto.Pedersen(crypto.Pedersen(classHash, storageRoot), nonce), &felt.Zero)
 }
 
-/**
-type StateDiff struct {
-	StorageDiffs      map[felt.Felt]map[felt.Felt]*felt.Felt // addr -> {key -> value, ...}
-	Nonces            map[felt.Felt]*felt.Felt               // addr -> nonce
-	DeployedContracts map[felt.Felt]*felt.Felt               // addr -> class hash
-	DeclaredV0Classes []*felt.Felt                           // class hashes
-	DeclaredV1Classes map[felt.Felt]*felt.Felt               // class hash -> compiled class hash
-	ReplacedClasses   map[felt.Felt]*felt.Felt               // addr -> class hash
-}
-*/
-
 func (s *SnapSyncher) queueClassJob(ctx context.Context, classHash *felt.Felt) error {
 	queued := false
 	for !queued {
@@ -699,7 +655,7 @@ func (s *SnapSyncher) queueClassJob(ctx context.Context, classHash *felt.Felt) e
 	return nil
 }
 
-func (s *SnapSyncher) queueStorageRangeJob(ctx context.Context, path *felt.Felt, storageRoot *felt.Felt, classHash *felt.Felt, nonce uint64) error {
+func (s *SnapSyncher) queueStorageRangeJob(ctx context.Context, path, storageRoot, classHash *felt.Felt, nonce uint64) error {
 	return s.queueStorageRangeJobJob(ctx, &storageRangeJob{
 		path:        path,
 		storageRoot: storageRoot,
@@ -718,7 +674,7 @@ func (s *SnapSyncher) queueStorageRangeJobJob(ctx context.Context, job *storageR
 			atomic.AddInt32(&s.storageRangeJobCount, 1)
 		case <-ctx.Done():
 			return ctx.Err()
-		case <-time.After(time.Second * 10):
+		case <-time.After(JOB_DURATION):
 			s.log.Infow("queue storage range stall")
 		}
 	}
@@ -741,6 +697,7 @@ func (s *SnapSyncher) queueStorageRefreshJob(ctx context.Context, job *storageRa
 	return nil
 }
 
+// nolint
 func (s *SnapSyncher) runStorageRangeWorker(ctx context.Context, workerIdx int) error {
 	nextjobs := make([]*storageRangeJob, 0)
 	for {
@@ -758,7 +715,7 @@ func (s *SnapSyncher) runStorageRangeWorker(ctx context.Context, workerIdx int)
 				jobs = append(jobs, job)
 			case <-ctx.Done():
 				return ctx.Err()
-			case <-time.After(time.Second * 10):
+			case <-time.After(JOB_DURATION):
 				if len(jobs) > 0 {
 					break requestloop
 				}
@@ -797,10 +754,11 @@ func (s *SnapSyncher) runStorageRangeWorker(ctx context.Context, workerIdx int)
 			StateRoot:     stateRoot,
 			ChunkPerProof: uint64(storageRangeChunkPerProof),
 			Queries:       requests,
-		})(func(response *StorageRangeStreamingResult, err error) bool {
+		})(func(response *StorageRangeStreamingResult, _err error) bool {
 			job := jobs[processedJobs]
 			if !job.path.Equal(response.StorageAddr) {
-				s.log.Errorw(fmt.Sprintf("storage addr differ %s %s %d\n", job.path, response.StorageAddr, workerIdx))
+				s.log.Errorw(fmt.Sprintf(
+					"storage addr differ %s %s %d\n", job.path, response.StorageAddr, workerIdx))
 				return false
 			}
@@ -890,7 +848,7 @@ func (s *SnapSyncher) runStorageRangeWorker(ctx context.Context, workerIdx int)
 			return err
 		}
 
-		// TODO: Just slice?
+		// TODO: assign to nil to clear memory
 		nextjobs = make([]*storageRangeJob, 0)
 		for i := processedJobs; i < len(jobs); i++ {
 			unprocessedRequest := jobs[i]
@@ -904,7 +862,7 @@ func (s *SnapSyncher) poolLatestBlock(ctx context.Context) error {
 		select {
 		case <-ctx.Done():
 			return nil
-		case <-time.After(time.Second * 10):
+		case <-time.After(JOB_DURATION):
 			break
 		case <-s.storageRangeDone:
 			return nil
@@ -917,7 +875,8 @@ func (s *SnapSyncher) poolLatestBlock(ctx context.Context) error {
 
 		// TODO: Race issue
 		if newTarget.Number-s.lastBlock.Number < uint64(maxPivotDistance) {
-			s.log.Infow("Not updating pivot yet", "lastblock", s.lastBlock.Number, "newTarget", newTarget.Number, "diff", newTarget.Number-s.lastBlock.Number)
+			s.log.Infow("Not updating pivot yet", "lastblock", s.lastBlock.Number,
+				"newTarget", newTarget.Number, "diff", newTarget.Number-s.lastBlock.Number)
 			continue
 		}
@@ -944,7 +903,7 @@ func (s *SnapSyncher) runFetchClassJob(ctx context.Context) error {
 		select {
 		case <-ctx.Done():
 			return ctx.Err()
-		case <-time.After(time.Second * 10):
+		case <-time.After(JOB_DURATION):
 			// Just request whatever we have
 			if len(keyBatches) > 0 {
 				break requestloop
@@ -1003,7 +962,7 @@ func (s *SnapSyncher) runFetchClassJob(ctx context.Context) error {
 
 				if !h.Equal(keyBatches[i]) {
 					s.log.Warnw("invalid classhash", "got", h, "expected", keyBatches[i])
-					// return errors.New("invalid class hash")
+					return errors.New("invalid class hash")
 				}
 
 				if coreClass.Version() == 1 {
@@ -1021,7 +980,7 @@ func (s *SnapSyncher) runFetchClassJob(ctx context.Context) error {
 			}
 		} else {
 			s.log.Errorw("Unable to fetch any class from peer")
-			// TODO: Penalize peer?
+			// TODO: Penalise peer?
 		}
 
 		newBatch := make([]*felt.Felt, 0)
@@ -1053,7 +1012,7 @@ func (s *SnapSyncher) runStorageRefreshWorker(ctx context.Context) error {
 		select {
 		case <-ctx.Done():
 			return ctx.Err()
-		case <-time.After(time.Second * 10):
+		case <-time.After(JOB_DURATION):
 			s.log.Infow("waiting for more refresh job", "count", s.storageRangeJobCount)
 		case <-contractDoneChecker:
 			// Its done...
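Aside: JOB_DURATION drives a batching shape that recurs in runStorageRangeWorker, runFetchClassJob and runStorageRefreshWorker above: collect jobs until a cap is reached, but flush early when the timer fires and something is already queued. A simplified, hypothetical sketch of that select loop (an int channel stands in for the real job types):

package main

import (
	"context"
	"fmt"
	"time"
)

const jobDuration = 10 * time.Second // mirrors JOB_DURATION

// drainBatch collects up to maxBatch jobs, but stops waiting once the
// timer fires if it already holds at least one job, the same
// select/time.After pattern the snap sync workers use.
func drainBatch(ctx context.Context, jobs <-chan int, maxBatch int) ([]int, error) {
	batch := make([]int, 0, maxBatch)
	for len(batch) < maxBatch {
		select {
		case j := <-jobs:
			batch = append(batch, j)
		case <-ctx.Done():
			return batch, ctx.Err()
		case <-time.After(jobDuration):
			if len(batch) > 0 {
				return batch, nil // just request whatever we have
			}
			// nothing queued yet; keep waiting
		}
	}
	return batch, nil
}

func main() {
	jobs := make(chan int, 8)
	for i := 0; i < 5; i++ {
		jobs <- i
	}
	batch, _ := drainBatch(context.Background(), jobs, 4)
	fmt.Println(batch) // [0 1 2 3]; the fifth job waits for the next batch
}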
@@ -1066,8 +1025,8 @@ func (s *SnapSyncher) runStorageRefreshWorker(ctx context.Context) error {
 		bigIntAdd := job.startAddress.BigInt(&big.Int{})
 		bigIntAdd = (&big.Int{}).Add(bigIntAdd, big.NewInt(1))
 
-		fp := fp.NewElement(0)
-		limitAddr := felt.NewFelt((&fp).SetBigInt(bigIntAdd))
+		elem := fp.NewElement(0)
+		limitAddr := felt.NewFelt((&elem).SetBigInt(bigIntAdd))
 
 		var err error
 		stateRoot := s.currentGlobalStateRoot
@@ -1077,7 +1036,7 @@ func (s *SnapSyncher) runStorageRefreshWorker(ctx context.Context) error {
 			Start:          core2p2p.AdaptAddress(job.startAddress),
 			End:            core2p2p.AdaptAddress(limitAddr),
 			ChunksPerProof: 10000,
-		})(func(response *ContractRangeStreamingResult, err error) bool {
+		})(func(response *ContractRangeStreamingResult, _err error) bool {
 			if response.Range == nil && response.RangeProof == nil {
 				// State root missing.
 				return false
@@ -1106,7 +1065,7 @@ func (s *SnapSyncher) runStorageRefreshWorker(ctx context.Context) error {
 
 			proofs := P2pProofToTrieProofs(response.RangeProof)
 			_, err = VerifyTrie(response.ContractsRoot, paths, values, proofs, core.GlobalTrieHeight, crypto.Pedersen)
 			if err != nil {
-				// The peer should get penalized in this case
+				// The peer should get penalised in this case
 				return false
 			}
diff --git a/sync/snapsyncer_test.go b/sync/snapsyncer_test.go
index f7e0aa8363..33efa41d0c 100644
--- a/sync/snapsyncer_test.go
+++ b/sync/snapsyncer_test.go
@@ -229,6 +229,7 @@ func TestSnapOfflineCopy(t *testing.T) {
 	bc2 := blockchain.New(d2, &utils.Sepolia)
 
 	logger, err := utils.NewZapLogger(utils.DEBUG, false)
+	assert.NoError(t, err)
 
 	syncer := NewSnapSyncer(
 		&NoopService{
@@ -279,7 +280,7 @@ func TestSnapCopyTrie(t *testing.T) {
 
 	go func() {
 		http.Handle("/metrics", promhttp.Handler())
-		http.ListenAndServe(":9201", nil)
+		_ = http.ListenAndServe(":9201", nil)
 	}()
 
 	var d2 db.DB
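Aside: calculatePercentage above reports sync progress by mapping a felt address onto 0-100 across the 2^251 global-trie key space (core.GlobalTrieHeight is 251). The same arithmetic with plain big.Int, as a self-contained sketch:

package main

import (
	"fmt"
	"math/big"
)

// percentOfKeySpace returns addr * 100 / 2^251, i.e. how far addr sits
// within the 251-bit trie key space.
func percentOfKeySpace(addr *big.Int) uint64 {
	maxInt := new(big.Int).Lsh(big.NewInt(1), 251)
	p := new(big.Int).Mul(addr, big.NewInt(100))
	return p.Div(p, maxInt).Uint64()
}

func main() {
	half := new(big.Int).Lsh(big.NewInt(1), 250) // 2^250, half the key space
	fmt.Println(percentOfKeySpace(half))         // 50
}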