From af661a9459d997f09b8f514daa25e46b00dce9b8 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Thu, 14 Jul 2022 10:03:15 +0100 Subject: [PATCH 01/72] Fix canExtendCanonical when some headers are downloaded (#4709) * Fix canExtendCanonical when some headers are downloaded * Restore original logic for forkValidator.ValidatePayload * Check FCU status --- eth/stagedsync/stage_headers.go | 58 +++++++++++++++++++++----------- turbo/stages/sentry_mock_test.go | 3 ++ 2 files changed, 41 insertions(+), 20 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 498ae67ec20..8dfbb1551b5 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -193,7 +193,7 @@ func HeadersPOS( var payloadStatus *privateapi.PayloadStatus var err error if forkChoiceInsteadOfNewPayload { - payloadStatus, err = startHandlingForkChoice(forkChoiceMessage, requestStatus, requestId, s, u, ctx, tx, cfg, headerInserter, cfg.blockReader) + payloadStatus, err = startHandlingForkChoice(forkChoiceMessage, requestStatus, requestId, s, u, ctx, tx, cfg, headerInserter) } else { payloadStatus, err = handleNewPayload(payloadMessage, requestStatus, requestId, s, ctx, tx, cfg, headerInserter) } @@ -267,7 +267,6 @@ func startHandlingForkChoice( tx kv.RwTx, cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter, - headerReader services.HeaderReader, ) (*privateapi.PayloadStatus, error) { headerHash := forkChoice.HeadBlockHash log.Debug(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) @@ -307,7 +306,7 @@ func startHandlingForkChoice( } // Header itself may already be in the snapshots, if CL starts off at much earlier state than Erigon - header, err := headerReader.HeaderByHash(ctx, tx, headerHash) + header, err := cfg.blockReader.HeaderByHash(ctx, tx, headerHash) if err != nil { log.Warn(fmt.Sprintf("[%s] Fork choice err (reading header by hash %x)", s.LogPrefix(), headerHash), "err", err) cfg.hd.BeaconRequestList.Remove(requestId) @@ -369,16 +368,9 @@ func startHandlingForkChoice( } cfg.hd.UpdateTopSeenHeightPoS(headerNumber) - forkingPoint := uint64(0) - if headerNumber > 0 { - parent, err := headerReader.Header(ctx, tx, header.ParentHash, headerNumber-1) - if err != nil { - return nil, err - } - forkingPoint, err = headerInserter.ForkingPoint(tx, header, parent) - if err != nil { - return nil, err - } + forkingPoint, err := forkingPoint(ctx, tx, headerInserter, cfg.blockReader, header) + if err != nil { + return nil, err } log.Info(fmt.Sprintf("[%s] Fork choice re-org", s.LogPrefix()), "headerNumber", headerNumber, "forkingPoint", forkingPoint) @@ -549,7 +541,7 @@ func handleNewPayload( } log.Debug(fmt.Sprintf("[%s] New payload begin verification", s.LogPrefix())) - response, success, err := verifyAndSaveNewPoSHeader(requestStatus, s, tx, cfg, header, payloadMessage.Body, headerInserter) + response, success, err := verifyAndSaveNewPoSHeader(requestStatus, s, ctx, tx, cfg, header, payloadMessage.Body, headerInserter) log.Debug(fmt.Sprintf("[%s] New payload verification ended", s.LogPrefix()), "success", success, "err", err) if err != nil || !success { return response, err @@ -566,6 +558,7 @@ func handleNewPayload( func verifyAndSaveNewPoSHeader( requestStatus engineapi.RequestStatus, s *StageState, + ctx context.Context, tx kv.RwTx, cfg HeadersCfg, header *types.Header, @@ -586,17 +579,24 @@ func verifyAndSaveNewPoSHeader( } currentHeadHash := rawdb.ReadHeadHeaderHash(tx) - 
canExtendCanonical := header.ParentHash == currentHeadHash + + forkingPoint, err := forkingPoint(ctx, tx, headerInserter, cfg.blockReader, header) + if err != nil { + return nil, false, err + } + forkingHash, err := cfg.blockReader.CanonicalHash(ctx, tx, forkingPoint) + + canExtendCanonical := forkingHash == currentHeadHash canExtendFork := cfg.forkValidator.ExtendingForkHeadHash() == (common.Hash{}) || header.ParentHash == cfg.forkValidator.ExtendingForkHeadHash() - if cfg.memoryOverlay && (canExtendFork || !canExtendCanonical) { - status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, canExtendCanonical) + if cfg.memoryOverlay && (canExtendFork || header.ParentHash != currentHeadHash) { + status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, header.ParentHash == currentHeadHash /* extendCanonical */) if criticalError != nil { - return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError + return nil, false, criticalError } success = validationError == nil if !success { - log.Warn("Verification failed for header", "hash", headerHash, "height", headerNumber, "err", validationError) + log.Warn("Validation failed for header", "hash", headerHash, "height", headerNumber, "err", validationError) cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) } else if err := headerInserter.FeedHeaderPoS(tx, header, headerHash); err != nil { return nil, false, err @@ -613,7 +613,7 @@ func verifyAndSaveNewPoSHeader( } if !canExtendCanonical { - log.Info("Side chain or something weird", "parentHash", header.ParentHash, "currentHead", currentHeadHash) + log.Info("Side chain", "parentHash", header.ParentHash, "currentHead", currentHeadHash) return &privateapi.PayloadStatus{Status: remote.EngineStatus_ACCEPTED}, true, nil } @@ -708,6 +708,24 @@ func verifyAndSaveDownloadedPoSHeaders(tx kv.RwTx, cfg HeadersCfg, headerInserte cfg.hd.SetPosStatus(headerdownload.Idle) } +func forkingPoint( + ctx context.Context, + tx kv.RwTx, + headerInserter *headerdownload.HeaderInserter, + headerReader services.HeaderReader, + header *types.Header, +) (uint64, error) { + headerNumber := header.Number.Uint64() + if headerNumber == 0 { + return 0, nil + } + parent, err := headerReader.Header(ctx, tx, header.ParentHash, headerNumber-1) + if err != nil { + return 0, err + } + return headerInserter.ForkingPoint(tx, header, parent) +} + // HeadersPOW progresses Headers stage for Proof-of-Work headers func HeadersPOW( s *StageState, diff --git a/turbo/stages/sentry_mock_test.go b/turbo/stages/sentry_mock_test.go index f0a9c136efa..9ab7015e06b 100644 --- a/turbo/stages/sentry_mock_test.go +++ b/turbo/stages/sentry_mock_test.go @@ -605,6 +605,7 @@ func TestPoSDownloader(t *testing.T) { headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) + assert.Equal(t, chain.TopBlock.Hash(), headBlockHash) // Point forkChoice to the head forkChoiceMessage := engineapi.ForkChoiceMessage{ @@ -617,6 +618,8 @@ func TestPoSDownloader(t *testing.T) { require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) + payloadStatus = m.ReceivePayloadStatus() + assert.Equal(t, remote.EngineStatus_VALID, payloadStatus.Status) assert.Equal(t, chain.TopBlock.Hash(), headBlockHash) } From 211dbfbb7505c93285a9839dbf4dae9b8b46f359 Mon Sep 17 00:00:00 2001 From: 
Max Revitt Date: Thu, 14 Jul 2022 11:01:57 +0100 Subject: [PATCH 02/72] fix(#4543): BeginRo use semaphore (erigon-lib bump) (#4712) --- cmd/rpcdaemon/cli/config.go | 3 ++- cmd/rpcdaemon22/cli/config.go | 3 ++- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index d326e45911e..bd79d8a8da0 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -46,6 +46,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" + "golang.org/x/sync/semaphore" "google.golang.org/grpc" grpcHealth "google.golang.org/grpc/health" "google.golang.org/grpc/health/grpc_health_v1" @@ -255,7 +256,7 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, if cfg.WithDatadir { var rwKv kv.RwDB log.Trace("Creating chain db", "path", cfg.Dirs.Chaindata) - limiter := make(chan struct{}, cfg.DBReadConcurrency) + limiter := semaphore.NewWeighted(int64(cfg.DBReadConcurrency)) rwKv, err = kv2.NewMDBX(logger).RoTxsLimiter(limiter).Path(cfg.Dirs.Chaindata).Readonly().Open() if err != nil { return nil, nil, nil, nil, nil, nil, nil, nil, ff, err diff --git a/cmd/rpcdaemon22/cli/config.go b/cmd/rpcdaemon22/cli/config.go index 7f15ae9db48..abe200dfd3c 100644 --- a/cmd/rpcdaemon22/cli/config.go +++ b/cmd/rpcdaemon22/cli/config.go @@ -17,6 +17,7 @@ import ( "github.com/ledgerwatch/erigon/internal/debug" "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/rpc/rpccfg" + "golang.org/x/sync/semaphore" "github.com/ledgerwatch/erigon-lib/direct" "github.com/ledgerwatch/erigon-lib/gointerfaces" @@ -253,7 +254,7 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, if cfg.WithDatadir { var rwKv kv.RwDB log.Trace("Creating chain db", "path", cfg.Dirs.Chaindata) - limiter := make(chan struct{}, cfg.DBReadConcurrency) + limiter := semaphore.NewWeighted(int64(cfg.DBReadConcurrency)) rwKv, err = kv2.NewMDBX(logger).RoTxsLimiter(limiter).Path(cfg.Dirs.Chaindata).Readonly().Open() if err != nil { return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, err diff --git a/go.mod b/go.mod index ab125c696e3..b8e7bbe9d08 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220710110825-21c6baf2871c + github.com/ledgerwatch/erigon-lib v0.0.0-20220713123745-d629e31df75e github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index b687d0fb068..87deff96b61 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220710110825-21c6baf2871c h1:xcHZhKSsUezVVGiqMxhe2qlkoedgkCAyx1Zi+bY9Pxs= -github.com/ledgerwatch/erigon-lib v0.0.0-20220710110825-21c6baf2871c/go.mod h1:bttvdtZXjh803u/CeMerKYnWvVvXTICWSfpcMeQNtmc= +github.com/ledgerwatch/erigon-lib v0.0.0-20220713123745-d629e31df75e 
h1:lU3YEzEKf55d3Sd363FFwxMIHch7/59Xi4PLG4MHWcg= +github.com/ledgerwatch/erigon-lib v0.0.0-20220713123745-d629e31df75e/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 07e00b878c1ea38d39bf39f824c6bff433c94800 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Thu, 14 Jul 2022 11:30:50 +0100 Subject: [PATCH 03/72] use nested datadir/network path for db supporting legacy (#4713) Co-authored-by: Scott Fairclough --- cmd/utils/flags.go | 48 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 34 insertions(+), 14 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 583e84a3598..0e8e5a941a4 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "math/big" + "os" "path/filepath" "runtime" "strconv" @@ -32,14 +33,15 @@ import ( "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/txpool" - "github.com/ledgerwatch/erigon/cmd/downloader/downloader/downloadercfg" - "github.com/ledgerwatch/erigon/node/nodecfg" - "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/urfave/cli" + "github.com/ledgerwatch/erigon/cmd/downloader/downloader/downloadercfg" + "github.com/ledgerwatch/erigon/node/nodecfg" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" + "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/params/networkname" @@ -1069,30 +1071,48 @@ func DataDirForNetwork(datadir string, network string) string { case networkname.DevChainName: return "" // unless explicitly requested, use memory databases case networkname.RinkebyChainName: - return filepath.Join(datadir, "rinkeby") + return networkDataDirCheckingLegacy(datadir, "rinkeby") case networkname.GoerliChainName: - filepath.Join(datadir, "goerli") + return networkDataDirCheckingLegacy(datadir, "goerli") case networkname.KilnDevnetChainName: - filepath.Join(datadir, "kiln-devnet") + return networkDataDirCheckingLegacy(datadir, "kiln-devnet") case networkname.SokolChainName: - return filepath.Join(datadir, "sokol") + return networkDataDirCheckingLegacy(datadir, "sokol") case networkname.FermionChainName: - return filepath.Join(datadir, "fermion") + return networkDataDirCheckingLegacy(datadir, "fermion") case networkname.MumbaiChainName: - return filepath.Join(datadir, "mumbai") + return networkDataDirCheckingLegacy(datadir, "mumbai") case networkname.BorMainnetChainName: - return filepath.Join(datadir, "bor-mainnet") + return networkDataDirCheckingLegacy(datadir, "bor-mainnet") case networkname.BorDevnetChainName: - return filepath.Join(datadir, "bor-devnet") + return networkDataDirCheckingLegacy(datadir, "bor-devnet") case networkname.SepoliaChainName: - return filepath.Join(datadir, "sepolia") + return networkDataDirCheckingLegacy(datadir, "sepolia") case networkname.GnosisChainName: - return filepath.Join(datadir, "gnosis") + return networkDataDirCheckingLegacy(datadir, "gnosis") + default: return datadir } +} + +// networkDataDirCheckingLegacy checks if the datadir for the network already exists and uses that if found. 
+// if not checks for a LOCK file at the root of the datadir and uses this if found +// or by default assume a fresh node and to use the nested directory for the network +func networkDataDirCheckingLegacy(datadir, network string) string { + anticipated := filepath.Join(datadir, network) + + if _, err := os.Stat(anticipated); !os.IsNotExist(err) { + return anticipated + } + + legacyLockFile := filepath.Join(datadir, "LOCK") + if _, err := os.Stat(legacyLockFile); !os.IsNotExist(err) { + log.Info("Using legacy datadir") + return datadir + } - return datadir + return anticipated } func setDataDir(ctx *cli.Context, cfg *nodecfg.Config) { From 793ffcce3b19f02e69e37cf7b60837e1798d6f26 Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Thu, 14 Jul 2022 17:22:46 +0300 Subject: [PATCH 04/72] separated interrupt logic (#4714) --- eth/stagedsync/stage_headers.go | 39 +++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 8dfbb1551b5..5db5c099c43 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -164,16 +164,12 @@ func HeadersPOS( cfg.hd.SetHeaderReader(&chainReader{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}) headerInserter := headerdownload.NewHeaderInserter(s.LogPrefix(), nil, s.BlockNumber, cfg.blockReader) - if interrupt != engineapi.None { - if interrupt == engineapi.Stopping { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: errors.New("server is stopping")} - } - if interrupt == engineapi.Synced { - verifyAndSaveDownloadedPoSHeaders(tx, cfg, headerInserter) - } - if !useExternalTx { - return tx.Commit() - } + interrupted, err := handleInterrupt(interrupt, cfg, tx, headerInserter, useExternalTx) + if err != nil { + return err + } + + if interrupted { return nil } @@ -181,20 +177,15 @@ func HeadersPOS( requestStatus := requestWithStatus.Status // Decide what kind of action we need to take place - var payloadMessage *engineapi.PayloadMessage forkChoiceMessage, forkChoiceInsteadOfNewPayload := request.(*engineapi.ForkChoiceMessage) - if !forkChoiceInsteadOfNewPayload { - payloadMessage = request.(*engineapi.PayloadMessage) - } - cfg.hd.ClearPendingPayloadHash() cfg.hd.SetPendingPayloadStatus(nil) var payloadStatus *privateapi.PayloadStatus - var err error if forkChoiceInsteadOfNewPayload { payloadStatus, err = startHandlingForkChoice(forkChoiceMessage, requestStatus, requestId, s, u, ctx, tx, cfg, headerInserter) } else { + payloadMessage := request.(*engineapi.PayloadMessage) payloadStatus, err = handleNewPayload(payloadMessage, requestStatus, requestId, s, ctx, tx, cfg, headerInserter) } @@ -726,6 +717,22 @@ func forkingPoint( return headerInserter.ForkingPoint(tx, header, parent) } +func handleInterrupt(interrupt engineapi.Interrupt, cfg HeadersCfg, tx kv.RwTx, headerInserter *headerdownload.HeaderInserter, useExternalTx bool) (bool, error) { + if interrupt != engineapi.None { + if interrupt == engineapi.Stopping { + cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: errors.New("server is stopping")} + } + if interrupt == engineapi.Synced { + verifyAndSaveDownloadedPoSHeaders(tx, cfg, headerInserter) + } + if !useExternalTx { + return true, tx.Commit() + } + return true, nil + } + return false, nil +} + // HeadersPOW progresses Headers stage for Proof-of-Work headers func HeadersPOW( s *StageState, From 759e77c71bb01d678ffa959992588cdb7a50e25b Mon Sep 17 00:00:00 2001 From: Giulio 
rebuffo Date: Thu, 14 Jul 2022 19:08:33 +0200 Subject: [PATCH 05/72] mod (#4717) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b8e7bbe9d08..afa34d5579d 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220713123745-d629e31df75e + github.com/ledgerwatch/erigon-lib v0.0.0-20220714160525-21aa65c1c383 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 87deff96b61..d59d34e8091 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220713123745-d629e31df75e h1:lU3YEzEKf55d3Sd363FFwxMIHch7/59Xi4PLG4MHWcg= -github.com/ledgerwatch/erigon-lib v0.0.0-20220713123745-d629e31df75e/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= +github.com/ledgerwatch/erigon-lib v0.0.0-20220714160525-21aa65c1c383 h1:1EE1EIsDHok6NrzqQjGqkCj47APObiqFbgv+s7GJMrk= +github.com/ledgerwatch/erigon-lib v0.0.0-20220714160525-21aa65c1c383/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From f18a5b08643261eba5fd2c2efdd1609c244aa81d Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 15 Jul 2022 13:17:07 +0700 Subject: [PATCH 06/72] integration to pass mdbx.Accede flag (#4719) --- cmd/integration/commands/root.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/integration/commands/root.go b/cmd/integration/commands/root.go index 5db26bfeacc..0bef08f0553 100644 --- a/cmd/integration/commands/root.go +++ b/cmd/integration/commands/root.go @@ -11,6 +11,7 @@ import ( "github.com/ledgerwatch/erigon/migrations" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" + "github.com/torquem-ch/mdbx-go/mdbx" ) var rootCmd = &cobra.Command{ @@ -46,6 +47,9 @@ func dbCfg(label kv.Label, logger log.Logger, path string) kv2.MdbxOpts { } func openDB(opts kv2.MdbxOpts, applyMigrations bool) kv.RwDB { + // integration tool don't intent to create db, then easiest way to open db - it's pass mdbx.Accede flag, which allow + // to read all options from DB, instead of overriding them + opts = opts.Flags(func(f uint) uint { return f | mdbx.Accede }) db := opts.MustOpen() if applyMigrations { migrator := migrations.NewMigrator(opts.GetLabel()) From 7b57e26d84bb495d1a51fe12f7cfe0ccd245f191 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 15 Jul 2022 13:57:44 +0700 Subject: [PATCH 07/72] grafana: up security fix version #4721 --- docker-compose.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 40cf128e883..b5d77b58d1b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -66,7 +66,7 @@ services: prometheus: - image: 
prom/prometheus:v2.36.2 + image: prom/prometheus:v2.37.0 user: ${DOCKER_UID:1000}:${DOCKER_GID:1000} # Uses erigon user from Dockerfile command: --log.level=warn --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=150d --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles ports: [ "9090:9090" ] @@ -76,7 +76,7 @@ services: restart: unless-stopped grafana: - image: grafana/grafana:9.0.2 + image: grafana/grafana:9.0.3 user: "472:0" # required for grafana version >= 7.3 ports: [ "3000:3000" ] volumes: From 92e2311eb3333dad6eb7a7673f68aeb85aaa63f9 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 15 Jul 2022 14:39:27 +0700 Subject: [PATCH 08/72] docker compose: fix interpolation format #4722 Open --- docker-compose.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index b5d77b58d1b..0c6cf567c46 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -20,16 +20,16 @@ x-erigon-service: &default-erigon-service volumes_from: [ erigon ] restart: unless-stopped mem_swappiness: 0 + user: ${DOCKER_UID:-1000}:${DOCKER_GID:-1000} services: erigon: image: thorax/erigon:${TAG:-latest} build: args: - UID: ${DOCKER_UID:1000} - GID: ${DOCKER_GID:1000} + UID: ${DOCKER_UID:-1000} + GID: ${DOCKER_GID:-1000} context: . - user: "${DOCKER_UID:1000}:${DOCKER_GID:1000}" command: | erigon ${ERIGON_FLAGS-} --private.api.addr=0.0.0.0:9090 --sentry.api.addr=sentry:9091 --downloader.api.addr=downloader:9093 --txpool.disable @@ -67,7 +67,7 @@ services: prometheus: image: prom/prometheus:v2.37.0 - user: ${DOCKER_UID:1000}:${DOCKER_GID:1000} # Uses erigon user from Dockerfile + user: ${DOCKER_UID:-1000}:${DOCKER_GID:-1000} # Uses erigon user from Dockerfile command: --log.level=warn --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=150d --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles ports: [ "9090:9090" ] volumes: @@ -77,7 +77,7 @@ services: grafana: image: grafana/grafana:9.0.3 - user: "472:0" # required for grafana version >= 7.3 + user: 472:0 # required for grafana version >= 7.3 ports: [ "3000:3000" ] volumes: - ${ERIGON_GRAFANA_CONFIG:-./cmd/prometheus/grafana.ini}:/etc/grafana/grafana.ini From e8f83db20805f45ff9f883af5a5c26a727e098ab Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Fri, 15 Jul 2022 13:56:35 +0100 Subject: [PATCH 09/72] Gas api unit tests (#4715) * gas price initial unit tests * tweak(makefile): gas price test timeout increase increase test timeout in Makefile to 50s from 30s to cater for increased test time unit testing gas price logic. Co-authored-by: Scott Fairclough --- Makefile | 2 +- cmd/rpcdaemon/commands/eth_system_test.go | 93 +++++++++++++++++++++++ 2 files changed, 94 insertions(+), 1 deletion(-) create mode 100644 cmd/rpcdaemon/commands/eth_system_test.go diff --git a/Makefile b/Makefile index 1cff92359d3..3546d3e065e 100644 --- a/Makefile +++ b/Makefile @@ -127,7 +127,7 @@ db-tools: git-submodules @echo "Run \"$(GOBIN)/mdbx_stat -h\" to get info about mdbx db file." 
test: - $(GOTEST) --timeout 30s + $(GOTEST) --timeout 50s test-integration: $(GOTEST) --timeout 30m -tags $(BUILD_TAGS),integration diff --git a/cmd/rpcdaemon/commands/eth_system_test.go b/cmd/rpcdaemon/commands/eth_system_test.go new file mode 100644 index 00000000000..5163590e88e --- /dev/null +++ b/cmd/rpcdaemon/commands/eth_system_test.go @@ -0,0 +1,93 @@ +package commands + +import ( + "context" + "math" + "math/big" + "testing" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/stages" +) + +func TestGasPrice(t *testing.T) { + + cases := []struct { + description string + chainSize int + expectedPrice *big.Int + }{ + { + description: "standard settings 60 blocks", + chainSize: 60, + expectedPrice: big.NewInt(params.GWei * int64(36)), + }, + { + description: "standard settings 30 blocks", + chainSize: 30, + expectedPrice: big.NewInt(params.GWei * int64(18)), + }, + } + + for _, testCase := range cases { + t.Run(testCase.description, func(t *testing.T) { + db := createGasPriceTestKV(t, testCase.chainSize) + defer db.Close() + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + base := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false) + eth := NewEthAPI(base, db, nil, nil, nil, 5000000) + + ctx := context.Background() + result, err := eth.GasPrice(ctx) + if err != nil { + t.Fatalf("error getting gas price: %s", err) + } + + if testCase.expectedPrice.Cmp(result.ToInt()) != 0 { + t.Fatalf("gas price mismatch, want %d, got %d", testCase.expectedPrice, result.ToInt()) + } + }) + } + +} + +func createGasPriceTestKV(t *testing.T, chainSize int) kv.RwDB { + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr = crypto.PubkeyToAddress(key.PublicKey) + gspec = &core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}}, + } + signer = types.LatestSigner(gspec.Config) + ) + m := stages.MockWithGenesis(t, gspec, key) + + // Generate testing blocks + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, chainSize, func(i int, b *core.BlockGen) { + b.SetCoinbase(common.Address{1}) + tx, txErr := types.SignTx(types.NewTransaction(b.TxNonce(addr), common.HexToAddress("deadbeef"), uint256.NewInt(100), 21000, uint256.NewInt(uint64(int64(i+1)*params.GWei)), nil), *signer, key) + if txErr != nil { + t.Fatalf("failed to create tx: %v", txErr) + } + b.AddTx(tx) + }, false) + if err != nil { + t.Error(err) + } + // Construct testing chain + if err = m.InsertChain(chain); err != nil { + t.Error(err) + } + + return m.DB +} From b6440eea1e2d5c8eb36ab176e275e0b0c12cb99d Mon Sep 17 00:00:00 2001 From: Levi Aul Date: Fri, 15 Jul 2022 07:04:23 -0700 Subject: [PATCH 10/72] Add erigon_getBalanceChangesInBlock RPC endpoint (#4609) * Add eth_getBalanceChangesInBlock RPC endpoint * Fix lints * added assertion for one test * moved balance change api from eth to erigon Co-authored-by: fatemebagherii --- cmd/rpcdaemon/commands/erigon_api.go | 2 + cmd/rpcdaemon/commands/erigon_block.go | 68 ++++++++++++++++++++++++++ cmd/rpcdaemon/commands/eth_api_test.go | 25 ++++++++++ 3 files changed, 95 
insertions(+) diff --git a/cmd/rpcdaemon/commands/erigon_api.go b/cmd/rpcdaemon/commands/erigon_api.go index f976cf31f05..87781938642 100644 --- a/cmd/rpcdaemon/commands/erigon_api.go +++ b/cmd/rpcdaemon/commands/erigon_api.go @@ -5,6 +5,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/rpc" @@ -20,6 +21,7 @@ type ErigonAPI interface { GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) GetHeaderByHash(_ context.Context, hash common.Hash) (*types.Header, error) GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Timestamp, fullTx bool) (map[string]interface{}, error) + GetBalanceChangesInBlock(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (map[common.Address]*hexutil.Big, error) // Receipt related (see ./erigon_receipts.go) GetLogsByHash(ctx context.Context, hash common.Hash) ([][]*types.Log, error) diff --git a/cmd/rpcdaemon/commands/erigon_block.go b/cmd/rpcdaemon/commands/erigon_block.go index 7a5bf1bda1d..e56cba2a1d3 100644 --- a/cmd/rpcdaemon/commands/erigon_block.go +++ b/cmd/rpcdaemon/commands/erigon_block.go @@ -1,15 +1,21 @@ package commands import ( + "bytes" "context" "errors" "fmt" "sort" + "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/changeset" + "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" @@ -168,3 +174,65 @@ func buildBlockResponse(db kv.Tx, blockNum uint64, fullTx bool) (map[string]inte } return response, err } + +func (api *ErigonImpl) GetBalanceChangesInBlock(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (map[common.Address]*hexutil.Big, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + blockNumber, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + if err != nil { + return nil, err + } + + c, err := tx.Cursor(kv.AccountChangeSet) + if err != nil { + return nil, err + } + defer c.Close() + + startkey := dbutils.EncodeBlockNumber(blockNumber) + + decodeFn := changeset.Mapper[kv.AccountChangeSet].Decode + + balancesMapping := make(map[common.Address]*hexutil.Big) + + newReader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) + if err != nil { + return nil, err + } + + for dbKey, dbValue, _ := c.Seek(startkey); bytes.Equal(dbKey, startkey) && dbKey != nil; dbKey, dbValue, _ = c.Next() { + _, addressBytes, v, err := decodeFn(dbKey, dbValue) + if err != nil { + return nil, err + } + + var oldAcc accounts.Account + if err = oldAcc.DecodeForStorage(v); err != nil { + return nil, err + } + oldBalance := oldAcc.Balance + + address := common.BytesToAddress(addressBytes) + + newAcc, err := newReader.ReadAccountData(address) + if err != nil { + return nil, err + } + + newBalance := uint256.NewInt(0) + if newAcc != nil { + newBalance = &newAcc.Balance + } + + if !oldBalance.Eq(newBalance) { + newBalanceDesc := (*hexutil.Big)(newBalance.ToBig()) + balancesMapping[address] = newBalanceDesc + } + 
} + + return balancesMapping, nil +} diff --git a/cmd/rpcdaemon/commands/eth_api_test.go b/cmd/rpcdaemon/commands/eth_api_test.go index cc940c43e66..043f620db7b 100644 --- a/cmd/rpcdaemon/commands/eth_api_test.go +++ b/cmd/rpcdaemon/commands/eth_api_test.go @@ -5,6 +5,8 @@ import ( "fmt" "testing" + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/rpc" @@ -16,6 +18,29 @@ import ( "github.com/ledgerwatch/erigon/common" ) +func TestGetBalanceChangesInBlock(t *testing.T) { + assert := assert.New(t) + myBlockNum := rpc.BlockNumberOrHashWithNumber(0) + + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + balances, err := api.GetBalanceChangesInBlock(context.Background(), myBlockNum) + if err != nil { + t.Errorf("calling GetBalanceChangesInBlock resulted in an error: %v", err) + } + expected := map[common.Address]*hexutil.Big{ + common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e"): (*hexutil.Big)(uint256.NewInt(200000000000000000).ToBig()), + common.HexToAddress("0x703c4b2bD70c169f5717101CaeE543299Fc946C7"): (*hexutil.Big)(uint256.NewInt(300000000000000000).ToBig()), + common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): (*hexutil.Big)(uint256.NewInt(9000000000000000000).ToBig()), + } + assert.Equal(len(expected), len(balances)) + for i := range balances { + assert.Contains(expected, i, "%s is not expected to be present in the output.", i) + assert.Equal(balances[i], expected[i], "the value for %s is expected to be %v, but got %v.", i, expected[i], balances[i]) + } +} + func TestGetTransactionReceipt(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) From e04401491fa310ec7bc54d0cc530cbaded6d70e8 Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Sat, 16 Jul 2022 11:06:26 +0300 Subject: [PATCH 11/72] checking if we build torrent file (#4723) * checking if we build torrent file * only if torrentHash != nil * clearer separation of scenario * refactored Download * comments * ops * not using magnet with empty hash * moved log to top * ops * logs * log warns * bumped up log lvl * log --- .../downloader/downloader_grpc_server.go | 112 ++++++++++++------ 1 file changed, 78 insertions(+), 34 deletions(-) diff --git a/cmd/downloader/downloader/downloader_grpc_server.go b/cmd/downloader/downloader/downloader_grpc_server.go index c75516aa439..062d086c219 100644 --- a/cmd/downloader/downloader/downloader_grpc_server.go +++ b/cmd/downloader/downloader/downloader_grpc_server.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/anacrolix/torrent" "github.com/anacrolix/torrent/metainfo" "github.com/ledgerwatch/erigon-lib/gointerfaces" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" @@ -32,49 +33,33 @@ func (s *GrpcServer) Download(ctx context.Context, request *proto_downloader.Dow defer logEvery.Stop() torrentClient := s.d.Torrent() - mi := &metainfo.MetaInfo{AnnounceList: Trackers} + snapDir := s.d.SnapDir() for i, it := range request.Items { - if it.TorrentHash == nil { // seed new snapshot - if err := BuildTorrentFileIfNeed(it.Path, s.d.SnapDir()); err != nil { - return nil, err - } - } - - hash := Proto2InfoHash(it.TorrentHash) - if _, ok := torrentClient.Torrent(hash); ok { - continue - } - - ok, 
err := AddSegment(it.Path, s.d.SnapDir(), torrentClient) - if err != nil { - return nil, fmt.Errorf("AddSegment: %w", err) - } select { case <-logEvery.C: - log.Info("[snpshots] initializing", "files", fmt.Sprintf("%d/%d", i, len(request.Items))) + log.Info("[snapshots] initializing", "files", fmt.Sprintf("%d/%d", i, len(request.Items))) default: } - if ok { - continue - } - magnet := mi.Magnet(&hash, nil) - go func(magnetUrl string) { - t, err := torrentClient.AddMagnet(magnetUrl) + if it.TorrentHash == nil { + // if we dont have the torrent hash then we seed a new snapshot + log.Info("[snapshots] seeding a new snapshot") + ok, err := seedNewSnapshot(it, torrentClient, snapDir) if err != nil { - log.Warn("[downloader] add magnet link", "err", err) - return + return nil, err } - t.DisallowDataDownload() - t.AllowDataUpload() - <-t.GotInfo() - - mi := t.Metainfo() - if err := CreateTorrentFileIfNotExists(s.d.SnapDir(), t.Info(), &mi); err != nil { - log.Warn("[downloader] create torrent file", "err", err) - return + if ok { + log.Debug("[snapshots] already have both seg and torrent file") + } else { + log.Warn("[snapshots] didn't get the seg or the torrent file") } - }(magnet.String()) + continue + } + + _, err := createMagnetLinkWithInfoHash(it.TorrentHash, torrentClient, snapDir) + if err != nil { + return nil, err + } } s.d.ReCalcStats(10 * time.Second) // immediately call ReCalc to set stat.Complete flag return &emptypb.Empty{}, nil @@ -110,3 +95,62 @@ func (s *GrpcServer) Stats(ctx context.Context, request *proto_downloader.StatsR func Proto2InfoHash(in *prototypes.H160) metainfo.Hash { return gointerfaces.ConvertH160toAddress(in) } + +// decides what we do depending on wether we have the .seg file or the .torrent file +// have .torrent no .seg => get .seg file from .torrent +// have .seg no .torrent => get .torrent from .seg +func seedNewSnapshot(it *proto_downloader.DownloadItem, torrentClient *torrent.Client, snapDir string) (bool, error) { + // if we dont have the torrent file we build it if we have the .seg file + if err := BuildTorrentFileIfNeed(it.Path, snapDir); err != nil { + return false, err + } + + // we add the .seg file we have and create the .torrent file if we dont have it + ok, err := AddSegment(it.Path, snapDir, torrentClient) + if err != nil { + return false, fmt.Errorf("AddSegment: %w", err) + } + + // torrent file does exist and seg + if !ok { + return false, nil + } + + // we skip the item in for loop since we build the seg and torrent file here + return true, nil +} + +// we dont have .seg or .torrent so we get them through the torrent hash +func createMagnetLinkWithInfoHash(hash *prototypes.H160, torrentClient *torrent.Client, snapDir string) (bool, error) { + mi := &metainfo.MetaInfo{AnnounceList: Trackers} + if hash == nil { + return false, nil + } + infoHash := Proto2InfoHash(hash) + log.Debug("[downloader] downloading torrent and seg file", "hash", infoHash) + + if _, ok := torrentClient.Torrent(infoHash); ok { + log.Debug("[downloader] torrent client related to hash found", "hash", infoHash) + return true, nil + } + + magnet := mi.Magnet(&infoHash, nil) + go func(magnetUrl string) { + t, err := torrentClient.AddMagnet(magnetUrl) + if err != nil { + log.Warn("[downloader] add magnet link", "err", err) + return + } + t.DisallowDataDownload() + t.AllowDataUpload() + <-t.GotInfo() + + mi := t.Metainfo() + if err := CreateTorrentFileIfNotExists(snapDir, t.Info(), &mi); err != nil { + log.Warn("[downloader] create torrent file", "err", err) + return + } + 
}(magnet.String()) + log.Debug("[downloader] downloaded both seg and torrent files", "hash", infoHash) + return false, nil +} From 8a754cd252d00ee33184582814bd1bc7b72a217e Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 17 Jul 2022 02:02:53 +0200 Subject: [PATCH 12/72] Added PoS download validation when applicable (#4728) * added incomplete version of PoS download validation * fixed stuff --- eth/stagedsync/default_stages.go | 16 ++++++++-------- eth/stagedsync/stage_headers.go | 25 ++++++++++++++++++++++--- eth/stagedsync/sync.go | 20 ++++++++++++++++++++ turbo/engineapi/fork_validator.go | 27 +++++++++++++++++++++++++-- turbo/stages/stageloop.go | 23 +++++++++++++---------- 5 files changed, 88 insertions(+), 23 deletions(-) diff --git a/eth/stagedsync/default_stages.go b/eth/stagedsync/default_stages.go index 359f7aed023..869fc7331fe 100644 --- a/eth/stagedsync/default_stages.go +++ b/eth/stagedsync/default_stages.go @@ -242,23 +242,23 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc }, }, { - ID: stages.BlockHashes, - Description: "Write block hashes", + ID: stages.Bodies, + Description: "Download block bodies", Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - return SpawnBlockHashStage(s, tx, blockHashCfg, ctx) + return BodiesForward(s, u, ctx, tx, bodies, false, false) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { - return UnwindBlockHashStage(u, tx, blockHashCfg, ctx) + return UnwindBodiesStage(u, tx, bodies, ctx) }, }, { - ID: stages.Bodies, - Description: "Download block bodies", + ID: stages.BlockHashes, + Description: "Write block hashes", Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - return nil + return SpawnBlockHashStage(s, tx, blockHashCfg, ctx) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { - return UnwindBodiesStage(u, tx, bodies, ctx) + return UnwindBlockHashStage(u, tx, blockHashCfg, ctx) }, }, { diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 5db5c099c43..bf6ad576a4c 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -666,16 +666,32 @@ func schedulePoSDownload( func verifyAndSaveDownloadedPoSHeaders(tx kv.RwTx, cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter) { var lastValidHash common.Hash - + var badChainError error headerLoadFunc := func(key, value []byte, _ etl.CurrentTableReader, _ etl.LoadNextFunc) error { var h types.Header if err := rlp.DecodeBytes(value, &h); err != nil { return err } + if badChainError != nil { + cfg.hd.ReportBadHeaderPoS(h.Hash(), lastValidHash) + return nil + } lastValidHash = h.ParentHash if err := cfg.hd.VerifyHeader(&h); err != nil { log.Warn("Verification failed for header", "hash", h.Hash(), "height", h.Number.Uint64(), "err", err) - return err + badChainError = err + cfg.hd.ReportBadHeaderPoS(h.Hash(), lastValidHash) + return nil + } + // Validate state if possible (bodies will be retrieved through body download) + _, _, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, &h, nil, false) + if criticalError != nil { + return criticalError + } + if validationError != nil { + badChainError = validationError + cfg.hd.ReportBadHeaderPoS(h.Hash(), lastValidHash) + return nil } return headerInserter.FeedHeaderPoS(tx, &h, h.Hash()) } @@ -686,7 +702,10 @@ func verifyAndSaveDownloadedPoSHeaders(tx kv.RwTx, cfg 
HeadersCfg, headerInserte }, }) - if err != nil { + if err != nil || badChainError != nil { + if err == nil { + err = badChainError + } log.Warn("Removing beacon request due to", "err", err, "requestId", cfg.hd.RequestId()) cfg.hd.BeaconRequestList.Remove(cfg.hd.RequestId()) cfg.hd.ReportBadHeaderPoS(cfg.hd.PoSDownloaderTip(), lastValidHash) diff --git a/eth/stagedsync/sync.go b/eth/stagedsync/sync.go index 5b26d418a39..2fd46f1c1fb 100644 --- a/eth/stagedsync/sync.go +++ b/eth/stagedsync/sync.go @@ -189,6 +189,26 @@ func (s *Sync) StageState(stage stages.SyncStage, tx kv.Tx, db kv.RoDB) (*StageS return &StageState{s, stage, blockNum}, nil } +func (s *Sync) RunUnwind(db kv.RwDB, tx kv.RwTx) error { + if s.unwindPoint == nil { + return nil + } + for j := 0; j < len(s.unwindOrder); j++ { + if s.unwindOrder[j] == nil || s.unwindOrder[j].Disabled || s.unwindOrder[j].Unwind == nil { + continue + } + if err := s.unwindStage(false, s.unwindOrder[j], db, tx); err != nil { + return err + } + } + s.prevUnwindPoint = s.unwindPoint + s.unwindPoint = nil + s.badBlock = common.Hash{} + if err := s.SetCurrentStage(s.stages[0].ID); err != nil { + return err + } + return nil +} func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { s.prevUnwindPoint = nil s.timings = s.timings[:0] diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index e65db857423..91b87b31087 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -14,12 +14,15 @@ package engineapi import ( + "bytes" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/log/v3" ) @@ -177,7 +180,6 @@ func (fv *ForkValidator) Clear(tx kv.RwTx) { } fv.extendingFork.Rollback() } - // Clean all data relative to txpool fv.extendingForkHeadHash = common.Hash{} fv.extendingFork = nil } @@ -191,8 +193,29 @@ func (fv *ForkValidator) validateAndStorePayload(tx kv.RwTx, header *types.Heade status = remote.EngineStatus_INVALID return } + // If we do not have the body we can recover it from the batch. 
+ if body == nil { + var bodyWithTxs *types.Body + bodyWithTxs, criticalError = rawdb.ReadBodyWithTransactions(tx, header.Hash(), header.Number.Uint64()) + if criticalError != nil { + return + } + var encodedTxs [][]byte + buf := bytes.NewBuffer(nil) + for _, tx := range bodyWithTxs.Transactions { + buf.Reset() + if criticalError = rlp.Encode(buf, tx); criticalError != nil { + return + } + encodedTxs = append(encodedTxs, common.CopyBytes(buf.Bytes())) + } + fv.sideForksBlock[header.Hash()] = forkSegment{header, &types.RawBody{ + Transactions: encodedTxs, + }} + } else { + fv.sideForksBlock[header.Hash()] = forkSegment{header, body} + } status = remote.EngineStatus_VALID - fv.sideForksBlock[header.Hash()] = forkSegment{header, body} return } diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 36ae3fca7f7..8a0caa5451f 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -259,7 +259,7 @@ func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, h if unwindPoint > 0 { // Run it through the unwind stateSync.UnwindTo(unwindPoint, common.Hash{}) - if err = stateSync.Run(nil, batch, false); err != nil { + if err = stateSync.RunUnwind(nil, batch); err != nil { return err } // Once we unwond we can start constructing the chain (assumption: len(headersChain) == len(bodiesChain)) @@ -282,17 +282,13 @@ func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, h } } // If we did not specify header or body we stop here - if header == nil || body == nil { + if header == nil { return nil } // Setup height := header.Number.Uint64() hash := header.Hash() // Prepare memory state for block execution - if err = rawdb.WriteRawBodyIfNotExists(batch, hash, height, body); err != nil { - return err - } - rawdb.WriteHeader(batch, header) if err = rawdb.WriteHeaderNumber(batch, hash, height); err != nil { return err @@ -309,11 +305,18 @@ func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, h if err = stages.SaveStageProgress(batch, stages.Headers, height); err != nil { return err } - - if err = stages.SaveStageProgress(batch, stages.Bodies, height); err != nil { - return err + if body != nil { + if err = stages.SaveStageProgress(batch, stages.Bodies, height); err != nil { + return err + } + if err = rawdb.WriteRawBodyIfNotExists(batch, hash, height, body); err != nil { + return err + } + } else { + if err = stages.SaveStageProgress(batch, stages.Bodies, height-1); err != nil { + return err + } } - // Run state sync if err = stateSync.Run(nil, batch, false); err != nil { return err From fb9f19334935fde694ed567c30af4baa2aa0786b Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Mon, 18 Jul 2022 03:04:02 +0200 Subject: [PATCH 13/72] fixed Two Block PoW Re-org to Higher-Height Chain (#4730) Co-authored-by: giuliorebuffo --- eth/stagedsync/stage_headers.go | 11 ++++++++++ turbo/engineapi/fork_validator.go | 7 +++++++ turbo/stages/stageloop.go | 35 ++++++++++++++++--------------- 3 files changed, 36 insertions(+), 17 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index bf6ad576a4c..3f2996bf937 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -667,6 +667,8 @@ func schedulePoSDownload( func verifyAndSaveDownloadedPoSHeaders(tx kv.RwTx, cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter) { var lastValidHash common.Hash var badChainError error + var foundPow bool + headerLoadFunc := func(key, value []byte, _ 
etl.CurrentTableReader, _ etl.LoadNextFunc) error { var h types.Header if err := rlp.DecodeBytes(value, &h); err != nil { @@ -683,6 +685,15 @@ func verifyAndSaveDownloadedPoSHeaders(tx kv.RwTx, cfg HeadersCfg, headerInserte cfg.hd.ReportBadHeaderPoS(h.Hash(), lastValidHash) return nil } + // If we are in PoW range then block validation is not required anymore. + if foundPow { + return headerInserter.FeedHeaderPoS(tx, &h, h.Hash()) + } + + foundPow = h.Difficulty.Cmp(common.Big0) != 0 + if foundPow { + return headerInserter.FeedHeaderPoS(tx, &h, h.Hash()) + } // Validate state if possible (bodies will be retrieved through body download) _, _, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, &h, nil, false) if criticalError != nil { diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index 91b87b31087..ae83190cd4b 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -15,6 +15,7 @@ package engineapi import ( "bytes" + "fmt" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" @@ -130,6 +131,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body // if the block is not in range of maxForkDepth from head then we do not validate it. if abs64(int64(fv.currentHeight)-header.Number.Int64()) > maxForkDepth { status = remote.EngineStatus_ACCEPTED + fmt.Println("not in range") return } // Let's assemble the side fork backwards @@ -137,6 +139,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body currentHash := header.ParentHash foundCanonical, criticalError = rawdb.IsCanonicalHash(tx, currentHash) if criticalError != nil { + fmt.Println("critical") return } @@ -160,6 +163,10 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body } unwindPoint = sb.header.Number.Uint64() - 1 } + // Do not set an unwind point if we are already there. + if unwindPoint == fv.currentHeight { + unwindPoint = 0 + } // if it is not canonical we validate it in memory and discard it aferwards. 
batch := memdb.NewMemoryBatch(tx) defer batch.Close() diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 8a0caa5451f..5280dc8b7e3 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -262,25 +262,26 @@ func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, h if err = stateSync.RunUnwind(nil, batch); err != nil { return err } - // Once we unwond we can start constructing the chain (assumption: len(headersChain) == len(bodiesChain)) - for i := range headersChain { - currentHeader := headersChain[i] - currentBody := bodiesChain[i] - currentHeight := headersChain[i].Number.Uint64() - currentHash := headersChain[i].Hash() - // Prepare memory state for block execution - if err = rawdb.WriteRawBodyIfNotExists(batch, currentHash, currentHeight, currentBody); err != nil { - return err - } - rawdb.WriteHeader(batch, currentHeader) - if err = rawdb.WriteHeaderNumber(batch, currentHash, currentHeight); err != nil { - return err - } - if err = rawdb.WriteCanonicalHash(batch, currentHash, currentHeight); err != nil { - return err - } + } + // Once we unwond we can start constructing the chain (assumption: len(headersChain) == len(bodiesChain)) + for i := range headersChain { + currentHeader := headersChain[i] + currentBody := bodiesChain[i] + currentHeight := headersChain[i].Number.Uint64() + currentHash := headersChain[i].Hash() + // Prepare memory state for block execution + if err = rawdb.WriteRawBodyIfNotExists(batch, currentHash, currentHeight, currentBody); err != nil { + return err + } + rawdb.WriteHeader(batch, currentHeader) + if err = rawdb.WriteHeaderNumber(batch, currentHash, currentHeight); err != nil { + return err + } + if err = rawdb.WriteCanonicalHash(batch, currentHash, currentHeight); err != nil { + return err } } + // If we did not specify header or body we stop here if header == nil { return nil From ac9b7d8cc2dc75deb92de740dfce464552301c8e Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 18 Jul 2022 12:19:46 +0700 Subject: [PATCH 14/72] commitment: generic btree #4731 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index afa34d5579d..6f1ffe0d4d0 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220714160525-21aa65c1c383 + github.com/ledgerwatch/erigon-lib v0.0.0-20220718042200-78323471184c github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index d59d34e8091..55948da44b2 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220714160525-21aa65c1c383 h1:1EE1EIsDHok6NrzqQjGqkCj47APObiqFbgv+s7GJMrk= -github.com/ledgerwatch/erigon-lib v0.0.0-20220714160525-21aa65c1c383/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= +github.com/ledgerwatch/erigon-lib v0.0.0-20220718042200-78323471184c h1:jkzM2nkZ+FNstxqaH8cq6PKskFSnilJ5QmRo49SI+o4= 
+github.com/ledgerwatch/erigon-lib v0.0.0-20220718042200-78323471184c/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 13bf5c30c11c54188a85c9b5915156852619eccf Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 18 Jul 2022 14:38:01 +0700 Subject: [PATCH 15/72] db migration fix: it was able run with delay #4732 --- migrations/reset_blocks.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/migrations/reset_blocks.go b/migrations/reset_blocks.go index 16256bf3267..30a187a1237 100644 --- a/migrations/reset_blocks.go +++ b/migrations/reset_blocks.go @@ -30,14 +30,14 @@ var resetBlocks = Migration{ if err := BeforeCommit(tx, nil, true); err != nil { return err } - return + return tx.Commit() } genesisBlock := rawdb.ReadHeaderByNumber(tx, 0) if genesisBlock == nil { if err := BeforeCommit(tx, nil, true); err != nil { return err } - return nil + return tx.Commit() } chainConfig, err := rawdb.ReadChainConfig(tx, genesisBlock.Hash()) if err != nil { From 15ca3d25c1a0d707d18c87bc5f55c9fddab375aa Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 18 Jul 2022 15:36:53 +0700 Subject: [PATCH 16/72] snapshots: mainnet to 15m #4733 --- turbo/snapshotsync/snapshothashes/erigon-snapshots | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/turbo/snapshotsync/snapshothashes/erigon-snapshots b/turbo/snapshotsync/snapshothashes/erigon-snapshots index 9cd91d0b377..d90ddcf7257 160000 --- a/turbo/snapshotsync/snapshothashes/erigon-snapshots +++ b/turbo/snapshotsync/snapshothashes/erigon-snapshots @@ -1 +1 @@ -Subproject commit 9cd91d0b377149102613f6bec46f28429aa3c761 +Subproject commit d90ddcf72579066b48d631fc5a84dcfbbf2bac49 From b7acf6c108d2a1205634e9b8c7c756d788d7e782 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 18 Jul 2022 17:12:13 +0700 Subject: [PATCH 17/72] compressor: generic sort (#4734) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6f1ffe0d4d0..d6b6f4d3c58 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220718042200-78323471184c + github.com/ledgerwatch/erigon-lib v0.0.0-20220718054733-b645e40aa475 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 55948da44b2..19dabbc596a 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220718042200-78323471184c h1:jkzM2nkZ+FNstxqaH8cq6PKskFSnilJ5QmRo49SI+o4= -github.com/ledgerwatch/erigon-lib v0.0.0-20220718042200-78323471184c/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= +github.com/ledgerwatch/erigon-lib v0.0.0-20220718054733-b645e40aa475 
h1:WULehvYiLzt/pXBZBMEXNMC8w4S0PrgM0UC7r3J2Z1M= +github.com/ledgerwatch/erigon-lib v0.0.0-20220718054733-b645e40aa475/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 8dceb6fe8270bb0d47d4c4d3f5413f17e1255978 Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Mon, 18 Jul 2022 17:42:20 +0300 Subject: [PATCH 18/72] Auto download snapshots (#4729) * refactored request download * keeping track of missing snapshots * using slice mergeRange * request snapshots on reopen * passing arguments * passed in var * Revert "passed in var" This reverts commit 90478978dfa9f2a6dd5b1b051fc1d3f9e5f7a9c5. * Revert "passing arguments" This reverts commit 1e39c4152003796f6ff0bcfc188512d4a43bd18d. * Revert "request snapshots on reopen" This reverts commit d40212b973bc15db2b25cc1b0abb22051a3debb1. * added downloadRequest ; * downloading missing headers at start up * there shouldnt be an error anymore * not using nil; ; --- eth/stagedsync/stage_headers.go | 24 +++-- turbo/snapshotsync/block_snapshots.go | 109 +++++++++++++++------ turbo/snapshotsync/block_snapshots_test.go | 4 +- 3 files changed, 96 insertions(+), 41 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 3f2996bf937..688a94972cf 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -17,7 +17,6 @@ import ( proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/cmd/downloader/downloadergrpc" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" @@ -1333,11 +1332,20 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { return err } dbEmpty := len(snInDB) == 0 + var missingSnapshots []snapshotsync.MergeRange + if !dbEmpty { + _, missingSnapshots, err = snapshotsync.Segments(cfg.snapshots.Dir()) + if err != nil { + return err + } + } // send all hashes to the Downloader service preverified := snapshothashes.KnownConfig(cfg.chainConfig.ChainName).Preverified - req := &proto_downloader.DownloadRequest{Items: make([]*proto_downloader.DownloadItem, 0, len(preverified))} i := 0 + var downloadRequest []snapshotsync.DownloadRequest + // build all download requests + // builds preverified snapshots request for _, p := range preverified { _, has := snInDB[p.Name] if !dbEmpty && !has { @@ -1346,13 +1354,15 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { if dbEmpty { snInDB[p.Name] = p.Hash } - - req.Items = append(req.Items, &proto_downloader.DownloadItem{ - TorrentHash: downloadergrpc.String2Proto(p.Hash), - Path: p.Name, - }) + downloadRequest = append(downloadRequest, snapshotsync.NewDownloadRequest(nil, p.Name, p.Hash)) i++ } + // builds missing snapshots request + for _, r := range missingSnapshots { + downloadRequest = append(downloadRequest, snapshotsync.NewDownloadRequest(&r, "", "")) + } + req := snapshotsync.BuildProtoRequest(downloadRequest) + log.Info("[Snapshots] Fetching torrent files metadata") for { select { diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 
373079f3b6a..6f645bb7a21 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -24,6 +24,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/recsplit" types2 "github.com/ledgerwatch/erigon-lib/types" + "github.com/ledgerwatch/erigon/cmd/downloader/downloadergrpc" "github.com/ledgerwatch/erigon/cmd/hack/tool" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" @@ -40,6 +41,12 @@ import ( "golang.org/x/exp/slices" ) +type DownloadRequest struct { + ranges *MergeRange + path string + torrentHash string +} + type HeaderSegment struct { seg *compress.Decompressor // value: first_byte_of_header_hash + header_rlp idxHeaderHash *recsplit.Index // header_hash -> headers_segment_offset @@ -404,7 +411,7 @@ func (s *RoSnapshots) Reopen() error { s.Txs.lock.Lock() defer s.Txs.lock.Unlock() s.closeSegmentsLocked() - files, err := segments(s.dir) + files, _, err := Segments(s.dir) if err != nil { return err } @@ -499,7 +506,7 @@ func (s *RoSnapshots) ReopenSegments() error { s.Txs.lock.Lock() defer s.Txs.lock.Unlock() s.closeSegmentsLocked() - files, err := segments(s.dir) + files, _, err := Segments(s.dir) if err != nil { return err } @@ -786,19 +793,20 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD return nil } -func noGaps(in []snap.FileInfo) (out []snap.FileInfo, err error) { +func noGaps(in []snap.FileInfo) (out []snap.FileInfo, missingSnapshots []MergeRange) { var prevTo uint64 for _, f := range in { if f.To <= prevTo { continue } if f.From != prevTo { // no gaps - return nil, fmt.Errorf("%w: from %d to %d", snap.ErrSnapshotMissed, prevTo, f.From) + missingSnapshots = append(missingSnapshots, MergeRange{prevTo, f.From}) + continue } prevTo = f.To out = append(out, f) } - return out, nil + return out, missingSnapshots } func allTypeOfSegmentsMustExist(dir string, in []snap.FileInfo) (res []snap.FileInfo) { @@ -846,10 +854,10 @@ func noOverlaps(in []snap.FileInfo) (res []snap.FileInfo) { return res } -func segments(dir string) (res []snap.FileInfo, err error) { +func Segments(dir string) (res []snap.FileInfo, missingSnapshots []MergeRange, err error) { list, err := snap.Segments(dir) if err != nil { - return nil, err + return nil, missingSnapshots, err } for _, f := range list { if f.T != snap.Headers { @@ -857,7 +865,8 @@ func segments(dir string) (res []snap.FileInfo, err error) { } res = append(res, f) } - return noGaps(noOverlaps(allTypeOfSegmentsMustExist(dir, res))) + res, missingSnapshots = noGaps(noOverlaps(allTypeOfSegmentsMustExist(dir, res))) + return res, missingSnapshots, nil } func chooseSegmentEnd(from, to, blocksPerFile uint64) uint64 { @@ -1003,24 +1012,8 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 if err := snapshots.Reopen(); err != nil { return fmt.Errorf("Reopen: %w", err) } - // start seed large .seg of large size - req := &proto_downloader.DownloadRequest{Items: make([]*proto_downloader.DownloadItem, 0, len(snap.AllSnapshotTypes))} - for _, r := range ranges { - if r.to-r.from != snap.DEFAULT_SEGMENT_SIZE { - continue - } - for _, t := range snap.AllSnapshotTypes { - req.Items = append(req.Items, &proto_downloader.DownloadItem{ - Path: snap.SegmentFileName(r.from, r.to, t), - }) - } - } - if len(req.Items) > 0 && downloader != nil { - if _, err := downloader.Download(ctx, req); err != nil { - return err - } - } - return nil + + return RequestSnapshotDownload(ctx, ranges, downloader) } func 
DumpBlocks(ctx context.Context, blockFrom, blockTo, blocksPerFile uint64, tmpDir, snapDir string, chainDB kv.RoDB, workers int, lvl log.Lvl) error { @@ -1725,13 +1718,13 @@ func NewMerger(tmpDir string, workers int, lvl log.Lvl, chainID uint256.Int, not return &Merger{tmpDir: tmpDir, workers: workers, lvl: lvl, chainID: chainID, notifier: notifier} } -type mergeRange struct { +type MergeRange struct { from, to uint64 } -func (r mergeRange) String() string { return fmt.Sprintf("%dk-%dk", r.from/1000, r.to/1000) } +func (r MergeRange) String() string { return fmt.Sprintf("%dk-%dk", r.from/1000, r.to/1000) } -func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []mergeRange) { +func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []MergeRange) { for i := len(snapshots.Headers.segments) - 1; i > 0; i-- { sn := snapshots.Headers.segments[i] if sn.To-sn.From >= snap.DEFAULT_SEGMENT_SIZE { // is complete .seg @@ -1746,14 +1739,14 @@ func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []mergeRange) { break } aggFrom := sn.To - span - res = append(res, mergeRange{from: aggFrom, to: sn.To}) + res = append(res, MergeRange{from: aggFrom, to: sn.To}) for snapshots.Headers.segments[i].From > aggFrom { i-- } break } } - slices.SortFunc(res, func(i, j mergeRange) bool { return i.from < j.from }) + slices.SortFunc(res, func(i, j MergeRange) bool { return i.from < j.from }) return res } func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (toMergeHeaders, toMergeBodies, toMergeTxs []string, err error) { @@ -1781,7 +1774,7 @@ func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (toMergeH } // Merge does merge segments in given ranges -func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges []mergeRange, snapDir string, doIndex bool) error { +func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges []MergeRange, snapDir string, doIndex bool) error { if len(mergeRanges) == 0 { return nil } @@ -1944,3 +1937,55 @@ func assertSegment(segmentFile string) { panic(err) } } + +func NewDownloadRequest(ranges *MergeRange, path string, torrentHash string) DownloadRequest { + return DownloadRequest{ + ranges: ranges, + path: path, + torrentHash: torrentHash, + } +} + +// builds the snapshots download request and downloads them +func RequestSnapshotDownload(ctx context.Context, ranges []MergeRange, downloader proto_downloader.DownloaderClient) error { + // start seed large .seg of large size + var downloadRequest []DownloadRequest + for _, r := range ranges { + downloadRequest = append(downloadRequest, NewDownloadRequest(&r, "", "")) + } + req := BuildProtoRequest(downloadRequest) + if len(req.Items) > 0 && downloader != nil { + if _, err := downloader.Download(ctx, req); err != nil { + return err + } + } + return nil +} + +func BuildProtoRequest(downloadRequest []DownloadRequest) *proto_downloader.DownloadRequest { + req := &proto_downloader.DownloadRequest{Items: make([]*proto_downloader.DownloadItem, 0, len(snap.AllSnapshotTypes))} + for _, r := range downloadRequest { + if r.path != "" { + if r.torrentHash != "" { + req.Items = append(req.Items, &proto_downloader.DownloadItem{ + TorrentHash: downloadergrpc.String2Proto(r.torrentHash), + Path: r.path, + }) + } else { + req.Items = append(req.Items, &proto_downloader.DownloadItem{ + Path: r.path, + }) + } + } else { + if r.ranges.to-r.ranges.from != snap.DEFAULT_SEGMENT_SIZE { + continue + } + for _, t := range snap.AllSnapshotTypes { + req.Items = append(req.Items, 
&proto_downloader.DownloadItem{ + Path: snap.SegmentFileName(r.ranges.from, r.ranges.to, t), + }) + } + } + } + return req +} diff --git a/turbo/snapshotsync/block_snapshots_test.go b/turbo/snapshotsync/block_snapshots_test.go index f3c82864fd8..a41757048a3 100644 --- a/turbo/snapshotsync/block_snapshots_test.go +++ b/turbo/snapshotsync/block_snapshots_test.go @@ -148,8 +148,8 @@ func TestOpenAllSnapshot(t *testing.T) { createFile(500_000, 1_000_000, snap.Transactions) s = NewRoSnapshots(cfg, dir) err = s.Reopen() - require.Error(err) - require.Equal(0, len(s.Headers.segments)) //because, no gaps are allowed (expect snapshots from block 0) + require.NoError(err) + require.Equal(0, len(s.Headers.segments)) s.Close() createFile(0, 500_000, snap.Bodies) From 58628999793ed22891f30bfe15c09a7b96b7b271 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Mon, 18 Jul 2022 19:03:38 +0200 Subject: [PATCH 19/72] Fix index out of range in (*Accumulator) ChangeStorage (#4738) --- turbo/shards/state_change_accumulator.go | 1 + 1 file changed, 1 insertion(+) diff --git a/turbo/shards/state_change_accumulator.go b/turbo/shards/state_change_accumulator.go index dc7372c9537..ad5a5d89b39 100644 --- a/turbo/shards/state_change_accumulator.go +++ b/turbo/shards/state_change_accumulator.go @@ -105,6 +105,7 @@ func (a *Accumulator) DeleteAccount(address common.Address) { accountChange.Code = nil accountChange.StorageChanges = nil accountChange.Action = remote.Action_REMOVE + delete(a.storageChangeIndex, address) } // ChangeCode adds code to the latest change From 59dda485673c509ae3996ec82feaed072480a217 Mon Sep 17 00:00:00 2001 From: nanevardanyan Date: Tue, 19 Jul 2022 05:11:37 +0400 Subject: [PATCH 20/72] eth: replace maps with etl.Collectors (#4707) * WIP: eth: replace maps with etl.Collectors * WIP: eth: replace maps with etl.Collectors in pruneOldLogChunks * WIP: eth: use appendBuffer to avoid duplicates * WIP: eth: replace with oldestEntrySortableBuffer --- eth/stagedsync/stage_log_index.go | 50 ++++++++++++++++--------------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/eth/stagedsync/stage_log_index.go b/eth/stagedsync/stage_log_index.go index 3febf9d97a4..5b8a897471b 100644 --- a/eth/stagedsync/stage_log_index.go +++ b/eth/stagedsync/stage_log_index.go @@ -332,40 +332,35 @@ func truncateBitmaps(tx kv.RwTx, bucket string, inMem map[string]struct{}, to ui return nil } -func pruneOldLogChunks(tx kv.RwTx, bucket string, inMem map[string]struct{}, pruneTo uint64, logPrefix string, ctx context.Context) error { +func pruneOldLogChunks(tx kv.RwTx, bucket string, inMem *etl.Collector, pruneTo uint64, ctx context.Context) error { logEvery := time.NewTicker(logInterval) defer logEvery.Stop() - keys := make([]string, 0, len(inMem)) - for k := range inMem { - keys = append(keys, k) - } - slices.Sort(keys) + c, err := tx.RwCursor(bucket) if err != nil { return err } defer c.Close() - for _, kS := range keys { - seek := []byte(kS) - for k, _, err := c.Seek(seek); k != nil; k, _, err = c.Next() { + + if err := inMem.Load(tx, bucket, func(key, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + for k, _, err := c.Seek(key); k != nil; k, _, err = c.Next() { if err != nil { return err } - blockNum := uint64(binary.BigEndian.Uint32(k[len(seek):])) - if !bytes.HasPrefix(k, seek) || blockNum >= pruneTo { + blockNum := uint64(binary.BigEndian.Uint32(k[len(key):])) + if !bytes.HasPrefix(k, key) || blockNum >= pruneTo { break } - select { - 
case <-logEvery.C: - log.Info(fmt.Sprintf("[%s]", logPrefix), "table", kv.AccountsHistory, "block", blockNum) - case <-ctx.Done(): - return libcommon.ErrStopped - default: - } + if err = c.DeleteCurrent(); err != nil { return fmt.Errorf("failed delete, block=%d: %w", blockNum, err) } } + return nil + }, etl.TransformArgs{ + Quit: ctx.Done(), + }); err != nil { + return err } return nil } @@ -405,8 +400,11 @@ func pruneLogIndex(logPrefix string, tx kv.RwTx, tmpDir string, pruneTo uint64, logEvery := time.NewTicker(logInterval) defer logEvery.Stop() - topics := map[string]struct{}{} - addrs := map[string]struct{}{} + bufferSize := etl.BufferOptimalSize + topics := etl.NewCollector(logPrefix, tmpDir, etl.NewOldestEntryBuffer(bufferSize)) + defer topics.Close() + addrs := etl.NewCollector(logPrefix, tmpDir, etl.NewOldestEntryBuffer(bufferSize)) + defer addrs.Close() reader := bytes.NewReader(nil) { @@ -440,17 +438,21 @@ func pruneLogIndex(logPrefix string, tx kv.RwTx, tmpDir string, pruneTo uint64, for _, l := range logs { for _, topic := range l.Topics { - topics[string(topic.Bytes())] = struct{}{} + if err := topics.Collect(topic.Bytes(), nil); err != nil { + return err + } + } + if err := addrs.Collect(l.Address.Bytes(), nil); err != nil { + return err } - addrs[string(l.Address.Bytes())] = struct{}{} } } } - if err := pruneOldLogChunks(tx, kv.LogTopicIndex, topics, pruneTo, logPrefix, ctx); err != nil { + if err := pruneOldLogChunks(tx, kv.LogTopicIndex, topics, pruneTo, ctx); err != nil { return err } - if err := pruneOldLogChunks(tx, kv.LogAddressIndex, addrs, pruneTo, logPrefix, ctx); err != nil { + if err := pruneOldLogChunks(tx, kv.LogAddressIndex, addrs, pruneTo, ctx); err != nil { return err } return nil From 5d68f610bc3ab1b711479d11743c92c23d919083 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 19 Jul 2022 10:40:02 +0700 Subject: [PATCH 21/72] mdbx: use OS pagesize by default (but > 4Kb, and < 64Kb) #4743 --- cmd/utils/flags.go | 5 +++-- go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 0e8e5a941a4..d2bcb6eea9c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -31,6 +31,7 @@ import ( "text/template" "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/txpool" "github.com/ledgerwatch/log/v3" @@ -700,8 +701,8 @@ var ( } DbPageSizeFlag = cli.StringFlag{ Name: "db.pagesize", - Usage: "set mdbx pagesize on db creation: must be power of 2 and '256b <= pagesize <= 64kb' ", - Value: "4kb", + Usage: "set mdbx pagesize on db creation: must be power of 2 and '256b <= pagesize <= 64kb'. 
default: equal to OperationSystem's pageSize", + Value: datasize.ByteSize(kv.DefaultPageSize()).String(), } HealthCheckFlag = cli.BoolFlag{ diff --git a/go.mod b/go.mod index d6b6f4d3c58..08d8d2ea972 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220718054733-b645e40aa475 + github.com/ledgerwatch/erigon-lib v0.0.0-20220719032653-b4e402231b6e github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 19dabbc596a..5feb0e6cca0 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220718054733-b645e40aa475 h1:WULehvYiLzt/pXBZBMEXNMC8w4S0PrgM0UC7r3J2Z1M= -github.com/ledgerwatch/erigon-lib v0.0.0-20220718054733-b645e40aa475/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719032653-b4e402231b6e h1:5UltJUvO6qSku8+OLxnC9ynCHNrZ7JfVOoLdpM5aq3k= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719032653-b4e402231b6e/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From c7da7a6d90d6981c03f81632a8ebda9a638b909a Mon Sep 17 00:00:00 2001 From: michaelscheung Date: Mon, 18 Jul 2022 20:48:22 -0700 Subject: [PATCH 22/72] Support block parameter for integration stage_log_index (#4740) * Support block parameter for integration stage_log_index * Add logPrefix * Skip stage_log_index if endBlock < startBlock Co-authored-by: michaelscheung --- cmd/integration/commands/stages.go | 2 +- eth/stagedsync/default_stages.go | 2 +- eth/stagedsync/stage_log_index.go | 28 +++++++++++++++++++++----- eth/stagedsync/stage_log_index_test.go | 6 +++--- 4 files changed, 28 insertions(+), 10 deletions(-) diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 45a51f3344f..d3cb77845d3 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -850,7 +850,7 @@ func stageLogIndex(db kv.RwDB, ctx context.Context) error { return err } } else { - if err := stagedsync.SpawnLogIndex(s, tx, cfg, ctx); err != nil { + if err := stagedsync.SpawnLogIndex(s, tx, cfg, ctx, block); err != nil { return err } } diff --git a/eth/stagedsync/default_stages.go b/eth/stagedsync/default_stages.go index 869fc7331fe..df37c6210e8 100644 --- a/eth/stagedsync/default_stages.go +++ b/eth/stagedsync/default_stages.go @@ -177,7 +177,7 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul ID: stages.LogIndex, Description: "Generate receipt logs index", Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - return SpawnLogIndex(s, tx, logIndex, ctx) + return SpawnLogIndex(s, tx, logIndex, ctx, 0) }, 
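The 0 passed here is the new trailing prematureEndBlock argument introduced by this patch: zero keeps the old behaviour (index up to the latest executed block), while the integration command passes the user-supplied block number to stop early. Condensed from the stage_log_index.go hunk further down, the gating boils down to:

    // Clamp to the requested end block, then skip the stage when there is
    // nothing new to index (also keeps stage progress from moving backwards).
    if prematureEndBlock != 0 && prematureEndBlock < endBlock {
        endBlock = prematureEndBlock
    }
    if endBlock <= s.BlockNumber {
        return nil
    }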
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindLogIndex(u, s, tx, logIndex, ctx) diff --git a/eth/stagedsync/stage_log_index.go b/eth/stagedsync/stage_log_index.go index 5b8a897471b..43c78a1fae5 100644 --- a/eth/stagedsync/stage_log_index.go +++ b/eth/stagedsync/stage_log_index.go @@ -45,7 +45,7 @@ func StageLogIndexCfg(db kv.RwDB, prune prune.Mode, tmpDir string) LogIndexCfg { } } -func SpawnLogIndex(s *StageState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Context) error { +func SpawnLogIndex(s *StageState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Context, prematureEndBlock uint64) error { useExternalTx := tx != nil if !useExternalTx { var err error @@ -61,7 +61,15 @@ func SpawnLogIndex(s *StageState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Conte if err != nil { return fmt.Errorf("getting last executed block: %w", err) } - if endBlock == s.BlockNumber { + // if prematureEndBlock is nonzero and less than the latest executed block, + // then we only run the log index stage until prematureEndBlock + if prematureEndBlock != 0 && prematureEndBlock < endBlock { + endBlock = prematureEndBlock + } + // It is possible that prematureEndBlock < s.BlockNumber, + // in which case it is important that we skip this stage, + // or else we could overwrite stage_at with prematureEndBlock + if endBlock <= s.BlockNumber { return nil } @@ -73,8 +81,7 @@ func SpawnLogIndex(s *StageState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Conte if startBlock > 0 { startBlock++ } - - if err = promoteLogIndex(logPrefix, tx, startBlock, cfg, ctx); err != nil { + if err = promoteLogIndex(logPrefix, tx, startBlock, endBlock, cfg, ctx); err != nil { return err } if err = s.Update(tx, endBlock); err != nil { @@ -90,7 +97,7 @@ func SpawnLogIndex(s *StageState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Conte return nil } -func promoteLogIndex(logPrefix string, tx kv.RwTx, start uint64, cfg LogIndexCfg, ctx context.Context) error { +func promoteLogIndex(logPrefix string, tx kv.RwTx, start uint64, endBlock uint64, cfg LogIndexCfg, ctx context.Context) error { quit := ctx.Done() logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() @@ -112,6 +119,10 @@ func promoteLogIndex(logPrefix string, tx kv.RwTx, start uint64, cfg LogIndexCfg reader := bytes.NewReader(nil) + if endBlock != 0 { + log.Info(fmt.Sprintf("[%s] Running from blocks %d to %d", logPrefix, start, endBlock), "endBlock", endBlock) + } + for k, v, err := logs.Seek(dbutils.LogKey(start, 0)); k != nil; k, v, err = logs.Next() { if err != nil { return err @@ -122,6 +133,13 @@ func promoteLogIndex(logPrefix string, tx kv.RwTx, start uint64, cfg LogIndexCfg } blockNum := binary.BigEndian.Uint64(k[:8]) + // if endBlock is positive, we only run the stage up until endBlock + // if endBlock is zero, we run the stage for all available blocks + if endBlock != 0 && blockNum > endBlock { + log.Info(fmt.Sprintf("[%s] Reached user-specified end block", logPrefix), "endBlock", endBlock) + break + } + select { default: case <-logEvery.C: diff --git a/eth/stagedsync/stage_log_index_test.go b/eth/stagedsync/stage_log_index_test.go index 611c8a999c4..5f09221812a 100644 --- a/eth/stagedsync/stage_log_index_test.go +++ b/eth/stagedsync/stage_log_index_test.go @@ -101,7 +101,7 @@ func TestPromoteLogIndex(t *testing.T) { cfgCopy.bufLimit = 10 cfgCopy.flushEvery = time.Nanosecond - err := promoteLogIndex("logPrefix", tx, 0, cfgCopy, ctx) + err := promoteLogIndex("logPrefix", tx, 0, 0, cfgCopy, ctx) require.NoError(err) // Check indices 
GetCardinality (in how many blocks they meet) @@ -127,7 +127,7 @@ func TestPruneLogIndex(t *testing.T) { cfgCopy := cfg cfgCopy.bufLimit = 10 cfgCopy.flushEvery = time.Nanosecond - err := promoteLogIndex("logPrefix", tx, 0, cfgCopy, ctx) + err := promoteLogIndex("logPrefix", tx, 0, 0, cfgCopy, ctx) require.NoError(err) // Mode test @@ -166,7 +166,7 @@ func TestUnwindLogIndex(t *testing.T) { cfgCopy := cfg cfgCopy.bufLimit = 10 cfgCopy.flushEvery = time.Nanosecond - err := promoteLogIndex("logPrefix", tx, 0, cfgCopy, ctx) + err := promoteLogIndex("logPrefix", tx, 0, 0, cfgCopy, ctx) require.NoError(err) // Mode test From c9306ab8d0a4076b7f91b30df652a49adf7ef48b Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 19 Jul 2022 10:54:44 +0700 Subject: [PATCH 23/72] disable asserts in devel (#4746) * save * save --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3546d3e065e..492d6191edb 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ DOCKER_TAG ?= thorax/erigon:latest # Pipe error below to /dev/null since Makefile structure kind of expects # Go to be available, but with docker it's not strictly necessary CGO_CFLAGS := $(shell $(GO) env CGO_CFLAGS 2>/dev/null) # don't lose default -CGO_CFLAGS += -DMDBX_FORCE_ASSERTIONS=1 # Enable MDBX's asserts by default in 'devel' branch and disable in 'stable' +CGO_CFLAGS += -DMDBX_FORCE_ASSERTIONS=0 # Enable MDBX's asserts by default in 'devel' branch and disable in releases CGO_CFLAGS := CGO_CFLAGS="$(CGO_CFLAGS)" DBG_CGO_CFLAGS += -DMDBX_DEBUG=1 From bda2697bcca0692a63f0d7147ead3c43da21f391 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 19 Jul 2022 10:56:14 +0700 Subject: [PATCH 24/72] linter version up #4745 --- .github/workflows/ci.yml | 4 +--- Makefile | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e5512bcee86..402b8c7cfde 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -53,9 +53,7 @@ jobs: if: runner.os == 'Linux' uses: golangci/golangci-lint-action@v3 with: - version: v1.46 - skip-pkg-cache: true - skip-build-cache: true + version: v1.47 - name: Test run: make test diff --git a/Makefile b/Makefile index 492d6191edb..1bcdbe10a37 100644 --- a/Makefile +++ b/Makefile @@ -141,7 +141,7 @@ lintci: lintci-deps: rm -f ./build/bin/golangci-lint - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.46.2 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.47.0 clean: go clean -cache From b70abd7aafde5fd983887a3f5911b7f666b1ddc2 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 19 Jul 2022 13:45:53 +0700 Subject: [PATCH 25/72] Grpc up v48 #532 * save * save --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 08d8d2ea972..ca35ad108d6 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220719032653-b4e402231b6e + github.com/ledgerwatch/erigon-lib v0.0.0-20220719040828-9ceeeac385ad github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 @@ -58,11 +58,11 @@ require ( github.com/xsleonard/go-merkle v1.1.0 
go.uber.org/atomic v1.9.0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d - golang.org/x/exp v0.0.0-20220706164943-b4a6d9510983 + golang.org/x/exp v0.0.0-20220713135740-79cabaa25d75 golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f - golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e + golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 golang.org/x/time v0.0.0-20220609170525-579cf78fd858 - google.golang.org/grpc v1.46.2 + google.golang.org/grpc v1.48.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 google.golang.org/protobuf v1.28.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c diff --git a/go.sum b/go.sum index 5feb0e6cca0..9e844b35822 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719032653-b4e402231b6e h1:5UltJUvO6qSku8+OLxnC9ynCHNrZ7JfVOoLdpM5aq3k= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719032653-b4e402231b6e/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719040828-9ceeeac385ad h1:fdAdq41F6zH39l6FgsfezXZElEFzl80fXqnB7gKWCTE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719040828-9ceeeac385ad/go.mod h1:KXCwHR5gW/dv9naTlrx4Du8Wzj6H3ndTBC+vw3hnyWU= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -679,8 +679,8 @@ golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20220706164943-b4a6d9510983 h1:sUweFwmLOje8KNfXAVqGGAsmgJ/F8jJ6wBLJDt4BTKY= -golang.org/x/exp v0.0.0-20220706164943-b4a6d9510983/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20220713135740-79cabaa25d75 h1:x03zeu7B2B11ySp+daztnwM5oBJ/8wGUSqrwcw9L0RA= +golang.org/x/exp v0.0.0-20220713135740-79cabaa25d75/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -779,8 +779,8 @@ golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e h1:CsOuNlbOuf0mzxJIefr6Q4uAUetRUwZE4qt7VfzP+xo= -golang.org/x/sys 
v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -848,8 +848,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= From ab28089583570217cea4e6cc389b429d69f1d558 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Tue, 19 Jul 2022 11:11:08 +0200 Subject: [PATCH 26/72] Still fixing index out of range in (*Accumulator) ChangeStorage (#4751) --- turbo/shards/state_change_accumulator.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/turbo/shards/state_change_accumulator.go b/turbo/shards/state_change_accumulator.go index ad5a5d89b39..f2743020658 100644 --- a/turbo/shards/state_change_accumulator.go +++ b/turbo/shards/state_change_accumulator.go @@ -74,6 +74,7 @@ func (a *Accumulator) ChangeAccount(address common.Address, incarnation uint64, i = len(a.latestChange.Changes) a.latestChange.Changes = append(a.latestChange.Changes, &remote.AccountChange{Address: gointerfaces.ConvertAddressToH160(address)}) a.accountChangeIndex[address] = i + delete(a.storageChangeIndex, address) } accountChange := a.latestChange.Changes[i] switch accountChange.Action { @@ -116,6 +117,7 @@ func (a *Accumulator) ChangeCode(address common.Address, incarnation uint64, cod i = len(a.latestChange.Changes) a.latestChange.Changes = append(a.latestChange.Changes, &remote.AccountChange{Address: gointerfaces.ConvertAddressToH160(address), Action: remote.Action_CODE}) a.accountChangeIndex[address] = i + delete(a.storageChangeIndex, address) } accountChange := a.latestChange.Changes[i] switch accountChange.Action { @@ -137,6 +139,7 @@ func (a *Accumulator) ChangeStorage(address common.Address, incarnation uint64, i = len(a.latestChange.Changes) a.latestChange.Changes = append(a.latestChange.Changes, &remote.AccountChange{Address: gointerfaces.ConvertAddressToH160(address), Action: remote.Action_STORAGE}) a.accountChangeIndex[address] = i + delete(a.storageChangeIndex, address) } accountChange := a.latestChange.Changes[i] if accountChange.Action == remote.Action_REMOVE { From 
d3b424c9f6e935f1e9a9c0580b9f18aca24e6f66 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 19 Jul 2022 16:53:18 +0700 Subject: [PATCH 27/72] Mdbx: GC BigFoot (#4750) --- cmd/downloader/downloader/downloadercfg/logger.go | 3 +++ go.mod | 4 ++-- go.sum | 8 ++++---- libmdbx | 2 +- turbo/snapshotsync/snapshothashes/erigon-snapshots | 2 +- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/cmd/downloader/downloader/downloadercfg/logger.go b/cmd/downloader/downloader/downloadercfg/logger.go index 7c71fa81e45..27989595f8e 100644 --- a/cmd/downloader/downloader/downloadercfg/logger.go +++ b/cmd/downloader/downloader/downloadercfg/logger.go @@ -74,6 +74,9 @@ func (b adapterHandler) Handle(r lg.Record) { if strings.Contains(str, "being sole dirtier of piece") { // suppress useless errors break } + if strings.Contains(str, "requested chunk too long") { // suppress useless errors + break + } log.Warn(str) case lg.Error: diff --git a/go.mod b/go.mod index ca35ad108d6..efcd1b91f37 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220719040828-9ceeeac385ad + github.com/ledgerwatch/erigon-lib v0.0.0-20220719082624-745b9b6b98dc github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 @@ -50,7 +50,7 @@ require ( github.com/stretchr/testify v1.8.0 github.com/tendermint/go-amino v0.14.1 github.com/tendermint/tendermint v0.31.11 - github.com/torquem-ch/mdbx-go v0.24.3-0.20220614090901-342411560dde + github.com/torquem-ch/mdbx-go v0.25.0 github.com/ugorji/go/codec v1.1.13 github.com/ugorji/go/codec/codecgen v1.1.13 github.com/urfave/cli v1.22.9 diff --git a/go.sum b/go.sum index 9e844b35822..985b44713ef 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719040828-9ceeeac385ad h1:fdAdq41F6zH39l6FgsfezXZElEFzl80fXqnB7gKWCTE= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719040828-9ceeeac385ad/go.mod h1:KXCwHR5gW/dv9naTlrx4Du8Wzj6H3ndTBC+vw3hnyWU= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719082624-745b9b6b98dc h1:5opLy9YqL26YvSNGKxHcJO4X/R7Q3FU4ajp7jhbZPBE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719082624-745b9b6b98dc/go.mod h1:8wlgUF6YVdB3fjGg9VbQshirfJvi1h+qoHDYrPqAHoE= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -619,8 +619,8 @@ github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDW github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/torquem-ch/mdbx-go v0.24.3-0.20220614090901-342411560dde 
h1:1nzKGldWC9T0ApRfV0jzH28DaBy1Yg5+rmjSiJ/G0dI= -github.com/torquem-ch/mdbx-go v0.24.3-0.20220614090901-342411560dde/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= +github.com/torquem-ch/mdbx-go v0.25.0 h1:k66O6GrqyAsXNn4tF87Q+ba4840aplv6O8Ph0FR1PCY= +github.com/torquem-ch/mdbx-go v0.25.0/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= github.com/ugorji/go v1.1.13/go.mod h1:jxau1n+/wyTGLQoCkjok9r5zFa/FxT6eI5HiHKQszjc= github.com/ugorji/go/codec v1.1.13 h1:013LbFhocBoIqgHeIHKlV4JWYhqogATYWZhIcH0WHn4= github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCBFCq1OeuU= diff --git a/libmdbx b/libmdbx index 5d2eb580fdd..0018164fef0 160000 --- a/libmdbx +++ b/libmdbx @@ -1 +1 @@ -Subproject commit 5d2eb580fdd61ccacf00aa93d7ee42e8e53afc8e +Subproject commit 0018164fef048b68dd84d503fde95dab5fdea94b diff --git a/turbo/snapshotsync/snapshothashes/erigon-snapshots b/turbo/snapshotsync/snapshothashes/erigon-snapshots index d90ddcf7257..7e85e4d0028 160000 --- a/turbo/snapshotsync/snapshothashes/erigon-snapshots +++ b/turbo/snapshotsync/snapshothashes/erigon-snapshots @@ -1 +1 @@ -Subproject commit d90ddcf72579066b48d631fc5a84dcfbbf2bac49 +Subproject commit 7e85e4d0028c27f747d97f65ac0b8c252a050b39 From e768227d38a317b5d41692567929d8228787994b Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Tue, 19 Jul 2022 15:27:54 +0300 Subject: [PATCH 28/72] Merge range (#4749) * added merge range into segments * got rid of missing snapshot errors * reusing RequestSnapshotDownload * sleep out of download * ops * warning if we are missing snapshots --- eth/stagedsync/stage_headers.go | 7 +- turbo/snapshotsync/block_reader.go | 6 +- turbo/snapshotsync/block_snapshots.go | 107 ++++++++++----------- turbo/snapshotsync/block_snapshots_test.go | 4 +- turbo/snapshotsync/snap/files.go | 2 - 5 files changed, 63 insertions(+), 63 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 688a94972cf..30a38f385d8 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -1340,6 +1340,10 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { } } + if len(missingSnapshots) > 0 { + log.Warn("[Snapshots] downloading missing snapshots") + } + // send all hashes to the Downloader service preverified := snapshothashes.KnownConfig(cfg.chainConfig.ChainName).Preverified i := 0 @@ -1361,7 +1365,6 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { for _, r := range missingSnapshots { downloadRequest = append(downloadRequest, snapshotsync.NewDownloadRequest(&r, "", "")) } - req := snapshotsync.BuildProtoRequest(downloadRequest) log.Info("[Snapshots] Fetching torrent files metadata") for { @@ -1370,7 +1373,7 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { return ctx.Err() default: } - if _, err := cfg.snapshotDownloader.Download(ctx, req); err != nil { + if err := snapshotsync.RequestSnapshotDownload(ctx, downloadRequest, cfg.snapshotDownloader); err != nil { log.Error("[Snapshots] call downloader", "err", err) time.Sleep(10 * time.Second) continue diff --git a/turbo/snapshotsync/block_reader.go b/turbo/snapshotsync/block_reader.go index 4bde09ed7dc..babb6a0037e 100644 --- a/turbo/snapshotsync/block_reader.go +++ b/turbo/snapshotsync/block_reader.go @@ -516,7 +516,7 @@ func (back *BlockReaderWithSnapshots) headerFromSnapshot(blockHeight uint64, sn func (back *BlockReaderWithSnapshots) headerFromSnapshotByHash(hash common.Hash, sn 
*HeaderSegment, buf []byte) (*types.Header, error) { defer func() { if rec := recover(); rec != nil { - panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.From, sn.To, dbg.Stack())) + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.ranges.from, sn.ranges.to, dbg.Stack())) } }() // avoid crash because Erigon's core does many things @@ -564,7 +564,7 @@ func (back *BlockReaderWithSnapshots) bodyFromSnapshot(blockHeight uint64, sn *B func (back *BlockReaderWithSnapshots) bodyForStorageFromSnapshot(blockHeight uint64, sn *BodySegment, buf []byte) (*types.BodyForStorage, []byte, error) { defer func() { if rec := recover(); rec != nil { - panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.From, sn.To, dbg.Stack())) + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.ranges.from, sn.ranges.to, dbg.Stack())) } }() // avoid crash because Erigon's core does many things @@ -597,7 +597,7 @@ func (back *BlockReaderWithSnapshots) bodyForStorageFromSnapshot(blockHeight uin func (back *BlockReaderWithSnapshots) txsFromSnapshot(baseTxnID uint64, txsAmount uint32, txsSeg *TxnSegment, buf []byte) (txs []types.Transaction, senders []common.Address, err error) { defer func() { if rec := recover(); rec != nil { - panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, txsSeg.From, txsSeg.To, dbg.Stack())) + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, txsSeg.ranges.from, txsSeg.ranges.to, dbg.Stack())) } }() // avoid crash because Erigon's core does many things diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 6f645bb7a21..8f7ea07da1f 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -50,20 +50,20 @@ type DownloadRequest struct { type HeaderSegment struct { seg *compress.Decompressor // value: first_byte_of_header_hash + header_rlp idxHeaderHash *recsplit.Index // header_hash -> headers_segment_offset - From, To uint64 + ranges MergeRange } type BodySegment struct { seg *compress.Decompressor // value: rlp(types.BodyForStorage) idxBodyNumber *recsplit.Index // block_num_u64 -> bodies_segment_offset - From, To uint64 + ranges MergeRange } type TxnSegment struct { Seg *compress.Decompressor // value: first_byte_of_transaction_hash + sender_address + transaction_rlp IdxTxnHash *recsplit.Index // transaction_hash -> transactions_segment_offset IdxTxnHash2BlockNum *recsplit.Index // transaction_hash -> block_number - From, To uint64 + ranges MergeRange } func (sn *HeaderSegment) close() { @@ -79,12 +79,12 @@ func (sn *HeaderSegment) close() { func (sn *HeaderSegment) reopen(dir string) (err error) { sn.close() - fileName := snap.SegmentFileName(sn.From, sn.To, snap.Headers) + fileName := snap.SegmentFileName(sn.ranges.from, sn.ranges.to, snap.Headers) sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) if err != nil { return err } - sn.idxHeaderHash, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.From, sn.To, snap.Headers.String()))) + sn.idxHeaderHash, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Headers.String()))) if err != nil { return err } @@ -104,12 +104,12 @@ func (sn *BodySegment) close() { func (sn *BodySegment) reopen(dir string) (err error) { sn.close() - fileName := snap.SegmentFileName(sn.From, sn.To, snap.Bodies) + fileName := snap.SegmentFileName(sn.ranges.from, sn.ranges.to, snap.Bodies) sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) if err != nil { return err } - 
sn.idxBodyNumber, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.From, sn.To, snap.Bodies.String()))) + sn.idxBodyNumber, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Bodies.String()))) if err != nil { return err } @@ -148,16 +148,16 @@ func (sn *TxnSegment) close() { } func (sn *TxnSegment) reopen(dir string) (err error) { sn.close() - fileName := snap.SegmentFileName(sn.From, sn.To, snap.Transactions) + fileName := snap.SegmentFileName(sn.ranges.from, sn.ranges.to, snap.Transactions) sn.Seg, err = compress.NewDecompressor(path.Join(dir, fileName)) if err != nil { return err } - sn.IdxTxnHash, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.From, sn.To, snap.Transactions.String()))) + sn.IdxTxnHash, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Transactions.String()))) if err != nil { return err } - sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.From, sn.To, snap.Transactions2Block.String()))) + sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Transactions2Block.String()))) if err != nil { return err } @@ -194,7 +194,7 @@ func (s *headerSegments) ViewSegment(blockNum uint64, f func(sn *HeaderSegment) s.lock.RLock() defer s.lock.RUnlock() for _, seg := range s.segments { - if !(blockNum >= seg.From && blockNum < seg.To) { + if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { continue } return true, f(seg) @@ -232,7 +232,7 @@ func (s *bodySegments) ViewSegment(blockNum uint64, f func(*BodySegment) error) s.lock.RLock() defer s.lock.RUnlock() for _, seg := range s.segments { - if !(blockNum >= seg.From && blockNum < seg.To) { + if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { continue } return true, f(seg) @@ -270,7 +270,7 @@ func (s *txnSegments) ViewSegment(blockNum uint64, f func(*TxnSegment) error) (f s.lock.RLock() defer s.lock.RUnlock() for _, seg := range s.segments { - if !(blockNum >= seg.From && blockNum < seg.To) { + if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { continue } return true, f(seg) @@ -323,7 +323,7 @@ func (s *RoSnapshots) idxAvailability() uint64 { if seg.idxHeaderHash == nil { continue } - headers = seg.To - 1 + headers = seg.ranges.to - 1 break } for i := len(s.Bodies.segments) - 1; i >= 0; i-- { @@ -331,7 +331,7 @@ func (s *RoSnapshots) idxAvailability() uint64 { if seg.idxBodyNumber == nil { continue } - bodies = seg.To - 1 + bodies = seg.ranges.to - 1 break } @@ -340,7 +340,7 @@ func (s *RoSnapshots) idxAvailability() uint64 { if seg.IdxTxnHash == nil || seg.IdxTxnHash2BlockNum == nil { continue } - txs = seg.To - 1 + txs = seg.ranges.to - 1 break } return cmp.Min(headers, cmp.Min(bodies, txs)) @@ -390,7 +390,7 @@ func (s *RoSnapshots) AsyncOpenAll(ctx context.Context) { return default: } - if err := s.Reopen(); err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, snap.ErrSnapshotMissed) { + if err := s.Reopen(); err != nil && !errors.Is(err, os.ErrNotExist) { log.Error("AsyncOpenAll", "err", err) } time.Sleep(15 * time.Second) @@ -422,7 +422,7 @@ func (s *RoSnapshots) Reopen() error { s.Txs.segments = s.Txs.segments[:0] for _, f := range files { { - seg := &BodySegment{From: f.From, To: f.To} + seg := &BodySegment{ranges: MergeRange{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Bodies) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { 
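With From/To folded into the exported MergeRange, the same type now describes both a segment's span and a gap reported by Segments(), and a gap can be handed straight to the downloader. A condensed sketch of how the pieces added in this patch fit together (error handling trimmed; preverified, snapshotDir and downloaderClient are assumed to come from the caller, as in WaitForDownloader):

    // What is on disk, and which block ranges are missing?
    _, missing, err := snapshotsync.Segments(snapshotDir)
    if err != nil {
        return err
    }

    var reqs []snapshotsync.DownloadRequest
    // Preverified snapshots are requested by name and torrent hash...
    for _, p := range preverified {
        reqs = append(reqs, snapshotsync.NewDownloadRequest(nil, p.Name, p.Hash))
    }
    // ...while gaps are requested by range; BuildProtoRequest expands each full
    // DEFAULT_SEGMENT_SIZE range into one item per snapshot type.
    for i := range missing {
        reqs = append(reqs, snapshotsync.NewDownloadRequest(&missing[i], "", ""))
    }
    return snapshotsync.RequestSnapshotDownload(ctx, reqs, downloaderClient)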
@@ -434,7 +434,7 @@ func (s *RoSnapshots) Reopen() error { s.Bodies.segments = append(s.Bodies.segments, seg) } { - seg := &HeaderSegment{From: f.From, To: f.To} + seg := &HeaderSegment{ranges: MergeRange{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Headers) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -446,7 +446,7 @@ func (s *RoSnapshots) Reopen() error { s.Headers.segments = append(s.Headers.segments, seg) } { - seg := &TxnSegment{From: f.From, To: f.To} + seg := &TxnSegment{ranges: MergeRange{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Transactions) seg.Seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -471,23 +471,23 @@ func (s *RoSnapshots) Reopen() error { s.segmentsReady.Store(true) for _, sn := range s.Headers.segments { - sn.idxHeaderHash, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.From, sn.To, snap.Headers.String()))) + sn.idxHeaderHash, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Headers.String()))) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } } for _, sn := range s.Bodies.segments { - sn.idxBodyNumber, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.From, sn.To, snap.Bodies.String()))) + sn.idxBodyNumber, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Bodies.String()))) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } } for _, sn := range s.Txs.segments { - sn.IdxTxnHash, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.From, sn.To, snap.Transactions.String()))) + sn.IdxTxnHash, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Transactions.String()))) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } - sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.From, sn.To, snap.Transactions2Block.String()))) + sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Transactions2Block.String()))) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } @@ -517,7 +517,7 @@ func (s *RoSnapshots) ReopenSegments() error { var segmentsMaxSet bool for _, f := range files { { - seg := &BodySegment{From: f.From, To: f.To} + seg := &BodySegment{ranges: MergeRange{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Bodies) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -529,7 +529,7 @@ func (s *RoSnapshots) ReopenSegments() error { s.Bodies.segments = append(s.Bodies.segments, seg) } { - seg := &HeaderSegment{From: f.From, To: f.To} + seg := &HeaderSegment{ranges: MergeRange{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Headers) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -541,7 +541,7 @@ func (s *RoSnapshots) ReopenSegments() error { s.Headers.segments = append(s.Headers.segments, seg) } { - seg := &TxnSegment{From: f.From, To: f.To} + seg := &TxnSegment{ranges: MergeRange{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Transactions) seg.Seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -600,15 +600,15 @@ func (s *RoSnapshots) PrintDebug() { fmt.Printf("sn: %d, %d\n", s.segmentsMax.Load(), s.idxMax.Load()) fmt.Println(" == Snapshots, Header") for _, sn := range 
s.Headers.segments { - fmt.Printf("%d, %t\n", sn.From, sn.idxHeaderHash == nil) + fmt.Printf("%d, %t\n", sn.ranges.from, sn.idxHeaderHash == nil) } fmt.Println(" == Snapshots, Body") for _, sn := range s.Bodies.segments { - fmt.Printf("%d, %t\n", sn.From, sn.idxBodyNumber == nil) + fmt.Printf("%d, %t\n", sn.ranges.from, sn.idxBodyNumber == nil) } fmt.Println(" == Snapshots, Txs") for _, sn := range s.Txs.segments { - fmt.Printf("%d, %t, %t\n", sn.From, sn.IdxTxnHash == nil, sn.IdxTxnHash2BlockNum == nil) + fmt.Printf("%d, %t, %t\n", sn.ranges.from, sn.IdxTxnHash == nil, sn.IdxTxnHash2BlockNum == nil) } } func (s *RoSnapshots) ViewHeaders(blockNum uint64, f func(sn *HeaderSegment) error) (found bool, err error) { @@ -639,7 +639,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD errs := make(chan error, len(segments)*2) workersCh := make(chan struct{}, workers) for _, sn := range segments { - if sn.From < from { + if sn.ranges.from < from { continue } @@ -667,7 +667,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD default: } - }(sn.From, sn.To) + }(sn.ranges.from, sn.ranges.to) } go func() { wg.Wait() @@ -688,7 +688,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD errs := make(chan error, len(segments)*2) workersCh := make(chan struct{}, workers) for _, sn := range segments { - if sn.From < from { + if sn.ranges.from < from { continue } @@ -716,7 +716,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD default: } - }(sn.From, sn.To) + }(sn.ranges.from, sn.ranges.to) } go func() { wg.Wait() @@ -741,7 +741,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD errs := make(chan error, len(segments)*2) workersCh := make(chan struct{}, workers) for i, sn := range segments { - if sn.From < from { + if sn.ranges.from < from { continue } @@ -772,7 +772,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD default: } - }(sn.From, sn.To) + }(sn.ranges.from, sn.ranges.to) } go func() { wg.Wait() @@ -1013,7 +1013,12 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 return fmt.Errorf("Reopen: %w", err) } - return RequestSnapshotDownload(ctx, ranges, downloader) + var downloadRequest []DownloadRequest + for _, r := range ranges { + downloadRequest = append(downloadRequest, NewDownloadRequest(&r, "", "")) + } + + return RequestSnapshotDownload(ctx, downloadRequest, downloader) } func DumpBlocks(ctx context.Context, blockFrom, blockTo, blocksPerFile uint64, tmpDir, snapDir string, chainDB kv.RoDB, workers int, lvl log.Lvl) error { @@ -1727,20 +1732,20 @@ func (r MergeRange) String() string { return fmt.Sprintf("%dk-%dk", r.from/1000, func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []MergeRange) { for i := len(snapshots.Headers.segments) - 1; i > 0; i-- { sn := snapshots.Headers.segments[i] - if sn.To-sn.From >= snap.DEFAULT_SEGMENT_SIZE { // is complete .seg + if sn.ranges.to-sn.ranges.from >= snap.DEFAULT_SEGMENT_SIZE { // is complete .seg continue } for _, span := range []uint64{500_000, 100_000, 10_000} { - if sn.To%span != 0 { + if sn.ranges.to%span != 0 { continue } - if sn.To-sn.From == span { + if sn.ranges.to-sn.ranges.from == span { break } - aggFrom := sn.To - span - res = append(res, MergeRange{from: aggFrom, to: sn.To}) - for snapshots.Headers.segments[i].From > aggFrom { + aggFrom := sn.ranges.to - span + res = append(res, MergeRange{from: 
aggFrom, to: sn.ranges.to}) + for snapshots.Headers.segments[i].ranges.from > aggFrom { i-- } break @@ -1754,10 +1759,10 @@ func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (toMergeH return snapshots.Bodies.View(func(bSegments []*BodySegment) error { return snapshots.Txs.View(func(tSegments []*TxnSegment) error { for i, sn := range hSegments { - if sn.From < from { + if sn.ranges.from < from { continue } - if sn.To > to { + if sn.ranges.to > to { break } @@ -1947,17 +1952,11 @@ func NewDownloadRequest(ranges *MergeRange, path string, torrentHash string) Dow } // builds the snapshots download request and downloads them -func RequestSnapshotDownload(ctx context.Context, ranges []MergeRange, downloader proto_downloader.DownloaderClient) error { +func RequestSnapshotDownload(ctx context.Context, downloadRequest []DownloadRequest, downloader proto_downloader.DownloaderClient) error { // start seed large .seg of large size - var downloadRequest []DownloadRequest - for _, r := range ranges { - downloadRequest = append(downloadRequest, NewDownloadRequest(&r, "", "")) - } req := BuildProtoRequest(downloadRequest) - if len(req.Items) > 0 && downloader != nil { - if _, err := downloader.Download(ctx, req); err != nil { - return err - } + if _, err := downloader.Download(ctx, req); err != nil { + return err } return nil } diff --git a/turbo/snapshotsync/block_snapshots_test.go b/turbo/snapshotsync/block_snapshots_test.go index a41757048a3..65a90ba54ad 100644 --- a/turbo/snapshotsync/block_snapshots_test.go +++ b/turbo/snapshotsync/block_snapshots_test.go @@ -163,14 +163,14 @@ func TestOpenAllSnapshot(t *testing.T) { require.Equal(2, len(s.Headers.segments)) ok, err := s.ViewTxs(10, func(sn *TxnSegment) error { - require.Equal(int(sn.To), 500_000) + require.Equal(int(sn.ranges.to), 500_000) return nil }) require.NoError(err) require.True(ok) ok, err = s.ViewTxs(500_000, func(sn *TxnSegment) error { - require.Equal(int(sn.To), 1_000_000) // [from:to) + require.Equal(int(sn.ranges.to), 1_000_000) // [from:to) return nil }) require.NoError(err) diff --git a/turbo/snapshotsync/snap/files.go b/turbo/snapshotsync/snap/files.go index fa009192ce0..90f0badda58 100644 --- a/turbo/snapshotsync/snap/files.go +++ b/turbo/snapshotsync/snap/files.go @@ -167,8 +167,6 @@ func TmpFiles(dir string) (res []string, err error) { return res, nil } -var ErrSnapshotMissed = fmt.Errorf("snapshot missed") - // ParseDir - reading dir ( func ParseDir(dir string) (res []FileInfo, err error) { files, err := os.ReadDir(dir) From 1110c350338d566612b0027c07140e35d448f442 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Tue, 19 Jul 2022 16:03:35 +0200 Subject: [PATCH 29/72] =?UTF-8?q?G=C3=B6rli=20Terminal=20Total=20Difficult?= =?UTF-8?q?y=20(#4752)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- params/chainspecs/goerli.json | 1 + 1 file changed, 1 insertion(+) diff --git a/params/chainspecs/goerli.json b/params/chainspecs/goerli.json index 7596abd2be2..16a8cfede28 100644 --- a/params/chainspecs/goerli.json +++ b/params/chainspecs/goerli.json @@ -13,6 +13,7 @@ "istanbulBlock": 1561651, "berlinBlock": 4460644, "londonBlock": 5062605, + "terminalTotalDifficulty": 10790000, "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "clique": { "period": 15, From d0399012c262d24adbaef26a15dbef53cfd3e38b Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin 
<34320705+yperbasis@users.noreply.github.com> Date: Tue, 19 Jul 2022 16:31:30 +0200 Subject: [PATCH 30/72] Fix txn removal in PendingPool (#4754) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index efcd1b91f37..438ff4300a5 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220719082624-745b9b6b98dc + github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 985b44713ef..8905269c01d 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719082624-745b9b6b98dc h1:5opLy9YqL26YvSNGKxHcJO4X/R7Q3FU4ajp7jhbZPBE= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719082624-745b9b6b98dc/go.mod h1:8wlgUF6YVdB3fjGg9VbQshirfJvi1h+qoHDYrPqAHoE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 h1:AMu0iTB2BlgeBTxJvAa7amzz6WmyX5xxnLOF2LFhkTs= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286/go.mod h1:8wlgUF6YVdB3fjGg9VbQshirfJvi1h+qoHDYrPqAHoE= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From d4f865d725050e7e5fb18aec8fed31c1d0398f7b Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 19 Jul 2022 22:31:15 +0200 Subject: [PATCH 31/72] Added proper cleanup when we get notified of new height (#4753) * added proper cleanup when we get notified of new height * added extra cleanup * removed bad if condition * fixed hive tests Co-authored-by: giuliorebuffo --- eth/stagedsync/stage_headers.go | 15 +++++++++------ turbo/engineapi/fork_validator.go | 25 ++++++++++++++++++++++--- 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 30a38f385d8..33b4c16ff8e 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -258,11 +258,11 @@ func startHandlingForkChoice( cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter, ) (*privateapi.PayloadStatus, error) { - headerHash := forkChoice.HeadBlockHash - log.Debug(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) if cfg.memoryOverlay { - defer cfg.forkValidator.Clear(tx) + defer cfg.forkValidator.ClearWithUnwind(tx) } + headerHash := forkChoice.HeadBlockHash + log.Debug(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) currentHeadHash := rawdb.ReadHeadHeaderHash(tx) if currentHeadHash == headerHash { // no-op @@ -577,10 +577,11 @@ func verifyAndSaveNewPoSHeader( forkingHash, err := cfg.blockReader.CanonicalHash(ctx, tx, forkingPoint) canExtendCanonical := forkingHash == currentHeadHash - canExtendFork := 
cfg.forkValidator.ExtendingForkHeadHash() == (common.Hash{}) || header.ParentHash == cfg.forkValidator.ExtendingForkHeadHash() - if cfg.memoryOverlay && (canExtendFork || header.ParentHash != currentHeadHash) { - status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, header.ParentHash == currentHeadHash /* extendCanonical */) + if cfg.memoryOverlay { + extendingHash := cfg.forkValidator.ExtendingForkHeadHash() + extendCanonical := (extendingHash == common.Hash{} && header.ParentHash == currentHeadHash) || extendingHash == header.ParentHash + status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, extendCanonical) if criticalError != nil { return nil, false, criticalError } @@ -664,6 +665,8 @@ func schedulePoSDownload( } func verifyAndSaveDownloadedPoSHeaders(tx kv.RwTx, cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter) { + defer cfg.forkValidator.Clear() + var lastValidHash common.Hash var badChainError error var foundPow bool diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index ae83190cd4b..7baaba9c052 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -84,6 +84,12 @@ func (fv *ForkValidator) ExtendingForkHeadHash() common.Hash { // NotifyCurrentHeight is to be called at the end of the stage cycle and repressent the last processed block. func (fv *ForkValidator) NotifyCurrentHeight(currentHeight uint64) { fv.currentHeight = currentHeight + // If the head changed,e previous assumptions on head are incorrect now. + if fv.extendingFork != nil { + fv.extendingFork.Rollback() + } + fv.extendingFork = nil + fv.extendingForkHeadHash = common.Hash{} } // FlushExtendingFork flush the current extending fork if fcu chooses its head hash as the its forkchoice. @@ -176,7 +182,16 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body // Clear wipes out current extending fork data, this method is called after fcu is called, // because fcu decides what the head is and after the call is done all the non-chosed forks are // to be considered obsolete. -func (fv *ForkValidator) Clear(tx kv.RwTx) { +func (fv *ForkValidator) Clear() { + if fv.extendingFork != nil { + fv.extendingFork.Rollback() + } + fv.extendingForkHeadHash = common.Hash{} + fv.extendingFork = nil +} + +// Clear wipes out current extending fork data and notify txpool. +func (fv *ForkValidator) ClearWithUnwind(tx kv.RwTx) { sb, ok := fv.sideForksBlock[fv.extendingForkHeadHash] // If we did not flush the fork state, then we need to notify the txpool through unwind. if fv.extendingFork != nil && fv.extendingForkHeadHash != (common.Hash{}) && ok { @@ -187,8 +202,7 @@ func (fv *ForkValidator) Clear(tx kv.RwTx) { } fv.extendingFork.Rollback() } - fv.extendingForkHeadHash = common.Hash{} - fv.extendingFork = nil + fv.Clear() } // validateAndStorePayload validate and store a payload fork chain if such chain results valid. @@ -198,6 +212,11 @@ func (fv *ForkValidator) validateAndStorePayload(tx kv.RwTx, header *types.Heade if validationError != nil { latestValidHash = header.ParentHash status = remote.EngineStatus_INVALID + if fv.extendingFork != nil { + fv.extendingFork.Rollback() + fv.extendingFork = nil + } + fv.extendingForkHeadHash = common.Hash{} return } // If we do not have the body we can recover it from the batch. 
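The ForkValidator changes in the patch above split cleanup into two paths: NotifyCurrentHeight discards the in-memory extending fork as soon as a new canonical height is processed, while ClearWithUnwind additionally notifies the txpool of the reverted transactions before rolling back. Below is a minimal Go sketch of the discard-on-new-height idea only; the forkTracker type, the overlay interface, and all field names are illustrative assumptions, not Erigon's actual API.

package forksketch

// memoryOverlay stands in for the in-memory batch that holds side-fork state.
type memoryOverlay interface {
	Rollback()
}

// forkTracker is a simplified stand-in for ForkValidator.
type forkTracker struct {
	currentHeight uint64
	overlay       memoryOverlay // state built on top of the current head
	overlayTip    [32]byte      // hash of the fork tip the overlay extends
}

// onNewHeight mirrors the idea of NotifyCurrentHeight: once the canonical head
// moves, any previously built extending fork is stale, so roll the overlay back
// and forget its tip.
func (f *forkTracker) onNewHeight(height uint64) {
	f.currentHeight = height
	if f.overlay != nil {
		f.overlay.Rollback()
	}
	f.overlay = nil
	f.overlayTip = [32]byte{}
}

In the actual patch the same rollback is performed on the Erigon types, with ClearWithUnwind additionally replaying an unwind so the txpool learns about the dropped side-fork transactions before the overlay is discarded.
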
From 5805d963ea5c352d4b4baac0b2c966bf1f70b056 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 20 Jul 2022 09:34:12 +0700 Subject: [PATCH 32/72] erigon-snapshot: convert from git sumbodule to golang package (#4760) --- .gitmodules | 3 - eth/stagedsync/stage_headers.go | 4 +- eth/stagedsync/stage_senders.go | 6 +- go.mod | 6 +- go.sum | 2 + turbo/snapshotsync/block_snapshots.go | 4 +- turbo/snapshotsync/block_snapshots_test.go | 4 +- turbo/snapshotsync/snap/files.go | 4 +- turbo/snapshotsync/snapcfg/util.go | 112 ++++++++++++++++ turbo/snapshotsync/snapshothashes/embed.go | 123 ------------------ .../snapshothashes/erigon-snapshots | 1 - 11 files changed, 130 insertions(+), 139 deletions(-) create mode 100644 turbo/snapshotsync/snapcfg/util.go delete mode 100644 turbo/snapshotsync/snapshothashes/embed.go delete mode 160000 turbo/snapshotsync/snapshothashes/erigon-snapshots diff --git a/.gitmodules b/.gitmodules index e7ceb250819..e1a0db9182e 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,9 +1,6 @@ [submodule "tests"] path = tests/testdata url = https://github.com/ethereum/tests -[submodule "turbo/snapshotsync/snapshothashes/erigon-snapshots"] - path = turbo/snapshotsync/snapshothashes/erigon-snapshots - url = https://github.com/ledgerwatch/erigon-snapshot.git [submodule "cmd/downloader/trackers/trackerslist"] path = cmd/downloader/trackers/trackerslist url = https://github.com/ngosang/trackerslist.git diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 33b4c16ff8e..4b3a5224348 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -27,7 +27,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapcfg" "github.com/ledgerwatch/erigon/turbo/stages/bodydownload" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/log/v3" @@ -1348,7 +1348,7 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { } // send all hashes to the Downloader service - preverified := snapshothashes.KnownConfig(cfg.chainConfig.ChainName).Preverified + preverified := snapcfg.KnownCfg(cfg.chainConfig.ChainName).Preverified i := 0 var downloadRequest []snapshotsync.DownloadRequest // build all download requests diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go index ae01802da06..22f6c5ce513 100644 --- a/eth/stagedsync/stage_senders.go +++ b/eth/stagedsync/stage_senders.go @@ -24,7 +24,7 @@ import ( "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapcfg" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/secp256k1" @@ -42,7 +42,7 @@ type SendersCfg struct { prune prune.Mode chainConfig *params.ChainConfig blockRetire *snapshotsync.BlockRetire - snapshotHashesCfg *snapshothashes.Config + snapshotHashesCfg *snapcfg.Cfg hd *headerdownload.HeaderDownload } @@ -62,7 +62,7 @@ func StageSendersCfg(db kv.RwDB, chainCfg *params.ChainConfig, badBlockHalt bool chainConfig: chainCfg, prune: prune, blockRetire: br, - snapshotHashesCfg: snapshothashes.KnownConfig(chainCfg.ChainName), + 
snapshotHashesCfg: snapcfg.KnownCfg(chainCfg.ChainName), hd: hd, } } diff --git a/go.mod b/go.mod index 438ff4300a5..4ca64f2642a 100644 --- a/go.mod +++ b/go.mod @@ -2,6 +2,11 @@ module github.com/ledgerwatch/erigon go 1.18 +require ( + github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 + github.com/ledgerwatch/erigon-snapshot v1.0.0 +) + require ( github.com/RoaringBitmap/roaring v1.2.1 github.com/VictoriaMetrics/fastcache v1.10.0 @@ -36,7 +41,6 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 8905269c01d..120e46a2e14 100644 --- a/go.sum +++ b/go.sum @@ -392,6 +392,8 @@ github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7 github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 h1:AMu0iTB2BlgeBTxJvAa7amzz6WmyX5xxnLOF2LFhkTs= github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286/go.mod h1:8wlgUF6YVdB3fjGg9VbQshirfJvi1h+qoHDYrPqAHoE= +github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= +github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 8f7ea07da1f..144f4c206e4 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -35,7 +35,7 @@ import ( "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapcfg" "github.com/ledgerwatch/log/v3" "go.uber.org/atomic" "golang.org/x/exp/slices" @@ -309,7 +309,7 @@ func (s *RoSnapshots) IndicesMax() uint64 { return s.idxMax.Load() } func (s *RoSnapshots) SegmentsMax() uint64 { return s.segmentsMax.Load() } func (s *RoSnapshots) BlocksAvailable() uint64 { return cmp.Min(s.segmentsMax.Load(), s.idxMax.Load()) } -func (s *RoSnapshots) EnsureExpectedBlocksAreAvailable(cfg *snapshothashes.Config) error { +func (s *RoSnapshots) EnsureExpectedBlocksAreAvailable(cfg *snapcfg.Cfg) error { if s.BlocksAvailable() < cfg.ExpectBlocks { return fmt.Errorf("app must wait until all expected snapshots are available. 
Expected: %d, Available: %d", cfg.ExpectBlocks, s.BlocksAvailable()) } diff --git a/turbo/snapshotsync/block_snapshots_test.go b/turbo/snapshotsync/block_snapshots_test.go index 65a90ba54ad..fb1c8f8ab83 100644 --- a/turbo/snapshotsync/block_snapshots_test.go +++ b/turbo/snapshotsync/block_snapshots_test.go @@ -14,7 +14,7 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/params/networkname" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapcfg" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" ) @@ -127,7 +127,7 @@ func TestCanRetire(t *testing.T) { } func TestOpenAllSnapshot(t *testing.T) { dir, require := t.TempDir(), require.New(t) - chainSnapshotCfg := snapshothashes.KnownConfig(networkname.MainnetChainName) + chainSnapshotCfg := snapcfg.KnownCfg(networkname.MainnetChainName) chainSnapshotCfg.ExpectBlocks = math.MaxUint64 cfg := ethconfig.Snapshot{Enabled: true} createFile := func(from, to uint64, name snap.Type) { createTestSegmentFile(t, from, to, name, dir) } diff --git a/turbo/snapshotsync/snap/files.go b/turbo/snapshotsync/snap/files.go index 90f0badda58..3c24290c21b 100644 --- a/turbo/snapshotsync/snap/files.go +++ b/turbo/snapshotsync/snap/files.go @@ -10,7 +10,7 @@ import ( "strings" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapcfg" "golang.org/x/exp/slices" ) @@ -214,7 +214,7 @@ func ParseDir(dir string) (res []FileInfo, err error) { } func RemoveNonPreverifiedFiles(chainName, snapDir string) error { - preverified := snapshothashes.KnownConfig(chainName).Preverified + preverified := snapcfg.KnownCfg(chainName).Preverified keep := map[string]struct{}{} for _, p := range preverified { ext := filepath.Ext(p.Name) diff --git a/turbo/snapshotsync/snapcfg/util.go b/turbo/snapshotsync/snapcfg/util.go new file mode 100644 index 00000000000..bf7e007b4a8 --- /dev/null +++ b/turbo/snapshotsync/snapcfg/util.go @@ -0,0 +1,112 @@ +package snapcfg + +import ( + _ "embed" + "path/filepath" + "strconv" + "strings" + + snapshothashes "github.com/ledgerwatch/erigon-snapshot" + "github.com/ledgerwatch/erigon/params/networkname" + "github.com/pelletier/go-toml/v2" + "golang.org/x/exp/slices" +) + +var Mainnet = fromToml(snapshothashes.Mainnet) + +var Goerli = fromToml(snapshothashes.Goerli) + +var Bsc = fromToml(snapshothashes.Bsc) + +var Ropsten = fromToml(snapshothashes.Ropsten) + +var Mumbai = fromToml(snapshothashes.Mumbai) + +var BorMainnet = fromToml(snapshothashes.BorMainnet) + +type PreverifiedItem struct { + Name string + Hash string +} +type Preverified []PreverifiedItem +type preverified map[string]string + +func fromToml(in []byte) (out Preverified) { + var outMap preverified + if err := toml.Unmarshal(in, &outMap); err != nil { + panic(err) + } + return doSort(outMap) +} +func doSort(in preverified) Preverified { + out := make(Preverified, 0, len(in)) + for k, v := range in { + out = append(out, PreverifiedItem{k, v}) + } + slices.SortFunc(out, func(i, j PreverifiedItem) bool { return i.Name < j.Name }) + return out +} + +var ( + MainnetChainSnapshotCfg = newCfg(Mainnet) + GoerliChainSnapshotCfg = newCfg(Goerli) + BscChainSnapshotCfg = newCfg(Bsc) + RopstenChainSnapshotCfg = newCfg(Ropsten) + MumbaiChainSnapshotCfg = newCfg(Mumbai) + BorMainnetChainSnapshotCfg = newCfg(BorMainnet) +) 
+ +func newCfg(preverified Preverified) *Cfg { + return &Cfg{ExpectBlocks: maxBlockNum(preverified), Preverified: preverified} +} + +func maxBlockNum(preverified Preverified) uint64 { + max := uint64(0) + for _, p := range preverified { + _, fileName := filepath.Split(p.Name) + ext := filepath.Ext(fileName) + if ext != ".seg" { + continue + } + onlyName := fileName[:len(fileName)-len(ext)] + parts := strings.Split(onlyName, "-") + if parts[0] != "v1" { + panic("not implemented") + } + if parts[3] != "headers" { + continue + } + to, err := strconv.ParseUint(parts[2], 10, 64) + if err != nil { + panic(err) + } + if max < to { + max = to + } + } + if max == 0 { // to prevent underflow + return 0 + } + return max*1_000 - 1 +} + +type Cfg struct { + ExpectBlocks uint64 + Preverified Preverified +} + +var KnownCfgs = map[string]*Cfg{ + networkname.MainnetChainName: MainnetChainSnapshotCfg, + networkname.GoerliChainName: GoerliChainSnapshotCfg, + networkname.BSCChainName: BscChainSnapshotCfg, + networkname.RopstenChainName: RopstenChainSnapshotCfg, + networkname.MumbaiChainName: MumbaiChainSnapshotCfg, + networkname.BorMainnetChainName: BorMainnetChainSnapshotCfg, +} + +func KnownCfg(networkName string) *Cfg { + if c, ok := KnownCfgs[networkName]; ok { + return c + } + return newCfg(Preverified{}) +} diff --git a/turbo/snapshotsync/snapshothashes/embed.go b/turbo/snapshotsync/snapshothashes/embed.go deleted file mode 100644 index 8949a45ee45..00000000000 --- a/turbo/snapshotsync/snapshothashes/embed.go +++ /dev/null @@ -1,123 +0,0 @@ -package snapshothashes - -import ( - _ "embed" - "path/filepath" - "strconv" - "strings" - - "github.com/ledgerwatch/erigon/params/networkname" - "github.com/pelletier/go-toml/v2" - "golang.org/x/exp/slices" -) - -//go:embed erigon-snapshots/mainnet.toml -var mainnet []byte -var Mainnet = fromToml(mainnet) - -//go:embed erigon-snapshots/goerli.toml -var goerli []byte -var Goerli = fromToml(goerli) - -//go:embed erigon-snapshots/bsc.toml -var bsc []byte -var Bsc = fromToml(bsc) - -//go:embed erigon-snapshots/ropsten.toml -var ropsten []byte -var Ropsten = fromToml(ropsten) - -//go:embed erigon-snapshots/mumbai.toml -var mumbai []byte -var Mumbai = fromToml(mumbai) - -//go:embed erigon-snapshots/bor-mainnet.toml -var borMainnet []byte -var BorMainnet = fromToml(borMainnet) - -type PreverifiedItem struct { - Name string - Hash string -} -type Preverified []PreverifiedItem -type preverified map[string]string - -func fromToml(in []byte) (out Preverified) { - var outMap preverified - if err := toml.Unmarshal(in, &outMap); err != nil { - panic(err) - } - return doSort(outMap) -} -func doSort(in preverified) Preverified { - out := make(Preverified, 0, len(in)) - for k, v := range in { - out = append(out, PreverifiedItem{k, v}) - } - slices.SortFunc(out, func(i, j PreverifiedItem) bool { return i.Name < j.Name }) - return out -} - -var ( - MainnetChainSnapshotConfig = newConfig(Mainnet) - GoerliChainSnapshotConfig = newConfig(Goerli) - BscChainSnapshotConfig = newConfig(Bsc) - RopstenChainSnapshotConfig = newConfig(Ropsten) - MumbaiChainSnapshotConfig = newConfig(Mumbai) - BorMainnetChainSnapshotConfig = newConfig(BorMainnet) -) - -func newConfig(preverified Preverified) *Config { - return &Config{ExpectBlocks: maxBlockNum(preverified), Preverified: preverified} -} - -func maxBlockNum(preverified Preverified) uint64 { - max := uint64(0) - for _, p := range preverified { - _, fileName := filepath.Split(p.Name) - ext := filepath.Ext(fileName) - if ext != ".seg" { - continue - 
} - onlyName := fileName[:len(fileName)-len(ext)] - parts := strings.Split(onlyName, "-") - if parts[0] != "v1" { - panic("not implemented") - } - if parts[3] != "headers" { - continue - } - to, err := strconv.ParseUint(parts[2], 10, 64) - if err != nil { - panic(err) - } - if max < to { - max = to - } - } - if max == 0 { // to prevent underflow - return 0 - } - return max*1_000 - 1 -} - -type Config struct { - ExpectBlocks uint64 - Preverified Preverified -} - -var KnownConfigs map[string]*Config = map[string]*Config{ - networkname.MainnetChainName: MainnetChainSnapshotConfig, - networkname.GoerliChainName: GoerliChainSnapshotConfig, - networkname.BSCChainName: BscChainSnapshotConfig, - networkname.RopstenChainName: RopstenChainSnapshotConfig, - networkname.MumbaiChainName: MumbaiChainSnapshotConfig, - networkname.BorMainnetChainName: BorMainnetChainSnapshotConfig, -} - -func KnownConfig(networkName string) *Config { - if c, ok := KnownConfigs[networkName]; ok { - return c - } - return newConfig(Preverified{}) -} diff --git a/turbo/snapshotsync/snapshothashes/erigon-snapshots b/turbo/snapshotsync/snapshothashes/erigon-snapshots deleted file mode 160000 index 7e85e4d0028..00000000000 --- a/turbo/snapshotsync/snapshothashes/erigon-snapshots +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 7e85e4d0028c27f747d97f65ac0b8c252a050b39 From 1ecacde3a90f0e9da31bbafa66c19e97acc90068 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 20 Jul 2022 09:47:58 +0700 Subject: [PATCH 33/72] trackerslist: convert from git submodule to go package (#4761) --- .gitmodules | 4 -- cmd/downloader/trackers/embed.go | 29 +++------ cmd/downloader/trackers/trackerslist | 1 - go.mod | 5 +- go.sum | 2 + .../parallelcompress/decompress.go | 62 ------------------- 6 files changed, 14 insertions(+), 89 deletions(-) delete mode 160000 cmd/downloader/trackers/trackerslist delete mode 100644 turbo/snapshotsync/parallelcompress/decompress.go diff --git a/.gitmodules b/.gitmodules index e1a0db9182e..ae94b08f852 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,10 +1,6 @@ [submodule "tests"] path = tests/testdata url = https://github.com/ethereum/tests -[submodule "cmd/downloader/trackers/trackerslist"] - path = cmd/downloader/trackers/trackerslist - url = https://github.com/ngosang/trackerslist.git - [submodule "libmdbx"] path = libmdbx url = https://github.com/torquem-ch/libmdbx.git diff --git a/cmd/downloader/trackers/embed.go b/cmd/downloader/trackers/embed.go index 694f2eee40e..bf179e756aa 100644 --- a/cmd/downloader/trackers/embed.go +++ b/cmd/downloader/trackers/embed.go @@ -2,29 +2,18 @@ package trackers import ( "bufio" - _ "embed" "strings" -) - -//go:embed trackerslist/trackers_best.txt -var best string -var Best = split(best) - -//go:embed trackerslist/trackers_all_https.txt -var https string -var Https = split(https) -//go:embed trackerslist/trackers_all_http.txt -var http string -var Http = split(http) - -//go:embed trackerslist/trackers_all_udp.txt -var udp string -var Udp = split(udp) + "github.com/ledgerwatch/trackerslist" +) -//go:embed trackerslist/trackers_all_ws.txt -var ws string -var Ws = split(ws) +var ( + Best = split(trackerslist.Best) + Https = split(trackerslist.Https) + Http = split(trackerslist.Http) + Udp = split(trackerslist.Udp) + Ws = split(trackerslist.Ws) +) func split(txt string) (lines []string) { sc := bufio.NewScanner(strings.NewReader(txt)) diff --git a/cmd/downloader/trackers/trackerslist b/cmd/downloader/trackers/trackerslist deleted file mode 160000 index 17f277f3762..00000000000 --- 
a/cmd/downloader/trackers/trackerslist +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 17f277f376286f5a99db386421897d5f82031f57 diff --git a/go.mod b/go.mod index 4ca64f2642a..47404e9aac6 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,9 @@ go 1.18 require ( github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 github.com/ledgerwatch/erigon-snapshot v1.0.0 + github.com/ledgerwatch/log/v3 v3.4.1 + github.com/ledgerwatch/secp256k1 v1.0.0 + github.com/ledgerwatch/trackerslist v1.0.0 ) require ( @@ -41,8 +44,6 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/log/v3 v3.4.1 - github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 github.com/pelletier/go-toml v1.9.5 github.com/pelletier/go-toml/v2 v2.0.2 diff --git a/go.sum b/go.sum index 120e46a2e14..22e34193ee1 100644 --- a/go.sum +++ b/go.sum @@ -398,6 +398,8 @@ github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= github.com/ledgerwatch/secp256k1 v1.0.0/go.mod h1:SPmqJFciiF/Q0mPt2jVs2dTr/1TZBTIA+kPMmKgBAak= +github.com/ledgerwatch/trackerslist v1.0.0 h1:6gnQu93WCTL4jPcdmc8UEmw56Cb8IFQHLGnevfIeLwo= +github.com/ledgerwatch/trackerslist v1.0.0/go.mod h1:pCC+eEw8izNcnBBiSwvIq8kKsxDLInAafSW275jqFrg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lispad/go-generics-tools v1.1.0 h1:mbSgcxdFVmpoyso1X/MJHXbSbSL3dD+qhRryyxk+/XY= diff --git a/turbo/snapshotsync/parallelcompress/decompress.go b/turbo/snapshotsync/parallelcompress/decompress.go deleted file mode 100644 index 481717b2ab9..00000000000 --- a/turbo/snapshotsync/parallelcompress/decompress.go +++ /dev/null @@ -1,62 +0,0 @@ -package parallelcompress - -import ( - "bufio" - "encoding/binary" - "fmt" - "os" - "time" - - "github.com/ledgerwatch/erigon-lib/compress" - "github.com/ledgerwatch/erigon-lib/etl" - "github.com/ledgerwatch/log/v3" -) - -func Decompress(logPrefix, segFilePath, datFilePath string) error { - d, err := compress.NewDecompressor(segFilePath) - if err != nil { - return err - } - defer d.Close() - logEvery := time.NewTicker(20 * time.Second) - defer logEvery.Stop() - var df *os.File - if df, err = os.Create(datFilePath); err != nil { - return err - } - dw := bufio.NewWriterSize(df, etl.BufIOSize) - var word = make([]byte, 0, 256) - numBuf := make([]byte, binary.MaxVarintLen64) - var decodeTime time.Duration - g := d.MakeGetter() - start := time.Now() - wc := 0 - for g.HasNext() { - word, _ = g.Next(word[:0]) - decodeTime += time.Since(start) - n := binary.PutUvarint(numBuf, uint64(len(word))) - if _, e := dw.Write(numBuf[:n]); e != nil { - return e - } - if len(word) > 0 { - if _, e := dw.Write(word); e != nil { - return e - } - } - wc++ - select { - default: - case <-logEvery.C: - log.Info(fmt.Sprintf("[%s] Decompress", logPrefix), "millions", wc/1_000_000) - } - start = time.Now() - } - log.Info(fmt.Sprintf("[%s] Average decoding time", logPrefix), "per word", time.Duration(int64(decodeTime)/int64(wc))) - if err = dw.Flush(); err != nil { - return err - } - if err = df.Close(); err != nil { 
- return err - } - return nil -} From 2706b01cea4616ce199af19b8caf58b4b24a4363 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 20 Jul 2022 17:23:12 +0700 Subject: [PATCH 34/72] go mod (#4762) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 1bcdbe10a37..061abf375a5 100644 --- a/Makefile +++ b/Makefile @@ -80,7 +80,7 @@ docker-compose: validate_docker_build_args setup_xdg_data_home dbg: $(GO_DBG_BUILD) -o $(GOBIN)/ ./cmd/... -%.cmd: git-submodules +%.cmd: @# Note: $* is replaced by the command name @echo "Building $*" @cd ./cmd/$* && $(GOBUILD) -o $(GOBIN)/$* From f8c37be3b2f9ff5d594a29fbe33137b9d99ce37b Mon Sep 17 00:00:00 2001 From: Andrea Lanfranchi Date: Wed, 20 Jul 2022 13:42:33 +0200 Subject: [PATCH 35/72] Only test and db-tools require submodule update (#4765) --- wmake.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wmake.ps1 b/wmake.ps1 index 0771cbbe230..b66e7842ea9 100644 --- a/wmake.ps1 +++ b/wmake.ps1 @@ -443,7 +443,7 @@ Write-Host @" "@ -if (!$WnoSubmoduleUpdate -and $BuildTargets[0] -ne "clean") { +if (!$WnoSubmoduleUpdate -and $BuildTargets[0] -ne "clean" -and ($BuildTargets.Contains("test") -or $BuildTargets.Contains("db-tools"))) { Write-Host " Updating git submodules ..." Invoke-Expression -Command "git.exe submodule update --init --recursive --force --quiet" if (!($?)) { From 00769e3dff674fab1911c1cb33366711a2163529 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 20 Jul 2022 15:16:20 +0200 Subject: [PATCH 36/72] Fix MDBX compilation on macOS (#4767) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 47404e9aac6..8015c7659e0 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 + github.com/ledgerwatch/erigon-lib v0.0.0-20220720105945-114da7eca320 github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 @@ -55,7 +55,7 @@ require ( github.com/stretchr/testify v1.8.0 github.com/tendermint/go-amino v0.14.1 github.com/tendermint/tendermint v0.31.11 - github.com/torquem-ch/mdbx-go v0.25.0 + github.com/torquem-ch/mdbx-go v0.25.1-0.20220720103744-b96489e94ece github.com/ugorji/go/codec v1.1.13 github.com/ugorji/go/codec/codecgen v1.1.13 github.com/urfave/cli v1.22.9 diff --git a/go.sum b/go.sum index 22e34193ee1..eeda3cf9e5c 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 h1:AMu0iTB2BlgeBTxJvAa7amzz6WmyX5xxnLOF2LFhkTs= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286/go.mod h1:8wlgUF6YVdB3fjGg9VbQshirfJvi1h+qoHDYrPqAHoE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220720105945-114da7eca320 h1:Wd2XPRsa/oVXz2j3a0554Ct8qAUS2IwZeTxdaCWzqbY= +github.com/ledgerwatch/erigon-lib v0.0.0-20220720105945-114da7eca320/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 
h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= @@ -623,8 +623,8 @@ github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDW github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/torquem-ch/mdbx-go v0.25.0 h1:k66O6GrqyAsXNn4tF87Q+ba4840aplv6O8Ph0FR1PCY= -github.com/torquem-ch/mdbx-go v0.25.0/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= +github.com/torquem-ch/mdbx-go v0.25.1-0.20220720103744-b96489e94ece h1:jwLF5BKBWPb00kMfRmSHJl0Hwe52HonOVpNkBJZR+XI= +github.com/torquem-ch/mdbx-go v0.25.1-0.20220720103744-b96489e94ece/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= github.com/ugorji/go v1.1.13/go.mod h1:jxau1n+/wyTGLQoCkjok9r5zFa/FxT6eI5HiHKQszjc= github.com/ugorji/go/codec v1.1.13 h1:013LbFhocBoIqgHeIHKlV4JWYhqogATYWZhIcH0WHn4= github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCBFCq1OeuU= From 9e8f625c533d967287035c1172e0e7b4941fa28b Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 20 Jul 2022 17:28:58 +0200 Subject: [PATCH 37/72] Fix txn removal in PendingPool (#4770) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8015c7659e0..c5023b793da 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220720105945-114da7eca320 + github.com/ledgerwatch/erigon-lib v0.0.0-20220720144911-046e4165b52a github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/go.sum b/go.sum index eeda3cf9e5c..11bce9fb5ec 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220720105945-114da7eca320 h1:Wd2XPRsa/oVXz2j3a0554Ct8qAUS2IwZeTxdaCWzqbY= -github.com/ledgerwatch/erigon-lib v0.0.0-20220720105945-114da7eca320/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220720144911-046e4165b52a h1:fRjDLDbieEy48O5BvMf1+ib8loZMA3nSiRtjxbuIsYw= +github.com/ledgerwatch/erigon-lib v0.0.0-20220720144911-046e4165b52a/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= From 73b028a5fd553519c24c8a36ce2bd8c96467f889 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Wed, 20 Jul 2022 18:16:42 +0200 Subject: [PATCH 38/72] better payload cleanup (#4772) Co-authored-by: giuliorebuffo --- 
eth/stagedsync/stage_headers.go | 2 +- turbo/engineapi/fork_validator.go | 96 ++++++++++++++++++++++++++++++- 2 files changed, 94 insertions(+), 4 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 4b3a5224348..19a44499c1f 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -259,7 +259,7 @@ func startHandlingForkChoice( headerInserter *headerdownload.HeaderInserter, ) (*privateapi.PayloadStatus, error) { if cfg.memoryOverlay { - defer cfg.forkValidator.ClearWithUnwind(tx) + defer cfg.forkValidator.ClearWithUnwind(tx, cfg.notifications.Accumulator, cfg.notifications.StateChangesConsumer) } headerHash := forkChoice.HeadBlockHash log.Debug(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index 7baaba9c052..a1b4f03bbb6 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -15,15 +15,22 @@ package engineapi import ( "bytes" + "context" + "encoding/binary" "fmt" + "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/changeset" + "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/log/v3" ) @@ -81,6 +88,89 @@ func (fv *ForkValidator) ExtendingForkHeadHash() common.Hash { return fv.extendingForkHeadHash } +func (fv *ForkValidator) rewindAccumulator(to uint64, accumulator *shards.Accumulator, c shards.StateChangeConsumer) error { + hash, err := rawdb.ReadCanonicalHash(fv.extendingFork, to) + if err != nil { + return fmt.Errorf("read canonical hash of unwind point: %w", err) + } + header := rawdb.ReadHeader(fv.extendingFork, hash, to) + if header == nil { + return fmt.Errorf("could not find header for block: %d", to) + } + + txs, err := rawdb.RawTransactionsRange(fv.extendingFork, to, to+1) + if err != nil { + return err + } + // Start the changes + accumulator.StartChange(to, hash, txs, true) + accChangesCursor, err := fv.extendingFork.CursorDupSort(kv.AccountChangeSet) + if err != nil { + return err + } + defer accChangesCursor.Close() + + storageChangesCursor, err := fv.extendingFork.CursorDupSort(kv.StorageChangeSet) + if err != nil { + return err + } + defer storageChangesCursor.Close() + + startingKey := dbutils.EncodeBlockNumber(to) + // Unwind notifications on accounts + for k, v, err := accChangesCursor.Seek(startingKey); k != nil; k, v, err = accChangesCursor.Next() { + if err != nil { + return err + } + _, dbKey, dbValue, err := changeset.FromDBFormat(k, v) + if err != nil { + return err + } + if len(dbValue) > 0 { + var acc accounts.Account + if err := acc.DecodeForStorage(dbValue); err != nil { + return err + } + // Fetch the code hash + var address common.Address + copy(address[:], dbKey) + if acc.Incarnation > 0 && acc.IsEmptyCodeHash() { + if codeHash, err2 := fv.extendingFork.GetOne(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], acc.Incarnation)); err2 == nil { + copy(acc.CodeHash[:], codeHash) + } + } + + newV := make([]byte, acc.EncodingLengthForStorage()) + 
acc.EncodeForStorage(newV) + accumulator.ChangeAccount(address, acc.Incarnation, newV) + } else { + var address common.Address + copy(address[:], dbKey) + accumulator.DeleteAccount(address) + } + } + // Unwind notifications on storage + for k, v, err := storageChangesCursor.Seek(startingKey); k != nil; k, v, err = accChangesCursor.Next() { + if err != nil { + return err + } + _, dbKey, dbValue, err := changeset.FromDBFormat(k, v) + if err != nil { + return err + } + var address common.Address + var incarnation uint64 + var location common.Hash + copy(address[:], dbKey[:length.Addr]) + incarnation = binary.BigEndian.Uint64(dbKey[length.Addr:]) + copy(location[:], dbKey[length.Addr+length.Incarnation:]) + accumulator.ChangeStorage(address, incarnation, location, common.CopyBytes(dbValue)) + } + accumulator.SendAndReset(context.Background(), c, header.BaseFee.Uint64(), header.GasLimit) + log.Info("Transaction pool notified of discard side fork.") + return nil +} + // NotifyCurrentHeight is to be called at the end of the stage cycle and repressent the last processed block. func (fv *ForkValidator) NotifyCurrentHeight(currentHeight uint64) { fv.currentHeight = currentHeight @@ -191,14 +281,14 @@ func (fv *ForkValidator) Clear() { } // Clear wipes out current extending fork data and notify txpool. -func (fv *ForkValidator) ClearWithUnwind(tx kv.RwTx) { +func (fv *ForkValidator) ClearWithUnwind(tx kv.RwTx, accumulator *shards.Accumulator, c shards.StateChangeConsumer) { sb, ok := fv.sideForksBlock[fv.extendingForkHeadHash] // If we did not flush the fork state, then we need to notify the txpool through unwind. if fv.extendingFork != nil && fv.extendingForkHeadHash != (common.Hash{}) && ok { fv.extendingFork.UpdateTxn(tx) // this will call unwind of extending fork to notify txpool of reverting transactions. - if err := fv.validatePayload(fv.extendingFork, nil, nil, sb.header.Number.Uint64()-1, nil, nil); err != nil { - log.Warn("Could not clean payload", "err", err) + if err := fv.rewindAccumulator(sb.header.Number.Uint64()-1, accumulator, c); err != nil { + log.Warn("could not notify txpool of invalid side fork", "err", err) } fv.extendingFork.Rollback() } From 7573a410692ff1499b68ae3c895f752d920f7ff4 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Thu, 21 Jul 2022 03:47:37 +0200 Subject: [PATCH 39/72] fixed accumulator nil case (#4773) Co-authored-by: giuliorebuffo --- turbo/engineapi/fork_validator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index a1b4f03bbb6..7e6853725ec 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -284,7 +284,7 @@ func (fv *ForkValidator) Clear() { func (fv *ForkValidator) ClearWithUnwind(tx kv.RwTx, accumulator *shards.Accumulator, c shards.StateChangeConsumer) { sb, ok := fv.sideForksBlock[fv.extendingForkHeadHash] // If we did not flush the fork state, then we need to notify the txpool through unwind. - if fv.extendingFork != nil && fv.extendingForkHeadHash != (common.Hash{}) && ok { + if fv.extendingFork != nil && accumulator != nil && fv.extendingForkHeadHash != (common.Hash{}) && ok { fv.extendingFork.UpdateTxn(tx) // this will call unwind of extending fork to notify txpool of reverting transactions. 
if err := fv.rewindAccumulator(sb.header.Number.Uint64()-1, accumulator, c); err != nil { From 1becfc509bba7eeb24c74aab8b64650b82b52a81 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Thu, 21 Jul 2022 04:06:26 +0200 Subject: [PATCH 40/72] extra reset before starting change in rewind side fork (#4774) * extra reset before starting change * extra reset before starting change Co-authored-by: giuliorebuffo --- turbo/engineapi/fork_validator.go | 1 + 1 file changed, 1 insertion(+) diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index 7e6853725ec..7fc99f5d1ae 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -103,6 +103,7 @@ func (fv *ForkValidator) rewindAccumulator(to uint64, accumulator *shards.Accumu return err } // Start the changes + accumulator.Reset(0) accumulator.StartChange(to, hash, txs, true) accChangesCursor, err := fv.extendingFork.CursorDupSort(kv.AccountChangeSet) if err != nil { From 770d7cf8bd6052f06d544e0218607539b8861eb9 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Thu, 21 Jul 2022 13:47:36 +0100 Subject: [PATCH 41/72] Hive CI output parse (#4737) * feat(ci): run hive tests as part of CI * feat(ci): add hive test runs and output parse * feat(ci): parse hive output for forked repos --- .github/workflows/ci.yml | 17 ++++++++++++-- .github/workflows/hive-results.yml | 37 ++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/hive-results.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 402b8c7cfde..af6a1c08223 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -104,8 +104,21 @@ jobs: fetch-depth: 0 # fetch git tags for "git describe" - name: make docker - run: DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + run: DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker # check with root permissions, should be cached from previous build - name: sudo make docker - run: sudo DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + + - name: run hive + run: sudo mkdir /results && docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work gatewayfm/hive:latest --sim ethereum/sync --results-root=/work/results --client erigon_ci-$GITHUB_SHA --docker.output --loglevel 5 + + - name: parse hive output + run: docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work --entrypoint /app/hivecioutput gatewayfm/hive:latest --resultsdir=/work/results --outdir=/work/results + + - name: archive hive results + uses: actions/upload-artifact@v3 + if: always() + with: + name: hive-ci-output + path: results/*.xml diff --git a/.github/workflows/hive-results.yml b/.github/workflows/hive-results.yml new file mode 100644 index 00000000000..c67dc5fcfab --- /dev/null +++ b/.github/workflows/hive-results.yml @@ -0,0 +1,37 @@ +name: Hive results + +on: + workflow_run: + workflows: ["Continuous integration", "ci"] + types: + - completed + +jobs: + hive-results: + name: Hive results + runs-on: ubuntu-latest + if: github.event.workflow_run.conclusion != 'skipped' + + steps: + - name: Download and extract artifacts + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + run: | + mkdir -p artifacts && cd artifacts + + artifacts_url=${{ github.event.workflow_run.artifacts_url }} + + gh api "$artifacts_url" -q '.artifacts[] | 
[.name, .archive_download_url] | @tsv' | while read artifact + do + IFS=$'\t' read name url <<< "$artifact" + gh api $url > "$name.zip" + unzip -d "$name" "$name.zip" + done + + - name: Publish hive test results + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + commit: ${{ github.event.workflow_run.head_sha }} + event_file: artifacts/Event File/event.json + event_name: ${{ github.event.workflow_run.event }} + files: "artifacts/**/*.xml" \ No newline at end of file From 42e59618a7e933c617e3cb50821e7ed5c454534f Mon Sep 17 00:00:00 2001 From: Igor Mandrigin Date: Thu, 21 Jul 2022 14:50:03 +0200 Subject: [PATCH 42/72] run engine tests on Hive --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index af6a1c08223..12ba6dac513 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -111,7 +111,7 @@ jobs: run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker - name: run hive - run: sudo mkdir /results && docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work gatewayfm/hive:latest --sim ethereum/sync --results-root=/work/results --client erigon_ci-$GITHUB_SHA --docker.output --loglevel 5 + run: sudo mkdir /results && docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work gatewayfm/hive:latest --sim ethereum/engine --results-root=/work/results --client erigon_ci-$GITHUB_SHA --docker.output --loglevel 5 - name: parse hive output run: docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work --entrypoint /app/hivecioutput gatewayfm/hive:latest --resultsdir=/work/results --outdir=/work/results From 6060b87840c7698af4aec9835c403a817e547418 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Thu, 21 Jul 2022 19:40:00 +0200 Subject: [PATCH 43/72] Fix binary vs raw confusion for PoS transaction (#4781) * Replace PayloadMessage with Block * RawTransactions -> BinaryTransactions for clarity * add a log warning --- core/types/transaction.go | 7 +++--- eth/stagedsync/stage_headers.go | 42 +++++++------------------------ ethdb/privateapi/ethbackend.go | 43 ++++++++++++++++++++++---------- turbo/engineapi/request_list.go | 10 ++------ turbo/stages/mock_sentry.go | 2 +- turbo/stages/sentry_mock_test.go | 17 ++++--------- 6 files changed, 51 insertions(+), 70 deletions(-) diff --git a/core/types/transaction.go b/core/types/transaction.go index 1934103b24c..d5310d1866b 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -98,13 +98,14 @@ type TransactionMisc struct { from atomic.Value } -type RawTransactions [][]byte +// RLP-marshalled legacy transactions and binary-marshalled (not wrapped into an RLP string) typed (EIP-2718) transactions +type BinaryTransactions [][]byte -func (t RawTransactions) Len() int { +func (t BinaryTransactions) Len() int { return len(t) } -func (t RawTransactions) EncodeIndex(i int, w *bytes.Buffer) { +func (t BinaryTransactions) EncodeIndex(i int, w *bytes.Buffer) { w.Write(t[i]) } diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 19a44499c1f..e1c316e7eb9 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -184,7 +184,7 @@ func HeadersPOS( if forkChoiceInsteadOfNewPayload { payloadStatus, err = startHandlingForkChoice(forkChoiceMessage, requestStatus, requestId, s, u, ctx, 
tx, cfg, headerInserter) } else { - payloadMessage := request.(*engineapi.PayloadMessage) + payloadMessage := request.(*types.Block) payloadStatus, err = handleNewPayload(payloadMessage, requestStatus, requestId, s, ctx, tx, cfg, headerInserter) } @@ -431,7 +431,7 @@ func finishHandlingForkChoice( } func handleNewPayload( - payloadMessage *engineapi.PayloadMessage, + block *types.Block, requestStatus engineapi.RequestStatus, requestId int, s *StageState, @@ -440,9 +440,9 @@ func handleNewPayload( cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter, ) (*privateapi.PayloadStatus, error) { - header := payloadMessage.Header + header := block.Header() headerNumber := header.Number.Uint64() - headerHash := header.Hash() + headerHash := block.Hash() log.Debug(fmt.Sprintf("[%s] Handling new payload", s.LogPrefix()), "height", headerNumber, "hash", headerHash) cfg.hd.UpdateTopSeenHeightPoS(headerNumber) @@ -507,38 +507,14 @@ func handleNewPayload( cfg.hd.BeaconRequestList.Remove(requestId) - for _, tx := range payloadMessage.Body.Transactions { - if types.TypedTransactionMarshalledAsRlpString(tx) { - log.Warn(fmt.Sprintf("[%s] typed txn marshalled as RLP string", s.LogPrefix()), "tx", common.Bytes2Hex(tx)) - cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: header.ParentHash, - ValidationError: errors.New("typed txn marshalled as RLP string"), - }, nil - } - } - - transactions, err := types.DecodeTransactions(payloadMessage.Body.Transactions) - if err != nil { - log.Warn(fmt.Sprintf("[%s] Error during Beacon transaction decoding", s.LogPrefix()), "err", err.Error()) - cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: header.ParentHash, - ValidationError: err, - }, nil - } - log.Debug(fmt.Sprintf("[%s] New payload begin verification", s.LogPrefix())) - response, success, err := verifyAndSaveNewPoSHeader(requestStatus, s, ctx, tx, cfg, header, payloadMessage.Body, headerInserter) + response, success, err := verifyAndSaveNewPoSHeader(requestStatus, s, ctx, tx, cfg, block, headerInserter) log.Debug(fmt.Sprintf("[%s] New payload verification ended", s.LogPrefix()), "success", success, "err", err) if err != nil || !success { return response, err } if cfg.bodyDownload != nil { - block := types.NewBlockFromStorage(headerHash, header, transactions, nil) cfg.bodyDownload.AddToPrefetch(block) } @@ -551,12 +527,12 @@ func verifyAndSaveNewPoSHeader( ctx context.Context, tx kv.RwTx, cfg HeadersCfg, - header *types.Header, - body *types.RawBody, + block *types.Block, headerInserter *headerdownload.HeaderInserter, ) (response *privateapi.PayloadStatus, success bool, err error) { + header := block.Header() headerNumber := header.Number.Uint64() - headerHash := header.Hash() + headerHash := block.Hash() if verificationErr := cfg.hd.VerifyHeader(header); verificationErr != nil { log.Warn("Verification failed for header", "hash", headerHash, "height", headerNumber, "err", verificationErr) @@ -581,7 +557,7 @@ func verifyAndSaveNewPoSHeader( if cfg.memoryOverlay { extendingHash := cfg.forkValidator.ExtendingForkHeadHash() extendCanonical := (extendingHash == common.Hash{} && header.ParentHash == currentHeadHash) || extendingHash == header.ParentHash - status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, extendCanonical) + status, latestValidHash, 
validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, block.RawBody(), extendCanonical) if criticalError != nil { return nil, false, criticalError } diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index 5925ad29009..f4abc963acf 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -303,10 +303,37 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E Difficulty: serenity.SerenityDifficulty, Nonce: serenity.SerenityNonce, ReceiptHash: gointerfaces.ConvertH256ToHash(req.ReceiptRoot), - TxHash: types.DeriveSha(types.RawTransactions(req.Transactions)), + TxHash: types.DeriveSha(types.BinaryTransactions(req.Transactions)), } blockHash := gointerfaces.ConvertH256ToHash(req.BlockHash) + if header.Hash() != blockHash { + log.Error("[NewPayload] invalid block hash", "stated", common.Hash(blockHash), "actual", header.Hash()) + return &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID_BLOCK_HASH}, nil + } + + for _, txn := range req.Transactions { + if types.TypedTransactionMarshalledAsRlpString(txn) { + log.Warn("[NewPayload] typed txn marshalled as RLP string", "txn", common.Bytes2Hex(txn)) + return &remote.EnginePayloadStatus{ + Status: remote.EngineStatus_INVALID, + LatestValidHash: nil, + ValidationError: "typed txn marshalled as RLP string", + }, nil + } + } + + transactions, err := types.DecodeTransactions(req.Transactions) + if err != nil { + log.Warn("[NewPayload] failed to decode transactions", "err", err) + return &remote.EnginePayloadStatus{ + Status: remote.EngineStatus_INVALID, + LatestValidHash: nil, + ValidationError: err.Error(), + }, nil + } + block := types.NewBlockFromStorage(blockHash, &header, transactions, nil) + tx, err := s.db.BeginRo(ctx) if err != nil { return nil, err @@ -322,6 +349,7 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E return &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{})}, nil } tx.Rollback() + // If another payload is already commissioned then we just reply with syncing if s.stageLoopIsBusy() { // We are still syncing a commissioned payload @@ -333,11 +361,6 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E return &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } - if header.Hash() != blockHash { - log.Error("[NewPayload] invalid block hash", "stated", common.Hash(blockHash), "actual", header.Hash()) - return &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID_BLOCK_HASH}, nil - } - // Lock the thread (We modify shared resources). 
log.Debug("[NewPayload] acquiring lock") s.lock.Lock() @@ -345,13 +368,7 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E log.Debug("[NewPayload] lock acquired") log.Debug("[NewPayload] sending block", "height", header.Number, "hash", common.Hash(blockHash)) - s.requestList.AddPayloadRequest(&engineapi.PayloadMessage{ - Header: &header, - Body: &types.RawBody{ - Transactions: req.Transactions, - Uncles: nil, - }, - }) + s.requestList.AddPayloadRequest(block) payloadStatus := <-s.statusCh log.Debug("[NewPayload] got reply", "payloadStatus", payloadStatus) diff --git a/turbo/engineapi/request_list.go b/turbo/engineapi/request_list.go index e66084c5fc9..11a2bc0ba13 100644 --- a/turbo/engineapi/request_list.go +++ b/turbo/engineapi/request_list.go @@ -10,12 +10,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" ) -// The message we are going to send to the stage sync in NewPayload -type PayloadMessage struct { - Header *types.Header - Body *types.RawBody -} - // The message we are going to send to the stage sync in ForkchoiceUpdated type ForkChoiceMessage struct { HeadBlockHash common.Hash @@ -31,7 +25,7 @@ const ( // RequestStatus values ) type RequestWithStatus struct { - Message interface{} // *PayloadMessage or *ForkChoiceMessage + Message interface{} // *Block or *ForkChoiceMessage Status RequestStatus } @@ -59,7 +53,7 @@ func NewRequestList() *RequestList { return rl } -func (rl *RequestList) AddPayloadRequest(message *PayloadMessage) { +func (rl *RequestList) AddPayloadRequest(message *types.Block) { rl.syncCond.L.Lock() defer rl.syncCond.L.Unlock() diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 4a3cd2e28bf..774e98fab39 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -522,7 +522,7 @@ func (ms *MockSentry) InsertChain(chain *core.ChainPack) error { return nil } -func (ms *MockSentry) SendPayloadRequest(message *engineapi.PayloadMessage) { +func (ms *MockSentry) SendPayloadRequest(message *types.Block) { ms.sentriesClient.Hd.BeaconRequestList.AddPayloadRequest(message) } diff --git a/turbo/stages/sentry_mock_test.go b/turbo/stages/sentry_mock_test.go index 9ab7015e06b..bd8e552c1be 100644 --- a/turbo/stages/sentry_mock_test.go +++ b/turbo/stages/sentry_mock_test.go @@ -571,12 +571,8 @@ func TestPoSDownloader(t *testing.T) { }, false /* intermediateHashes */) require.NoError(t, err) - // Send a payload with missing parent - payloadMessage := engineapi.PayloadMessage{ - Header: chain.TopBlock.Header(), - Body: chain.TopBlock.RawBody(), - } - m.SendPayloadRequest(&payloadMessage) + // Send a payload whose parent isn't downloaded yet + m.SendPayloadRequest(chain.TopBlock) headBlockHash, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, true, m.UpdateHead, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) @@ -640,12 +636,9 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { invalidTip := chain.TopBlock.Header() invalidTip.ParentHash = invalidParent.Hash() - // Send a payload with missing parent - payloadMessage := engineapi.PayloadMessage{ - Header: invalidTip, - Body: chain.TopBlock.RawBody(), - } - m.SendPayloadRequest(&payloadMessage) + // Send a payload with the parent missing + payloadMessage := types.NewBlockFromStorage(invalidTip.Hash(), invalidTip, chain.TopBlock.Transactions(), nil) + m.SendPayloadRequest(payloadMessage) headBlockHash, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, true, m.UpdateHead, 
nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) From 66758c79607e40bd96d1ccdeb0f57bd4b2d24524 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 22 Jul 2022 13:44:42 +0700 Subject: [PATCH 44/72] RetireBlocks: less arguments (#4785) * save * save --- cmd/state/commands/history22.go | 3 +- cmd/state/commands/state_recon.go | 3 +- core/state/state_recon_writer.go | 2 +- core/vm/lightclient/iavl/proof_path.go | 2 +- eth/stagedsync/stage_headers.go | 2 +- eth/stagedsync/stage_senders.go | 4 +- rpc/handler.go | 5 +- turbo/app/snapshots.go | 4 +- turbo/snapshotsync/block_snapshots.go | 72 +++++++++++--------------- 9 files changed, 37 insertions(+), 60 deletions(-) diff --git a/cmd/state/commands/history22.go b/cmd/state/commands/history22.go index a7ecf4d8ad9..02890f1f3c3 100644 --- a/cmd/state/commands/history22.go +++ b/cmd/state/commands/history22.go @@ -130,8 +130,7 @@ func History22(genesis *core.Genesis, logger log.Logger) error { prevTime := time.Now() var blockReader services.FullBlockReader - var allSnapshots *snapshotsync.RoSnapshots - allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) + allSnapshots := snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) defer allSnapshots.Close() if err := allSnapshots.Reopen(); err != nil { return fmt.Errorf("reopen snapshot segments: %w", err) diff --git a/cmd/state/commands/state_recon.go b/cmd/state/commands/state_recon.go index 231fcca9499..db5ea046249 100644 --- a/cmd/state/commands/state_recon.go +++ b/cmd/state/commands/state_recon.go @@ -376,8 +376,7 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { return err } var blockReader services.FullBlockReader - var allSnapshots *snapshotsync.RoSnapshots - allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) + allSnapshots := snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) defer allSnapshots.Close() if err := allSnapshots.Reopen(); err != nil { return fmt.Errorf("reopen snapshot segments: %w", err) diff --git a/core/state/state_recon_writer.go b/core/state/state_recon_writer.go index 49f2635c75f..0d1c8e8e1ef 100644 --- a/core/state/state_recon_writer.go +++ b/core/state/state_recon_writer.go @@ -156,7 +156,7 @@ func (rs *ReconState) RollbackTx(txTask TxTask, dependency uint64) { if rs.doneBitmap.Contains(dependency) { heap.Push(&rs.queue, txTask) } else { - tt, _ := rs.triggers[dependency] + tt := rs.triggers[dependency] tt = append(tt, txTask) rs.triggers[dependency] = tt } diff --git a/core/vm/lightclient/iavl/proof_path.go b/core/vm/lightclient/iavl/proof_path.go index de366f33813..5b2609654bb 100644 --- a/core/vm/lightclient/iavl/proof_path.go +++ b/core/vm/lightclient/iavl/proof_path.go @@ -118,7 +118,7 @@ func (pl PathToLeaf) isRightmost() bool { } func (pl PathToLeaf) isEmpty() bool { - return pl == nil || len(pl) == 0 + return len(pl) == 0 } func (pl PathToLeaf) dropRoot() PathToLeaf { diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index e1c316e7eb9..b982d87c421 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -1311,7 +1311,7 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { return err } dbEmpty := len(snInDB) == 0 - var missingSnapshots []snapshotsync.MergeRange + var missingSnapshots 
[]snapshotsync.Range if !dbEmpty { _, missingSnapshots, err = snapshotsync.Segments(cfg.snapshots.Dir()) if err != nil { diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go index 22f6c5ce513..0a9db4af808 100644 --- a/eth/stagedsync/stage_senders.go +++ b/eth/stagedsync/stage_senders.go @@ -9,7 +9,6 @@ import ( "sync" "time" - "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/common/length" @@ -430,8 +429,7 @@ func retireBlocksInSingleBackgroundThread(s *PruneState, cfg SendersCfg, ctx con } } - chainID, _ := uint256.FromBig(cfg.chainConfig.ChainID) - cfg.blockRetire.RetireBlocksInBackground(ctx, s.ForwardProgress, *chainID, log.LvlInfo) + cfg.blockRetire.RetireBlocksInBackground(ctx, s.ForwardProgress, log.LvlInfo) return nil } diff --git a/rpc/handler.go b/rpc/handler.go index 86985ea56e5..be73dbe7167 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -383,10 +383,7 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage, stream *json func (h *handler) isMethodAllowedByGranularControl(method string) bool { _, isForbidden := h.forbiddenList[method] if len(h.allowList) == 0 { - if isForbidden { - return false - } - return true + return !isForbidden } _, ok := h.allowList[method] diff --git a/turbo/app/snapshots.go b/turbo/app/snapshots.go index fd87d640159..b9c73f302d7 100644 --- a/turbo/app/snapshots.go +++ b/turbo/app/snapshots.go @@ -244,8 +244,6 @@ func doRetireCommand(cliCtx *cli.Context) error { defer chainDB.Close() cfg := ethconfig.NewSnapCfg(true, true, true) - chainConfig := tool.ChainConfigFromDB(chainDB) - chainID, _ := uint256.FromBig(chainConfig.ChainID) snapshots := snapshotsync.NewRoSnapshots(cfg, dirs.Snap) if err := snapshots.Reopen(); err != nil { return err @@ -256,7 +254,7 @@ func doRetireCommand(cliCtx *cli.Context) error { log.Info("Params", "from", from, "to", to, "every", every) for i := from; i < to; i += every { - if err := br.RetireBlocks(ctx, i, i+every, *chainID, log.LvlInfo); err != nil { + if err := br.RetireBlocks(ctx, i, i+every, log.LvlInfo); err != nil { panic(err) } if err := chainDB.Update(ctx, func(tx kv.RwTx) error { diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 144f4c206e4..fc7850eca7a 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -42,7 +42,7 @@ import ( ) type DownloadRequest struct { - ranges *MergeRange + ranges *Range path string torrentHash string } @@ -50,20 +50,20 @@ type DownloadRequest struct { type HeaderSegment struct { seg *compress.Decompressor // value: first_byte_of_header_hash + header_rlp idxHeaderHash *recsplit.Index // header_hash -> headers_segment_offset - ranges MergeRange + ranges Range } type BodySegment struct { seg *compress.Decompressor // value: rlp(types.BodyForStorage) idxBodyNumber *recsplit.Index // block_num_u64 -> bodies_segment_offset - ranges MergeRange + ranges Range } type TxnSegment struct { Seg *compress.Decompressor // value: first_byte_of_transaction_hash + sender_address + transaction_rlp IdxTxnHash *recsplit.Index // transaction_hash -> transactions_segment_offset IdxTxnHash2BlockNum *recsplit.Index // transaction_hash -> block_number - ranges MergeRange + ranges Range } func (sn *HeaderSegment) close() { @@ -382,22 +382,6 @@ func (s *RoSnapshots) ReopenSomeIndices(types ...snap.Type) (err error) { return nil } -func (s *RoSnapshots) AsyncOpenAll(ctx 
context.Context) { - go func() { - for !s.segmentsReady.Load() || !s.indicesReady.Load() { - select { - case <-ctx.Done(): - return - default: - } - if err := s.Reopen(); err != nil && !errors.Is(err, os.ErrNotExist) { - log.Error("AsyncOpenAll", "err", err) - } - time.Sleep(15 * time.Second) - } - }() -} - // OptimisticReopen - optimistically open snapshots (ignoring error), useful at App startup because: // - user must be able: delete any snapshot file and Erigon will self-heal by re-downloading // - RPC return Nil for historical blocks if snapshots are not open @@ -422,7 +406,7 @@ func (s *RoSnapshots) Reopen() error { s.Txs.segments = s.Txs.segments[:0] for _, f := range files { { - seg := &BodySegment{ranges: MergeRange{f.From, f.To}} + seg := &BodySegment{ranges: Range{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Bodies) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -434,7 +418,7 @@ func (s *RoSnapshots) Reopen() error { s.Bodies.segments = append(s.Bodies.segments, seg) } { - seg := &HeaderSegment{ranges: MergeRange{f.From, f.To}} + seg := &HeaderSegment{ranges: Range{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Headers) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -446,7 +430,7 @@ func (s *RoSnapshots) Reopen() error { s.Headers.segments = append(s.Headers.segments, seg) } { - seg := &TxnSegment{ranges: MergeRange{f.From, f.To}} + seg := &TxnSegment{ranges: Range{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Transactions) seg.Seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -517,7 +501,7 @@ func (s *RoSnapshots) ReopenSegments() error { var segmentsMaxSet bool for _, f := range files { { - seg := &BodySegment{ranges: MergeRange{f.From, f.To}} + seg := &BodySegment{ranges: Range{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Bodies) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -529,7 +513,7 @@ func (s *RoSnapshots) ReopenSegments() error { s.Bodies.segments = append(s.Bodies.segments, seg) } { - seg := &HeaderSegment{ranges: MergeRange{f.From, f.To}} + seg := &HeaderSegment{ranges: Range{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Headers) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -541,7 +525,7 @@ func (s *RoSnapshots) ReopenSegments() error { s.Headers.segments = append(s.Headers.segments, seg) } { - seg := &TxnSegment{ranges: MergeRange{f.From, f.To}} + seg := &TxnSegment{ranges: Range{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Transactions) seg.Seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -793,14 +777,14 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD return nil } -func noGaps(in []snap.FileInfo) (out []snap.FileInfo, missingSnapshots []MergeRange) { +func noGaps(in []snap.FileInfo) (out []snap.FileInfo, missingSnapshots []Range) { var prevTo uint64 for _, f := range in { if f.To <= prevTo { continue } if f.From != prevTo { // no gaps - missingSnapshots = append(missingSnapshots, MergeRange{prevTo, f.From}) + missingSnapshots = append(missingSnapshots, Range{prevTo, f.From}) continue } prevTo = f.To @@ -854,7 +838,7 @@ func noOverlaps(in []snap.FileInfo) (res []snap.FileInfo) { return res } -func Segments(dir string) (res []snap.FileInfo, missingSnapshots []MergeRange, err 
error) { +func Segments(dir string) (res []snap.FileInfo, missingSnapshots []Range, err error) { list, err := snap.Segments(dir) if err != nil { return nil, missingSnapshots, err @@ -944,10 +928,12 @@ func CanDeleteTo(curBlockNum uint64, snapshots *RoSnapshots) (blockTo uint64) { hardLimit := (curBlockNum/1_000)*1_000 - params.FullImmutabilityThreshold return cmp.Min(hardLimit, snapshots.BlocksAvailable()+1) } -func (br *BlockRetire) RetireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint256.Int, lvl log.Lvl) error { - return retireBlocks(ctx, blockFrom, blockTo, chainID, br.tmpDir, br.snapshots, br.db, br.workers, br.downloader, lvl, br.notifier) +func (br *BlockRetire) RetireBlocks(ctx context.Context, blockFrom, blockTo uint64, lvl log.Lvl) error { + chainConfig := tool.ChainConfigFromDB(br.db) + chainID, _ := uint256.FromBig(chainConfig.ChainID) + return retireBlocks(ctx, blockFrom, blockTo, *chainID, br.tmpDir, br.snapshots, br.db, br.workers, br.downloader, lvl, br.notifier) } -func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, forwardProgress uint64, chainID uint256.Int, lvl log.Lvl) { +func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, forwardProgress uint64, lvl log.Lvl) { if br.working.Load() { // go-routine is still working return @@ -968,7 +954,7 @@ func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, forwardProg return } - err := br.RetireBlocks(ctx, blockFrom, blockTo, chainID, lvl) + err := br.RetireBlocks(ctx, blockFrom, blockTo, lvl) br.result = &BlockRetireResult{ BlockFrom: blockFrom, BlockTo: blockTo, @@ -988,7 +974,7 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 return fmt.Errorf("DumpBlocks: %w", err) } if err := snapshots.Reopen(); err != nil { - return fmt.Errorf("Reopen: %w", err) + return fmt.Errorf("reopen: %w", err) } idxWorkers := workers if idxWorkers > 4 { @@ -998,7 +984,7 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 return err } if err := snapshots.Reopen(); err != nil { - return fmt.Errorf("Reopen: %w", err) + return fmt.Errorf("reopen: %w", err) } merger := NewMerger(tmpDir, workers, lvl, chainID, notifier) ranges := merger.FindMergeRanges(snapshots) @@ -1010,7 +996,7 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 return err } if err := snapshots.Reopen(); err != nil { - return fmt.Errorf("Reopen: %w", err) + return fmt.Errorf("reopen: %w", err) } var downloadRequest []DownloadRequest @@ -1723,13 +1709,13 @@ func NewMerger(tmpDir string, workers int, lvl log.Lvl, chainID uint256.Int, not return &Merger{tmpDir: tmpDir, workers: workers, lvl: lvl, chainID: chainID, notifier: notifier} } -type MergeRange struct { +type Range struct { from, to uint64 } -func (r MergeRange) String() string { return fmt.Sprintf("%dk-%dk", r.from/1000, r.to/1000) } +func (r Range) String() string { return fmt.Sprintf("%dk-%dk", r.from/1000, r.to/1000) } -func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []MergeRange) { +func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []Range) { for i := len(snapshots.Headers.segments) - 1; i > 0; i-- { sn := snapshots.Headers.segments[i] if sn.ranges.to-sn.ranges.from >= snap.DEFAULT_SEGMENT_SIZE { // is complete .seg @@ -1744,14 +1730,14 @@ func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []MergeRange) { break } aggFrom := sn.ranges.to - span - res = append(res, MergeRange{from: aggFrom, to: sn.ranges.to}) + res = append(res, 
Range{from: aggFrom, to: sn.ranges.to}) for snapshots.Headers.segments[i].ranges.from > aggFrom { i-- } break } } - slices.SortFunc(res, func(i, j MergeRange) bool { return i.from < j.from }) + slices.SortFunc(res, func(i, j Range) bool { return i.from < j.from }) return res } func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (toMergeHeaders, toMergeBodies, toMergeTxs []string, err error) { @@ -1779,7 +1765,7 @@ func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (toMergeH } // Merge does merge segments in given ranges -func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges []MergeRange, snapDir string, doIndex bool) error { +func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges []Range, snapDir string, doIndex bool) error { if len(mergeRanges) == 0 { return nil } @@ -1943,7 +1929,7 @@ func assertSegment(segmentFile string) { } } -func NewDownloadRequest(ranges *MergeRange, path string, torrentHash string) DownloadRequest { +func NewDownloadRequest(ranges *Range, path string, torrentHash string) DownloadRequest { return DownloadRequest{ ranges: ranges, path: path, From 1d378b6618cf20ca48425e0805c85970d8ca07d2 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Fri, 22 Jul 2022 09:18:19 +0200 Subject: [PATCH 45/72] Filter out bad tx with wrong chain id during block building phase. (#4783) * filter out bad tx with wrong chain id * report bad txs Co-authored-by: giuliorebuffo --- eth/stagedsync/stage_mining_create_block.go | 3 +++ go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go index ec6b3532624..f148f2ef811 100644 --- a/eth/stagedsync/stage_mining_create_block.go +++ b/eth/stagedsync/stage_mining_create_block.go @@ -141,6 +141,9 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc if err != nil { return err } + if transaction.GetChainID().ToBig().Cmp(cfg.chainConfig.ChainID) != 0 { + continue + } txs = append(txs, transaction) } var sender common.Address diff --git a/go.mod b/go.mod index c5023b793da..f96580dbf77 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220720144911-046e4165b52a + github.com/ledgerwatch/erigon-lib v0.0.0-20220721212928-1331bb661a22 github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/go.sum b/go.sum index 11bce9fb5ec..3447e0747c9 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220720144911-046e4165b52a h1:fRjDLDbieEy48O5BvMf1+ib8loZMA3nSiRtjxbuIsYw= -github.com/ledgerwatch/erigon-lib v0.0.0-20220720144911-046e4165b52a/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220721212928-1331bb661a22 h1:cGxOEtCnkVzX+RcGQbQHiDuV8dQHnGqcwTFl9q8Hnkg= +github.com/ledgerwatch/erigon-lib v0.0.0-20220721212928-1331bb661a22/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= 
github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= From a1777accd83814120def6b38150c2e381c957992 Mon Sep 17 00:00:00 2001 From: Artem Tsebrovskiy Date: Fri, 22 Jul 2022 09:47:33 +0100 Subject: [PATCH 46/72] fixed passing of raw byte slices to tx processing (#4782) --- cmd/sentry/sentry/sentry_multi_client.go | 2 +- turbo/stages/bodydownload/body_algos.go | 18 +++++++++++++++--- turbo/stages/bodydownload/body_data_struct.go | 4 ++-- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/cmd/sentry/sentry/sentry_multi_client.go b/cmd/sentry/sentry/sentry_multi_client.go index 1359398c43c..5abd7cb853a 100644 --- a/cmd/sentry/sentry/sentry_multi_client.go +++ b/cmd/sentry/sentry/sentry_multi_client.go @@ -500,7 +500,7 @@ func (cs *MultiClient) blockBodies66(inreq *proto_sentry.InboundMessage, _ direc return fmt.Errorf("decode BlockBodiesPacket66: %w", err) } txs, uncles := request.BlockRawBodiesPacket.Unpack() - cs.Bd.DeliverBodies(txs, uncles, uint64(len(inreq.Data)), ConvertH512ToPeerID(inreq.PeerId)) + cs.Bd.DeliverBodies(&txs, &uncles, uint64(len(inreq.Data)), ConvertH512ToPeerID(inreq.PeerId)) return nil } diff --git a/turbo/stages/bodydownload/body_algos.go b/turbo/stages/bodydownload/body_algos.go index c2d392c379b..edb406bcc51 100644 --- a/turbo/stages/bodydownload/body_algos.go +++ b/turbo/stages/bodydownload/body_algos.go @@ -9,6 +9,8 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/rawdb" @@ -17,7 +19,6 @@ import ( "github.com/ledgerwatch/erigon/turbo/adapter" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" - "github.com/ledgerwatch/log/v3" ) const BlockBufferSize = 128 @@ -195,7 +196,7 @@ func (bd *BodyDownload) RequestSent(bodyReq *BodyRequest, timeWithTimeout uint64 } // DeliverBodies takes the block body received from a peer and adds it to the various data structures -func (bd *BodyDownload) DeliverBodies(txs [][][]byte, uncles [][]*types.Header, lenOfP2PMsg uint64, peerID [64]byte) { +func (bd *BodyDownload) DeliverBodies(txs *[][][]byte, uncles *[][]*types.Header, lenOfP2PMsg uint64, peerID [64]byte) { bd.deliveryCh <- Delivery{txs: txs, uncles: uncles, lenOfP2PMessage: lenOfP2PMsg, peerID: peerID} select { @@ -240,8 +241,19 @@ Loop: break Loop } + if delivery.txs == nil { + log.Warn("nil transactions delivered", "peer_id", delivery.peerID, "p2p_msg_len", delivery.lenOfP2PMessage) + } + if delivery.uncles == nil { + log.Warn("nil uncles delivered", "peer_id", delivery.peerID, "p2p_msg_len", delivery.lenOfP2PMessage) + } + if delivery.txs == nil || delivery.uncles == nil { + log.Debug("delivery body processing has been skipped due to nil tx|data") + continue + } + reqMap := make(map[uint64]*BodyRequest) - txs, uncles, lenOfP2PMessage, _ := delivery.txs, delivery.uncles, delivery.lenOfP2PMessage, delivery.peerID + txs, uncles, lenOfP2PMessage, _ := *delivery.txs, *delivery.uncles, delivery.lenOfP2PMessage, delivery.peerID var delivered, undelivered int for i := range txs { diff --git a/turbo/stages/bodydownload/body_data_struct.go 
b/turbo/stages/bodydownload/body_data_struct.go index 56995ea9c4e..eedbf1c3ea0 100644 --- a/turbo/stages/bodydownload/body_data_struct.go +++ b/turbo/stages/bodydownload/body_data_struct.go @@ -14,8 +14,8 @@ const MaxBodiesInRequest = 1024 type Delivery struct { peerID [64]byte - txs [][][]byte - uncles [][]*types.Header + txs *[][][]byte + uncles *[][]*types.Header lenOfP2PMessage uint64 } From 46a8c531ced28dd6e0b55020305aa7045b059ca1 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Fri, 22 Jul 2022 11:07:58 +0200 Subject: [PATCH 47/72] Optimized PoS header downloader (#4775) * optimized PoS header downloader * removed println * comments * ops * Restore schedulePoSDownload params + simplify Co-authored-by: giuliorebuffo Co-authored-by: yperbasis --- eth/stagedsync/stage_headers.go | 9 +++------ turbo/engineapi/fork_validator.go | 1 - turbo/stages/headerdownload/header_algos.go | 2 +- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index b982d87c421..e8a4b7f863b 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -305,9 +305,8 @@ func startHandlingForkChoice( if header == nil { log.Info(fmt.Sprintf("[%s] Fork choice missing header with hash %x", s.LogPrefix(), headerHash)) - hashToDownload := headerHash cfg.hd.SetPoSDownloaderTip(headerHash) - schedulePoSDownload(requestId, hashToDownload, 0 /* header height is unknown, setting to 0 */, s, cfg) + schedulePoSDownload(requestId, headerHash, 0 /* header height is unknown, setting to 0 */, s, cfg) return &privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } @@ -487,10 +486,8 @@ func handleNewPayload( } if parent == nil { log.Info(fmt.Sprintf("[%s] New payload missing parent", s.LogPrefix())) - hashToDownload := header.ParentHash - heightToDownload := headerNumber - 1 cfg.hd.SetPoSDownloaderTip(headerHash) - schedulePoSDownload(requestId, hashToDownload, heightToDownload, s, cfg) + schedulePoSDownload(requestId, header.ParentHash, headerNumber-1, s, cfg) return &privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } @@ -629,7 +626,7 @@ func schedulePoSDownload( cfg.hd.SetRequestId(requestId) cfg.hd.SetHeaderToDownloadPoS(hashToDownload, heightToDownload) - cfg.hd.SetPOSSync(true) // This needs to be called afrer SetHeaderToDownloadPOS because SetHeaderToDownloadPOS sets `posAnchor` member field which is used by ProcessHeadersPOS + cfg.hd.SetPOSSync(true) // This needs to be called after SetHeaderToDownloadPOS because SetHeaderToDownloadPOS sets `posAnchor` member field which is used by ProcessHeadersPOS //nolint headerCollector := etl.NewCollector(s.LogPrefix(), cfg.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize)) diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index 7fc99f5d1ae..a1ae9a1b85c 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -228,7 +228,6 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body // if the block is not in range of maxForkDepth from head then we do not validate it. 
if abs64(int64(fv.currentHeight)-header.Number.Int64()) > maxForkDepth { status = remote.EngineStatus_ACCEPTED - fmt.Println("not in range") return } // Let's assemble the side fork backwards diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 131b3b641ee..1f9d9f83808 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -437,7 +437,7 @@ func (hd *HeaderDownload) requestMoreHeadersForPOS(currentTime time.Time) (timeo request = &HeaderRequest{ Anchor: anchor, Hash: anchor.parentHash, - Number: 0, // Since posAnchor may be an estimate, do not specify it here + Number: anchor.blockHeight - 1, Length: 192, Skip: 0, Reverse: true, From cd8b10f57e5f0a25a40cc2df312c2560494f1a1f Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 22 Jul 2022 16:54:05 +0700 Subject: [PATCH 48/72] snapshot merger: smaller interface (#4786) * save * save * save --- .github/workflows/ci.yml | 2 +- turbo/app/snapshots.go | 8 +- turbo/snapshotsync/block_snapshots.go | 144 ++++++++++----------- turbo/snapshotsync/block_snapshots_test.go | 4 +- 4 files changed, 76 insertions(+), 82 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 12ba6dac513..018f9934933 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -116,7 +116,7 @@ jobs: - name: parse hive output run: docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work --entrypoint /app/hivecioutput gatewayfm/hive:latest --resultsdir=/work/results --outdir=/work/results - - name: archive hive results + - name: archive hive results uses: actions/upload-artifact@v3 if: always() with: diff --git a/turbo/app/snapshots.go b/turbo/app/snapshots.go index b9c73f302d7..13a1155c629 100644 --- a/turbo/app/snapshots.go +++ b/turbo/app/snapshots.go @@ -130,13 +130,7 @@ func doIndicesCommand(cliCtx *cli.Context) error { if rebuild { cfg := ethconfig.NewSnapCfg(true, true, false) - workers := runtime.GOMAXPROCS(-1) - 1 - if workers < 1 { - workers = 1 - } - if workers > 4 { - workers = 4 - } + workers := cmp.InRange(1, 4, runtime.GOMAXPROCS(-1)-1) if err := rebuildIndices(ctx, chainDB, cfg, dirs, from, workers); err != nil { log.Error("Error", "err", err) } diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index fc7850eca7a..1ba201f4cc0 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -482,6 +482,17 @@ func (s *RoSnapshots) Reopen() error { return nil } + +func (s *RoSnapshots) Ranges() (ranges []Range) { + _ = s.Headers.View(func(segments []*HeaderSegment) error { + for _, sn := range segments { + ranges = append(ranges, sn.ranges) + } + return nil + }) + return ranges +} + func (s *RoSnapshots) ReopenSegments() error { s.Headers.lock.Lock() defer s.Headers.lock.Unlock() @@ -614,6 +625,25 @@ func (s *RoSnapshots) ViewTxs(blockNum uint64, f func(sn *TxnSegment) error) (fo return s.Txs.ViewSegment(blockNum, f) } +func buildIdx(ctx context.Context, sn snap.FileInfo, chainID uint256.Int, tmpDir string, lvl log.Lvl) error { + switch sn.T { + case snap.Headers: + if err := HeadersIdx(ctx, sn.Path, sn.From, tmpDir, lvl); err != nil { + return err + } + case snap.Bodies: + if err := BodiesIdx(ctx, sn.Path, sn.From, tmpDir, lvl); err != nil { + return err + } + case snap.Transactions: + dir, _ := filepath.Split(sn.Path) + if err := TransactionsIdx(ctx, chainID, sn.From, sn.To, dir, tmpDir, lvl); err != nil { 
+ return err + } + } + return nil +} + func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpDir string, from uint64, workers int, lvl log.Lvl) error { log.Log(lvl, "[snapshots] Build indices", "from", from) logEvery := time.NewTicker(20 * time.Second) @@ -987,11 +1017,11 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 return fmt.Errorf("reopen: %w", err) } merger := NewMerger(tmpDir, workers, lvl, chainID, notifier) - ranges := merger.FindMergeRanges(snapshots) - if len(ranges) == 0 { + rangesToMerge := merger.FindMergeRanges(snapshots.Ranges()) + if len(rangesToMerge) == 0 { return nil } - err := merger.Merge(ctx, snapshots, ranges, snapshots.Dir(), true) + err := merger.Merge(ctx, snapshots, rangesToMerge, snapshots.Dir(), true) if err != nil { return err } @@ -1000,7 +1030,7 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 } var downloadRequest []DownloadRequest - for _, r := range ranges { + for _, r := range rangesToMerge { downloadRequest = append(downloadRequest, NewDownloadRequest(&r, "", "")) } @@ -1019,18 +1049,18 @@ func DumpBlocks(ctx context.Context, blockFrom, blockTo, blocksPerFile uint64, t return nil } func dumpBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, snapDir string, chainDB kv.RoDB, workers int, lvl log.Lvl) error { - segmentFile := filepath.Join(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Headers)) - if err := DumpHeaders(ctx, chainDB, segmentFile, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { + f, _ := snap.ParseFileName(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Headers)) + if err := DumpHeaders(ctx, chainDB, f.Path, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { return fmt.Errorf("DumpHeaders: %w", err) } - segmentFile = filepath.Join(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Bodies)) - if err := DumpBodies(ctx, chainDB, segmentFile, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { + f, _ = snap.ParseFileName(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Bodies)) + if err := DumpBodies(ctx, chainDB, f.Path, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { return fmt.Errorf("DumpBodies: %w", err) } - segmentFile = filepath.Join(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Transactions)) - if _, err := DumpTxs(ctx, chainDB, segmentFile, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { + f, _ = snap.ParseFileName(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Transactions)) + if _, err := DumpTxs(ctx, chainDB, f.Path, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { return fmt.Errorf("DumpTxs: %w", err) } @@ -1048,7 +1078,7 @@ func DumpTxs(ctx context.Context, db kv.RoDB, segmentFile, tmpDir string, blockF chainConfig := tool.ChainConfigFromDB(db) chainID, _ := uint256.FromBig(chainConfig.ChainID) - f, err := compress.NewCompressor(ctx, "Transactions", segmentFile, tmpDir, compress.MinPatternScore, workers, lvl) + f, err := compress.NewCompressor(ctx, "Snapshots Txs", segmentFile, tmpDir, compress.MinPatternScore, workers, lvl) if err != nil { return 0, fmt.Errorf("NewCompressor: %w, %s", err, segmentFile) } @@ -1222,7 +1252,7 @@ func DumpHeaders(ctx context.Context, db kv.RoDB, segmentFilePath, tmpDir string logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() - f, err := compress.NewCompressor(ctx, "Headers", segmentFilePath, tmpDir, compress.MinPatternScore, workers, lvl) + f, err := compress.NewCompressor(ctx, "Snapshots 
Headers", segmentFilePath, tmpDir, compress.MinPatternScore, workers, lvl) if err != nil { return err } @@ -1285,7 +1315,7 @@ func DumpBodies(ctx context.Context, db kv.RoDB, segmentFilePath, tmpDir string, logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() - f, err := compress.NewCompressor(ctx, "Bodies", segmentFilePath, tmpDir, compress.MinPatternScore, workers, lvl) + f, err := compress.NewCompressor(ctx, "Snapshots Bodies", segmentFilePath, tmpDir, compress.MinPatternScore, workers, lvl) if err != nil { return err } @@ -1715,33 +1745,35 @@ type Range struct { func (r Range) String() string { return fmt.Sprintf("%dk-%dk", r.from/1000, r.to/1000) } -func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []Range) { - for i := len(snapshots.Headers.segments) - 1; i > 0; i-- { - sn := snapshots.Headers.segments[i] - if sn.ranges.to-sn.ranges.from >= snap.DEFAULT_SEGMENT_SIZE { // is complete .seg +func (*Merger) FindMergeRanges(currentRanges []Range) (toMerge []Range) { + for i := len(currentRanges) - 1; i > 0; i-- { + r := currentRanges[i] + if r.to-r.from >= snap.DEFAULT_SEGMENT_SIZE { // is complete .seg continue } for _, span := range []uint64{500_000, 100_000, 10_000} { - if sn.ranges.to%span != 0 { + if r.to%span != 0 { continue } - if sn.ranges.to-sn.ranges.from == span { + if r.to-r.from == span { break } - aggFrom := sn.ranges.to - span - res = append(res, Range{from: aggFrom, to: sn.ranges.to}) - for snapshots.Headers.segments[i].ranges.from > aggFrom { + aggFrom := r.to - span + toMerge = append(toMerge, Range{from: aggFrom, to: r.to}) + for currentRanges[i].from > aggFrom { i-- } break } } - slices.SortFunc(res, func(i, j Range) bool { return i.from < j.from }) - return res + slices.SortFunc(toMerge, func(i, j Range) bool { return i.from < j.from }) + return toMerge } -func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (toMergeHeaders, toMergeBodies, toMergeTxs []string, err error) { - err = snapshots.Headers.View(func(hSegments []*HeaderSegment) error { + +func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (map[snap.Type][]string, error) { + toMerge := map[snap.Type][]string{} + err := snapshots.Headers.View(func(hSegments []*HeaderSegment) error { return snapshots.Bodies.View(func(bSegments []*BodySegment) error { return snapshots.Txs.View(func(tSegments []*TxnSegment) error { for i, sn := range hSegments { @@ -1751,17 +1783,16 @@ func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (toMergeH if sn.ranges.to > to { break } - - toMergeHeaders = append(toMergeHeaders, hSegments[i].seg.FilePath()) - toMergeBodies = append(toMergeBodies, bSegments[i].seg.FilePath()) - toMergeTxs = append(toMergeTxs, tSegments[i].Seg.FilePath()) + toMerge[snap.Headers] = append(toMerge[snap.Headers], hSegments[i].seg.FilePath()) + toMerge[snap.Bodies] = append(toMerge[snap.Bodies], bSegments[i].seg.FilePath()) + toMerge[snap.Transactions] = append(toMerge[snap.Transactions], tSegments[i].Seg.FilePath()) } return nil }) }) }) - return + return toMerge, err } // Merge does merge segments in given ranges @@ -1773,42 +1804,18 @@ func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges defer logEvery.Stop() log.Log(m.lvl, "[snapshots] Merge segments", "ranges", fmt.Sprintf("%v", mergeRanges)) for _, r := range mergeRanges { - toMergeHeaders, toMergeBodies, toMergeTxs, err := m.filesByRange(snapshots, r.from, r.to) + toMerge, err := m.filesByRange(snapshots, r.from, r.to) if err != nil { return err } - 
{ - segFilePath := filepath.Join(snapDir, snap.SegmentFileName(r.from, r.to, snap.Bodies)) - if err := m.merge(ctx, toMergeBodies, segFilePath, logEvery); err != nil { - return fmt.Errorf("mergeByAppendSegments: %w", err) - } - if doIndex { - if err := BodiesIdx(ctx, segFilePath, r.from, m.tmpDir, m.lvl); err != nil { - return fmt.Errorf("BodiesIdx: %w", err) - } - } - } - - { - segFilePath := filepath.Join(snapDir, snap.SegmentFileName(r.from, r.to, snap.Headers)) - if err := m.merge(ctx, toMergeHeaders, segFilePath, logEvery); err != nil { - return fmt.Errorf("mergeByAppendSegments: %w", err) - } - if doIndex { - if err := HeadersIdx(ctx, segFilePath, r.from, m.tmpDir, m.lvl); err != nil { - return fmt.Errorf("HeadersIdx: %w", err) - } - } - } - - { - segFilePath := filepath.Join(snapDir, snap.SegmentFileName(r.from, r.to, snap.Transactions)) - if err := m.merge(ctx, toMergeTxs, segFilePath, logEvery); err != nil { + for _, t := range snap.AllSnapshotTypes { + f, _ := snap.ParseFileName(snapDir, snap.SegmentFileName(r.from, r.to, t)) + if err := m.merge(ctx, toMerge[t], f.Path, logEvery); err != nil { return fmt.Errorf("mergeByAppendSegments: %w", err) } if doIndex { - if err := TransactionsIdx(ctx, m.chainID, r.from, r.to, snapDir, m.tmpDir, m.lvl); err != nil { - return fmt.Errorf("TransactionsIdx: %w", err) + if err := buildIdx(ctx, f, m.chainID, m.tmpDir, m.lvl); err != nil { + return err } } } @@ -1820,17 +1827,10 @@ func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges m.notifier.OnNewSnapshot() time.Sleep(1 * time.Second) // i working on blocking API - to ensure client does not use } - - if err := m.removeOldFiles(toMergeHeaders, snapDir); err != nil { - return err - } - - if err := m.removeOldFiles(toMergeBodies, snapDir); err != nil { - return err - } - - if err := m.removeOldFiles(toMergeTxs, snapDir); err != nil { - return err + for _, t := range snap.AllSnapshotTypes { + if err := m.removeOldFiles(toMerge[t], snapDir); err != nil { + return err + } } } log.Log(m.lvl, "[snapshots] Merge done", "from", mergeRanges[0].from) diff --git a/turbo/snapshotsync/block_snapshots_test.go b/turbo/snapshotsync/block_snapshots_test.go index fb1c8f8ab83..bcb75e7695a 100644 --- a/turbo/snapshotsync/block_snapshots_test.go +++ b/turbo/snapshotsync/block_snapshots_test.go @@ -77,7 +77,7 @@ func TestMergeSnapshots(t *testing.T) { { merger := NewMerger(dir, 1, log.LvlInfo, uint256.Int{}, nil) - ranges := merger.FindMergeRanges(s) + ranges := merger.FindMergeRanges(s.Ranges()) require.True(len(ranges) > 0) err := merger.Merge(context.Background(), s, ranges, s.Dir(), false) require.NoError(err) @@ -92,7 +92,7 @@ func TestMergeSnapshots(t *testing.T) { { merger := NewMerger(dir, 1, log.LvlInfo, uint256.Int{}, nil) - ranges := merger.FindMergeRanges(s) + ranges := merger.FindMergeRanges(s.Ranges()) require.True(len(ranges) == 0) err := merger.Merge(context.Background(), s, ranges, s.Dir(), false) require.NoError(err) From d2bbf22e3d4dc4b845d881d08a52cd7231a433ff Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Fri, 22 Jul 2022 13:26:05 +0100 Subject: [PATCH 49/72] hive - run on successful CI (#4791) * feat(ci): run hive tests as part of CI * feat(ci): hive CI tidy up (#2) run hive on successful CI only run on non-draft PR only --- .github/workflows/ci.yml | 13 +++++++++-- .github/workflows/hive-results.yml | 2 +- .github/workflows/hive.yml | 35 ++++++++++++++++++++++++++++++ 3 files changed, 47 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/hive.yml diff 
--git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 018f9934933..26a80271b30 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,4 @@ -name: Continuous integration +name: CI on: push: branches: @@ -10,8 +10,15 @@ on: - devel - alpha - stable + types: + - opened + - reopened + - synchronize + - ready_for_review + jobs: tests: + if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} strategy: matrix: os: [ ubuntu-20.04, macos-11 ] # list of os: https://github.com/actions/virtual-environments @@ -59,6 +66,7 @@ jobs: run: make test tests-windows: + if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} strategy: matrix: os: [ windows-2022 ] @@ -97,6 +105,7 @@ jobs: run: .\wmake.ps1 test docker: + if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 @@ -121,4 +130,4 @@ jobs: if: always() with: name: hive-ci-output - path: results/*.xml + path: results/*.xml diff --git a/.github/workflows/hive-results.yml b/.github/workflows/hive-results.yml index c67dc5fcfab..b23dc2ce478 100644 --- a/.github/workflows/hive-results.yml +++ b/.github/workflows/hive-results.yml @@ -2,7 +2,7 @@ name: Hive results on: workflow_run: - workflows: ["Continuous integration", "ci"] + workflows: ["Hive"] types: - completed diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml new file mode 100644 index 00000000000..3705a1fe9ca --- /dev/null +++ b/.github/workflows/hive.yml @@ -0,0 +1,35 @@ +name: Hive +on: + workflow_run: + workflows: ["CI"] + types: + - completed + +jobs: + hive: + runs-on: ubuntu-20.04 + if: ${{ github.event.workflow_run.conclusion == 'success' }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 # fetch git tags for "git describe" + + - name: build erigon image + run: DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + + # check with root permissions, should be cached from previous build + - name: build erigon image (root permissions) + run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + + - name: run hive + run: sudo mkdir /results && docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work gatewayfm/hive:latest --sim ethereum/engine --results-root=/work/results --client erigon_ci-$GITHUB_SHA --docker.output --loglevel 5 + + - name: parse hive output + run: docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work --entrypoint /app/hivecioutput gatewayfm/hive:latest --resultsdir=/work/results --outdir=/work/results + + - name: archive hive results + uses: actions/upload-artifact@v3 + if: always() + with: + name: hive-ci-output + path: results/*.xml From d6001225e7c9e13eb5a9ab34d12d60403c861df6 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Fri, 22 Jul 2022 13:26:18 +0100 Subject: [PATCH 50/72] feat(makefile): add documentation and coverage command (#4792) --- .gitignore | 1 + Makefile | 35 ++++++++++++++++++++++++++++++++--- README.md | 2 ++ 3 files changed, 35 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index c86d201a6e4..1e70d76a79c 100644 --- a/.gitignore +++ b/.gitignore @@ -76,4 +76,5 @@ go.work docker-compose.*.yml .env +coverage.out diff --git a/Makefile b/Makefile index 061abf375a5..7866cf895a1 100644 --- a/Makefile +++ b/Makefile @@ -33,12 +33,14 @@ GOTEST = GODEBUG=cgocheck=0 $(GO) test $(GO_FLAGS) ./... 
-p 2 default: all +## go-version: print and verify go version go-version: @if [ $(shell $(GO) version | cut -c 16-17) -lt 18 ]; then \ echo "minimum required Golang version is 1.18"; \ exit 1 ;\ fi +## validate_docker_build_args: ensure docker build args are valid validate_docker_build_args: @echo "Docker build args:" @echo " DOCKER_UID: $(DOCKER_UID)" @@ -51,6 +53,7 @@ validate_docker_build_args: fi @echo "✔️ host OS user exists: $(shell id -nu $(DOCKER_UID))" +## docker: validate, update submodules and build with docker docker: validate_docker_build_args git-submodules DOCKER_BUILDKIT=1 $(DOCKER) build -t ${DOCKER_TAG} \ --build-arg "BUILD_DATE=$(shell date -Iseconds)" \ @@ -67,16 +70,18 @@ ifdef XDG_DATA_HOME endif xdg_data_home_subdirs = $(xdg_data_home)/erigon $(xdg_data_home)/erigon-grafana $(xdg_data_home)/erigon-prometheus +## setup_xdg_data_home: TODO setup_xdg_data_home: mkdir -p $(xdg_data_home_subdirs) ls -aln $(xdg_data_home) | grep -E "472.*0.*erigon-grafana" || sudo chown -R 472:0 $(xdg_data_home)/erigon-grafana @echo "✔️ xdg_data_home setup" @ls -al $(xdg_data_home) +## docker-compose: validate build args, setup xdg data home, and run docker-compose up docker-compose: validate_docker_build_args setup_xdg_data_home docker-compose up -# debug build allows see C stack traces, run it with GOTRACEBACK=crash. You don't need debug build for C pit for profiling. To profile C code use SETCGOTRCKEBACK=1 +## dbg debug build allows see C stack traces, run it with GOTRACEBACK=crash. You don't need debug build for C pit for profiling. To profile C code use SETCGOTRCKEBACK=1 dbg: $(GO_DBG_BUILD) -o $(GOBIN)/ ./cmd/... @@ -86,8 +91,10 @@ dbg: @cd ./cmd/$* && $(GOBUILD) -o $(GOBIN)/$* @echo "Run \"$(GOBIN)/$*\" to launch $*." +## geth: run erigon (TODO: remove?) geth: erigon +## erigon: build erigon erigon: go-version erigon.cmd @rm -f $(GOBIN)/tg # Remove old binary to prevent confusion where users still use it because of the scripts @@ -108,8 +115,10 @@ COMMANDS += txpool # build each command using %.cmd rule $(COMMANDS): %: %.cmd +## all: run erigon with all commands all: erigon $(COMMANDS) +## db-tools: build db tools db-tools: git-submodules @echo "Building db-tools" @@ -126,23 +135,29 @@ db-tools: git-submodules cp libmdbx/mdbx_stat $(GOBIN) @echo "Run \"$(GOBIN)/mdbx_stat -h\" to get info about mdbx db file." +## test: run unit tests with a 50s timeout test: $(GOTEST) --timeout 50s +## test-integration: run integration tests with a 30m timeout test-integration: $(GOTEST) --timeout 30m -tags $(BUILD_TAGS),integration +## lint: run golangci-lint with .golangci.yml config file lint: @./build/bin/golangci-lint run --config ./.golangci.yml +## lintci: run golangci-lint (additionally outputs message before run) lintci: @echo "--> Running linter for code" @./build/bin/golangci-lint run --config ./.golangci.yml +## lintci-deps: (re)installs golangci-lint to build/bin/golangci-lint lintci-deps: rm -f ./build/bin/golangci-lint curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.47.0 +## clean: cleans the go cache, build dir, libmdbx db dir clean: go clean -cache rm -fr build/* @@ -151,6 +166,7 @@ clean: # The devtools target installs tools required for 'go generate'. # You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'. +## devtools: installs dev tools (and checks for npm installation etc.) devtools: # Notice! 
If you adding new binary - add it also to cmd/hack/binary-deps/main.go file $(GOBUILD) -o $(GOBIN)/go-bindata github.com/kevinburke/go-bindata/go-bindata @@ -165,16 +181,20 @@ devtools: @type "solc" 2> /dev/null || echo 'Please install solc' @type "protoc" 2> /dev/null || echo 'Please install protoc' +## bindings: generate test contracts and core contracts bindings: PATH=$(GOBIN):$(PATH) go generate ./tests/contracts/ PATH=$(GOBIN):$(PATH) go generate ./core/state/contracts/ +## prometheus: run prometheus and grafana with docker-compose prometheus: docker-compose up prometheus grafana +## escape: run escape path={path} to check for memory leaks e.g. run escape path=cmd/erigon escape: cd $(path) && go test -gcflags "-m -m" -run none -bench=BenchmarkJumpdest* -benchmem -memprofile mem.out +## git-submodules: update git submodules git-submodules: @[ -d ".git" ] || (echo "Not a git repository" && exit 1) @echo "Updating git submodules" @@ -189,7 +209,7 @@ ERIGON_USER_UID ?= 3473 ERIGON_USER_GID ?= 3473 ERIGON_USER_XDG_DATA_HOME ?= ~$(ERIGON_USER)/.local/share -# create "erigon" user +## user_linux: create "erigon" user (Linux) user_linux: ifdef DOCKER sudo groupadd -f docker @@ -203,7 +223,7 @@ ifdef DOCKER endif sudo -u $(ERIGON_USER) mkdir -p $(ERIGON_USER_XDG_DATA_HOME) -# create "erigon" user +## user_macos: create "erigon" user (MacOS) user_macos: sudo dscl . -create /Users/$(ERIGON_USER) sudo dscl . -create /Users/$(ERIGON_USER) UserShell /bin/bash @@ -212,3 +232,12 @@ user_macos: sudo dscl . -create /Users/$(ERIGON_USER) NFSHomeDirectory /Users/$(ERIGON_USER) sudo dscl . -append /Groups/admin GroupMembership $(ERIGON_USER) sudo -u $(ERIGON_USER) mkdir -p $(ERIGON_USER_XDG_DATA_HOME) + +## coverage: run code coverage report and output total coverage % +coverage: + @go test -coverprofile=coverage.out ./... > /dev/null 2>&1 && go tool cover -func coverage.out | grep total | awk '{print substr($$3, 1, length($$3)-1)}' + +## help: print commands help +help : Makefile + @sed -n 's/^##//p' $< + diff --git a/README.md b/README.md index 5a85d03e55a..5fdff12f27a 100644 --- a/README.md +++ b/README.md @@ -77,6 +77,8 @@ Use `--datadir` to choose where to store data. Use `--chain=bor-mainnet` for Polygon Mainnet and `--chain=mumbai` for Polygon Mumbai. +Running `make help` will list and describe the convenience commands available in the [Makefile](./Makefile) + ### Modularity Erigon by default is "all in one binary" solution, but it's possible start TxPool as separated processes. 
From 37ba45a627a5c2d905557d068726e33d6b1e93ea Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Fri, 22 Jul 2022 16:09:50 +0100 Subject: [PATCH 51/72] feat(ci): badges for hive and code coverage on devel (#4793) * feat(ci): badges for hive and code coverage on devel * feat(ci): hive CI tidy up (#2) run hive on successful CI only run on non-draft PR only --- .github/workflows/ci.yml | 36 ++++++++++++++++++- .github/workflows/coverage.yml | 55 ++++++++++++++++++++++++++++++ .github/workflows/hive-results.yml | 46 ++++++++++++++++++++++--- .github/workflows/hive.yml | 35 ------------------- README.md | 4 +++ 5 files changed, 136 insertions(+), 40 deletions(-) create mode 100644 .github/workflows/coverage.yml delete mode 100644 .github/workflows/hive.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 26a80271b30..9781d2e3441 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -119,6 +119,25 @@ jobs: - name: sudo make docker run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + hive: + needs: + - tests + - tests-windows + - docker + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 # fetch git tags for "git describe" + + - name: build erigon image + run: DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + + # check with root permissions, should be cached from previous build + - name: build erigon image (root permissions) + run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + - name: run hive run: sudo mkdir /results && docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work gatewayfm/hive:latest --sim ethereum/engine --results-root=/work/results --client erigon_ci-$GITHUB_SHA --docker.output --loglevel 5 @@ -130,4 +149,19 @@ jobs: if: always() with: name: hive-ci-output - path: results/*.xml + path: results/*.xml + + event_file: + needs: + - tests + - tests-windows + - docker + name: archive event file + runs-on: ubuntu-latest + if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} + steps: + - name: upload + uses: actions/upload-artifact@v2 + with: + name: event file + path: ${{ github.event_path }} diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml new file mode 100644 index 00000000000..72deaea1d53 --- /dev/null +++ b/.github/workflows/coverage.yml @@ -0,0 +1,55 @@ +name: Coverage +on: + push: + branches: + - devel + +jobs: + coverage: + runs-on: ubuntu-20.04 + + steps: + - uses: actions/checkout@v3 + - run: git submodule update --init --recursive --force + + - uses: actions/setup-go@v3 + with: + go-version: 1.18.x + + - name: install dependencies on Linux + if: runner.os == 'Linux' + run: sudo apt update && sudo apt install build-essential + + - name: run coverage + run: echo "COVERAGE=$(make coverage)" >> $GITHUB_ENV + + - name: set badge color + shell: bash + run: | + if [ ${{ env.COVERAGE }} -lt 40 ] + then + echo "BADGE_COLOR=800000" >> $GITHUB_ENV + elif [ ${{ env.COVERAGE }} -lt 75 ] + then + echo "BADGE_COLOR=696969" >> $GITHUB_ENV + else + echo "BADGE_COLOR=31c653" >> $GITHUB_ENV + fi + + - name: create badge + uses: emibcn/badge-action@d6f51ff11b5c3382b3b88689ae2d6db22d9737d1 + with: + label: Coverage + status: ${{ env.COVERAGE }} + color: ${{ env.BADGE_COLOR }} + path: badge.svg + + - name: upload badge 
to gist + if: > + github.event_name == 'workflow_run' && github.event.workflow_run.head_branch == 'devel' || + github.event_name != 'workflow_run' && github.ref == 'refs/heads/devel' + uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d + with: + token: ${{ secrets.GIST_TOKEN }} + gistURL: https://gist.githubusercontent.com/revittm/ee38e9beb22353eef6b88f2ad6ed7aa9 + file: badge.svg \ No newline at end of file diff --git a/.github/workflows/hive-results.yml b/.github/workflows/hive-results.yml index b23dc2ce478..8147393e090 100644 --- a/.github/workflows/hive-results.yml +++ b/.github/workflows/hive-results.yml @@ -2,7 +2,7 @@ name: Hive results on: workflow_run: - workflows: ["Hive"] + workflows: ["CI"] types: - completed @@ -12,8 +12,13 @@ jobs: runs-on: ubuntu-latest if: github.event.workflow_run.conclusion != 'skipped' + permissions: + checks: write + pull-requests: write+ + actions: read + steps: - - name: Download and extract artifacts + - name: download and extract artifacts env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} run: | @@ -28,10 +33,43 @@ jobs: unzip -d "$name" "$name.zip" done - - name: Publish hive test results + - name: publish hive test results uses: EnricoMi/publish-unit-test-result-action@v1 with: commit: ${{ github.event.workflow_run.head_sha }} event_file: artifacts/Event File/event.json event_name: ${{ github.event.workflow_run.event }} - files: "artifacts/**/*.xml" \ No newline at end of file + files: "artifacts/**/*.xml" + + - name: set badge color + shell: bash + run: | + case ${{ fromJSON( steps.test-results.outputs.json ).conclusion }} in + success) + echo "BADGE_COLOR=31c653" >> $GITHUB_ENV + ;; + failure) + echo "BADGE_COLOR=800000" >> $GITHUB_ENV + ;; + neutral) + echo "BADGE_COLOR=696969" >> $GITHUB_ENV + ;; + esac + + - name: create badge + uses: emibcn/badge-action@d6f51ff11b5c3382b3b88689ae2d6db22d9737d1 + with: + label: Hive + status: '${{ fromJSON( steps.test-results.outputs.json ).formatted.stats.tests }} tests, ${{ fromJSON( steps.test-results.outputs.json ).formatted.stats.runs }} runs: ${{ fromJSON( steps.test-results.outputs.json ).conclusion }}' + color: ${{ env.BADGE_COLOR }} + path: badge.svg + + - name: upload badge to gist + if: > + github.event_name == 'workflow_run' && github.event.workflow_run.head_branch == 'devel' || + github.event_name != 'workflow_run' && github.ref == 'refs/heads/devel' + uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d + with: + token: ${{ secrets.GIST_TOKEN }} + gistURL: https://gist.githubusercontent.com/revittm/dc492845ba6eb694e6c7279224634b20 + file: badge.svg \ No newline at end of file diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml deleted file mode 100644 index 3705a1fe9ca..00000000000 --- a/.github/workflows/hive.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: Hive -on: - workflow_run: - workflows: ["CI"] - types: - - completed - -jobs: - hive: - runs-on: ubuntu-20.04 - if: ${{ github.event.workflow_run.conclusion == 'success' }} - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 # fetch git tags for "git describe" - - - name: build erigon image - run: DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker - - # check with root permissions, should be cached from previous build - - name: build erigon image (root permissions) - run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker - - - name: run hive - run: sudo mkdir /results && docker run --rm -v 
/var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work gatewayfm/hive:latest --sim ethereum/engine --results-root=/work/results --client erigon_ci-$GITHUB_SHA --docker.output --loglevel 5 - - - name: parse hive output - run: docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work --entrypoint /app/hivecioutput gatewayfm/hive:latest --resultsdir=/work/results --outdir=/work/results - - - name: archive hive results - uses: actions/upload-artifact@v3 - if: always() - with: - name: hive-ci-output - path: results/*.xml diff --git a/README.md b/README.md index 5fdff12f27a..760f5fdb034 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,10 @@ Erigon is an implementation of Ethereum (aka "Ethereum client"), on the efficien ![Build status](https://github.com/ledgerwatch/erigon/actions/workflows/ci.yml/badge.svg) +![Coverage](https://gist.githubusercontent.com/revittm/ee38e9beb22353eef6b88f2ad6ed7aa9/raw/19f7838ede42d896370aff17753346a01fc5d4ad/badge.svg) + +![Hive](https://gist.githubusercontent.com/revittm/dc492845ba6eb694e6c7279224634b20/raw/51f5a73aa31c5ae2f199969321161bcb9abc5c10/badge.svg) + - [System Requirements](#system-requirements) From 0d979e18aa996c5df728810c0125b4e674407d16 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Fri, 22 Jul 2022 16:18:45 +0100 Subject: [PATCH 52/72] fix(readme): latest badge.svg gist links (#4795) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 760f5fdb034..9f2810c1a1c 100644 --- a/README.md +++ b/README.md @@ -4,9 +4,9 @@ Erigon is an implementation of Ethereum (aka "Ethereum client"), on the efficien ![Build status](https://github.com/ledgerwatch/erigon/actions/workflows/ci.yml/badge.svg) -![Coverage](https://gist.githubusercontent.com/revittm/ee38e9beb22353eef6b88f2ad6ed7aa9/raw/19f7838ede42d896370aff17753346a01fc5d4ad/badge.svg) +![Coverage](https://gist.githubusercontent.com/revittm/ee38e9beb22353eef6b88f2ad6ed7aa9/raw/badge.svg) -![Hive](https://gist.githubusercontent.com/revittm/dc492845ba6eb694e6c7279224634b20/raw/51f5a73aa31c5ae2f199969321161bcb9abc5c10/badge.svg) +![Hive](https://gist.githubusercontent.com/revittm/dc492845ba6eb694e6c7279224634b20/raw/badge.svg) From 01641e3900b21460a018c7a4542e72d4303400a9 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Fri, 22 Jul 2022 17:05:08 +0100 Subject: [PATCH 53/72] fix(ci): hive results workflow syntax (#4796) --- .github/workflows/hive-results.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hive-results.yml b/.github/workflows/hive-results.yml index 8147393e090..d6191e38449 100644 --- a/.github/workflows/hive-results.yml +++ b/.github/workflows/hive-results.yml @@ -14,7 +14,7 @@ jobs: permissions: checks: write - pull-requests: write+ + pull-requests: write actions: read steps: From dd4bae789b990ed315ea6c607a8c910600f87337 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sat, 23 Jul 2022 09:33:45 +0700 Subject: [PATCH 54/72] Pool: parse rlp chain id for non-legacy transactions --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f96580dbf77..cb13e6b794f 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220721212928-1331bb661a22 + github.com/ledgerwatch/erigon-lib v0.0.0-20220723021732-85f70d75cea8 github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 
github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/go.sum b/go.sum index 3447e0747c9..716a031501d 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220721212928-1331bb661a22 h1:cGxOEtCnkVzX+RcGQbQHiDuV8dQHnGqcwTFl9q8Hnkg= -github.com/ledgerwatch/erigon-lib v0.0.0-20220721212928-1331bb661a22/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723021732-85f70d75cea8 h1:rP2lzEZfJsakfZGDr7yaNzuuLZ7zDZw6LMEU/cTl+8o= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723021732-85f70d75cea8/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= From 6a759a34f81ae9b551022d963630149fca7d4a78 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sat, 23 Jul 2022 10:06:35 +0700 Subject: [PATCH 55/72] pool: metrics (#4800) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index cb13e6b794f..56c600a4d8b 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220723021732-85f70d75cea8 + github.com/ledgerwatch/erigon-lib v0.0.0-20220723030450-59aa1c78c72f github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/go.sum b/go.sum index 716a031501d..8d9b8062a76 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723021732-85f70d75cea8 h1:rP2lzEZfJsakfZGDr7yaNzuuLZ7zDZw6LMEU/cTl+8o= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723021732-85f70d75cea8/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723030450-59aa1c78c72f h1:fLIc9erXDu+CmtjZdiDX3cfNMIfhkolkKop+LQFwW2c= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723030450-59aa1c78c72f/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= From 62873649d9764f184a511f0dcd73a6a131bd1f7f Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sat, 23 Jul 2022 10:09:24 +0700 Subject: [PATCH 56/72] lint up 47.2 (#4801) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7866cf895a1..c666796d954 100644 --- a/Makefile +++ b/Makefile 
@@ -155,7 +155,7 @@ lintci: ## lintci-deps: (re)installs golangci-lint to build/bin/golangci-lint lintci-deps: rm -f ./build/bin/golangci-lint - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.47.0 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.47.2 ## clean: cleans the go cache, build dir, libmdbx db dir clean: From 95f0338ddb0e8f0f05be31f11551b4c5dc302ed8 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sat, 23 Jul 2022 10:13:13 +0700 Subject: [PATCH 57/72] Pool: parse rlp chain id for non-legacy transactions #4802 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 56c600a4d8b..c733dbefb05 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220723030450-59aa1c78c72f + github.com/ledgerwatch/erigon-lib v0.0.0-20220723031125-6f7794e88b5e github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/go.sum b/go.sum index 8d9b8062a76..38d9a79d686 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723030450-59aa1c78c72f h1:fLIc9erXDu+CmtjZdiDX3cfNMIfhkolkKop+LQFwW2c= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723030450-59aa1c78c72f/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723031125-6f7794e88b5e h1:4tZnz9FCTIalm6VtGXBZX713Y+lcHqpMK6L3wP7OSHY= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723031125-6f7794e88b5e/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= From 68e35417fc134022edada387796201af36bfe77f Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sat, 23 Jul 2022 11:09:16 +0700 Subject: [PATCH 58/72] RetireBlocks: encapsulate delete logic --- core/rawdb/accessors_chain.go | 65 ++++++++++++++++++++++++ eth/stagedsync/stage.go | 71 --------------------------- eth/stagedsync/stage_execute.go | 10 ++-- eth/stagedsync/stage_senders.go | 24 +++------ turbo/app/snapshots.go | 39 ++++++--------- turbo/snapshotsync/block_snapshots.go | 20 ++++++++ 6 files changed, 113 insertions(+), 116 deletions(-) diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index f27f11fc96e..b4403770621 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -25,6 +25,7 @@ import ( "math/big" "time" + common2 "github.com/ledgerwatch/erigon-lib/common" libcommon "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/kv" @@ -1643,3 +1644,67 @@ func WriteSnapshots(tx kv.RwTx, list map[string]string) error { } return nil } + +// PruneTable has `limit` parameter to avoid too large 
data deletes per one sync cycle - better delete by small portions to reduce db.FreeList size +func PruneTable(tx kv.RwTx, table string, pruneTo uint64, ctx context.Context, limit int) error { + c, err := tx.RwCursor(table) + + if err != nil { + return fmt.Errorf("failed to create cursor for pruning %w", err) + } + defer c.Close() + + i := 0 + for k, _, err := c.First(); k != nil; k, _, err = c.Next() { + if err != nil { + return err + } + i++ + if i > limit { + break + } + + blockNum := binary.BigEndian.Uint64(k) + if blockNum >= pruneTo { + break + } + select { + case <-ctx.Done(): + return common2.ErrStopped + default: + } + if err = c.DeleteCurrent(); err != nil { + return fmt.Errorf("failed to remove for block %d: %w", blockNum, err) + } + } + return nil +} + +func PruneTableDupSort(tx kv.RwTx, table string, logPrefix string, pruneTo uint64, logEvery *time.Ticker, ctx context.Context) error { + c, err := tx.RwCursorDupSort(table) + if err != nil { + return fmt.Errorf("failed to create cursor for pruning %w", err) + } + defer c.Close() + + for k, _, err := c.First(); k != nil; k, _, err = c.NextNoDup() { + if err != nil { + return fmt.Errorf("failed to move %s cleanup cursor: %w", table, err) + } + blockNum := binary.BigEndian.Uint64(k) + if blockNum >= pruneTo { + break + } + select { + case <-logEvery.C: + log.Info(fmt.Sprintf("[%s]", logPrefix), "table", table, "block", blockNum) + case <-ctx.Done(): + return common2.ErrStopped + default: + } + if err = c.DeleteCurrentDuplicates(); err != nil { + return fmt.Errorf("failed to remove for block %d: %w", blockNum, err) + } + } + return nil +} diff --git a/eth/stagedsync/stage.go b/eth/stagedsync/stage.go index 5224e3c80c5..3565fcdb7ca 100644 --- a/eth/stagedsync/stage.go +++ b/eth/stagedsync/stage.go @@ -1,16 +1,9 @@ package stagedsync import ( - "context" - "encoding/binary" - "fmt" - "time" - - libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/log/v3" ) // ExecFunc is the execution function for the stage to move forward. 
@@ -108,67 +101,3 @@ func (s *PruneState) Done(db kv.Putter) error { func (s *PruneState) DoneAt(db kv.Putter, blockNum uint64) error { return stages.SaveStagePruneProgress(db, s.ID, blockNum) } - -// PruneTable has `limit` parameter to avoid too large data deletes per one sync cycle - better delete by small portions to reduce db.FreeList size -func PruneTable(tx kv.RwTx, table string, pruneTo uint64, ctx context.Context, limit int) error { - c, err := tx.RwCursor(table) - - if err != nil { - return fmt.Errorf("failed to create cursor for pruning %w", err) - } - defer c.Close() - - i := 0 - for k, _, err := c.First(); k != nil; k, _, err = c.Next() { - if err != nil { - return err - } - i++ - if i > limit { - break - } - - blockNum := binary.BigEndian.Uint64(k) - if blockNum >= pruneTo { - break - } - select { - case <-ctx.Done(): - return libcommon.ErrStopped - default: - } - if err = c.DeleteCurrent(); err != nil { - return fmt.Errorf("failed to remove for block %d: %w", blockNum, err) - } - } - return nil -} - -func PruneTableDupSort(tx kv.RwTx, table string, logPrefix string, pruneTo uint64, logEvery *time.Ticker, ctx context.Context) error { - c, err := tx.RwCursorDupSort(table) - if err != nil { - return fmt.Errorf("failed to create cursor for pruning %w", err) - } - defer c.Close() - - for k, _, err := c.First(); k != nil; k, _, err = c.NextNoDup() { - if err != nil { - return fmt.Errorf("failed to move %s cleanup cursor: %w", table, err) - } - blockNum := binary.BigEndian.Uint64(k) - if blockNum >= pruneTo { - break - } - select { - case <-logEvery.C: - log.Info(fmt.Sprintf("[%s]", logPrefix), "table", table, "block", blockNum) - case <-ctx.Done(): - return libcommon.ErrStopped - default: - } - if err = c.DeleteCurrentDuplicates(); err != nil { - return fmt.Errorf("failed to remove for block %d: %w", blockNum, err) - } - } - return nil -} diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index 0bb8ace725d..47856806474 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -599,25 +599,25 @@ func PruneExecutionStage(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx con defer logEvery.Stop() if cfg.prune.History.Enabled() { - if err = PruneTableDupSort(tx, kv.AccountChangeSet, logPrefix, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { + if err = rawdb.PruneTableDupSort(tx, kv.AccountChangeSet, logPrefix, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { return err } - if err = PruneTableDupSort(tx, kv.StorageChangeSet, logPrefix, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { + if err = rawdb.PruneTableDupSort(tx, kv.StorageChangeSet, logPrefix, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { return err } } if cfg.prune.Receipts.Enabled() { - if err = PruneTable(tx, kv.Receipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { + if err = rawdb.PruneTable(tx, kv.Receipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { return err } // LogIndex.Prune will read everything what not pruned here - if err = PruneTable(tx, kv.Log, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { + if err = rawdb.PruneTable(tx, kv.Log, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { return err } } if cfg.prune.CallTraces.Enabled() { - if err = PruneTableDupSort(tx, kv.CallTraceSet, logPrefix, 
cfg.prune.CallTraces.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { + if err = rawdb.PruneTableDupSort(tx, kv.CallTraceSet, logPrefix, cfg.prune.CallTraces.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { return err } } diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go index 0a9db4af808..a3f6b4ee47d 100644 --- a/eth/stagedsync/stage_senders.go +++ b/eth/stagedsync/stage_senders.go @@ -386,26 +386,18 @@ func PruneSendersStage(s *PruneState, tx kv.RwTx, cfg SendersCfg, ctx context.Co defer tx.Rollback() } + sn := cfg.blockRetire.Snapshots() // With snapsync - can prune old data only after snapshot for this data created: CanDeleteTo() - if cfg.blockRetire.Snapshots() != nil && cfg.blockRetire.Snapshots().Cfg().Enabled { - if cfg.blockRetire.Snapshots().Cfg().Produce { - if !cfg.blockRetire.Snapshots().Cfg().KeepBlocks { - canDeleteTo := snapshotsync.CanDeleteTo(s.ForwardProgress, cfg.blockRetire.Snapshots()) - if _, _, err := rawdb.DeleteAncientBlocks(tx, canDeleteTo, 100); err != nil { - return nil - } - if err = PruneTable(tx, kv.Senders, canDeleteTo, ctx, 100); err != nil { - return err - } - } - - if err := retireBlocksInSingleBackgroundThread(s, cfg, ctx); err != nil { - return fmt.Errorf("retireBlocksInSingleBackgroundThread: %w", err) - } + if sn != nil && sn.Cfg().Enabled && sn.Cfg().Produce { + if err := cfg.blockRetire.PruneAncientBlocks(tx); err != nil { + return err + } + if err := retireBlocksInSingleBackgroundThread(s, cfg, ctx); err != nil { + return fmt.Errorf("retireBlocksInSingleBackgroundThread: %w", err) } } else if cfg.prune.TxIndex.Enabled() { to := cfg.prune.TxIndex.PruneTo(s.ForwardProgress) - if err = PruneTable(tx, kv.Senders, to, ctx, 1_000); err != nil { + if err = rawdb.PruneTable(tx, kv.Senders, to, ctx, 1_000); err != nil { return err } } diff --git a/turbo/app/snapshots.go b/turbo/app/snapshots.go index 13a1155c629..92cda81a06f 100644 --- a/turbo/app/snapshots.go +++ b/turbo/app/snapshots.go @@ -21,9 +21,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon/cmd/hack/tool" "github.com/ledgerwatch/erigon/cmd/utils" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/internal/debug" "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" @@ -234,8 +232,8 @@ func doRetireCommand(cliCtx *cli.Context) error { to := cliCtx.Uint64(SnapshotToFlag.Name) every := cliCtx.Uint64(SnapshotEveryFlag.Name) - chainDB := mdbx.NewMDBX(log.New()).Label(kv.ChainDB).Path(dirs.Chaindata).MustOpen() - defer chainDB.Close() + db := mdbx.NewMDBX(log.New()).Label(kv.ChainDB).Path(dirs.Chaindata).MustOpen() + defer db.Close() cfg := ethconfig.NewSnapCfg(true, true, true) snapshots := snapshotsync.NewRoSnapshots(cfg, dirs.Snap) @@ -244,21 +242,17 @@ func doRetireCommand(cliCtx *cli.Context) error { } workers := cmp.Max(1, runtime.GOMAXPROCS(-1)-1) - br := snapshotsync.NewBlockRetire(workers, dirs.Tmp, snapshots, chainDB, nil, nil) + br := snapshotsync.NewBlockRetire(workers, dirs.Tmp, snapshots, db, nil, nil) log.Info("Params", "from", from, "to", to, "every", every) for i := from; i < to; i += every { if err := br.RetireBlocks(ctx, i, i+every, log.LvlInfo); err != nil { panic(err) } - if err := chainDB.Update(ctx, func(tx kv.RwTx) error { - progress, _ := stages.GetStageProgress(tx, stages.Headers) - canDeleteTo := snapshotsync.CanDeleteTo(progress, 
br.Snapshots()) - deletedFrom, deletedTo, err := rawdb.DeleteAncientBlocks(tx, canDeleteTo, 100) - if err != nil { - return nil + if err := db.Update(ctx, func(tx kv.RwTx) error { + if err := br.PruneAncientBlocks(tx); err != nil { + return err } - log.Info("Deleted blocks", "from", deletedFrom, "to", deletedTo) return nil }); err != nil { return err @@ -283,17 +277,17 @@ func doSnapshotCommand(cliCtx *cli.Context) error { dir.MustExist(filepath.Join(dirs.Snap, "db")) // this folder will be checked on existance - to understand that snapshots are ready dir.MustExist(dirs.Tmp) - chainDB := mdbx.NewMDBX(log.New()).Label(kv.ChainDB).Path(dirs.Chaindata).Readonly().MustOpen() - defer chainDB.Close() + db := mdbx.NewMDBX(log.New()).Label(kv.ChainDB).Path(dirs.Chaindata).Readonly().MustOpen() + defer db.Close() - if err := snapshotBlocks(ctx, chainDB, fromBlock, toBlock, segmentSize, dirs.Snap, dirs.Tmp); err != nil { + if err := snapshotBlocks(ctx, db, fromBlock, toBlock, segmentSize, dirs.Snap, dirs.Tmp); err != nil { log.Error("Error", "err", err) } return nil } -func rebuildIndices(ctx context.Context, chainDB kv.RoDB, cfg ethconfig.Snapshot, dirs datadir.Dirs, from uint64, workers int) error { - chainConfig := tool.ChainConfigFromDB(chainDB) +func rebuildIndices(ctx context.Context, db kv.RoDB, cfg ethconfig.Snapshot, dirs datadir.Dirs, from uint64, workers int) error { + chainConfig := tool.ChainConfigFromDB(db) chainID, _ := uint256.FromBig(chainConfig.ChainID) allSnapshots := snapshotsync.NewRoSnapshots(cfg, dirs.Snap) @@ -306,7 +300,7 @@ func rebuildIndices(ctx context.Context, chainDB kv.RoDB, cfg ethconfig.Snapshot return nil } -func snapshotBlocks(ctx context.Context, chainDB kv.RoDB, fromBlock, toBlock, blocksPerFile uint64, snapDir, tmpDir string) error { +func snapshotBlocks(ctx context.Context, db kv.RoDB, fromBlock, toBlock, blocksPerFile uint64, snapDir, tmpDir string) error { var last uint64 if toBlock > 0 { @@ -331,7 +325,7 @@ func snapshotBlocks(ctx context.Context, chainDB kv.RoDB, fromBlock, toBlock, bl return last, nil } - if err := chainDB.View(context.Background(), func(tx kv.Tx) (err error) { + if err := db.View(context.Background(), func(tx kv.Tx) (err error) { last, err = lastChunk(tx, blocksPerFile) return err }); err != nil { @@ -340,11 +334,8 @@ func snapshotBlocks(ctx context.Context, chainDB kv.RoDB, fromBlock, toBlock, bl } log.Info("Last body number", "last", last) - workers := runtime.GOMAXPROCS(-1) - 1 - if workers < 1 { - workers = 1 - } - if err := snapshotsync.DumpBlocks(ctx, fromBlock, last, blocksPerFile, tmpDir, snapDir, chainDB, workers, log.LvlInfo); err != nil { + workers := cmp.Max(1, runtime.GOMAXPROCS(-1)-1) + if err := snapshotsync.DumpBlocks(ctx, fromBlock, last, blocksPerFile, tmpDir, snapDir, db, workers, log.LvlInfo); err != nil { return fmt.Errorf("DumpBlocks: %w", err) } return nil diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 1ba201f4cc0..c23c6da9068 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -32,6 +32,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" @@ -963,6 +964,25 @@ func (br *BlockRetire) RetireBlocks(ctx context.Context, blockFrom, blockTo uint chainID, _ 
:= uint256.FromBig(chainConfig.ChainID) return retireBlocks(ctx, blockFrom, blockTo, *chainID, br.tmpDir, br.snapshots, br.db, br.workers, br.downloader, lvl, br.notifier) } + +func (br *BlockRetire) PruneAncientBlocks(tx kv.RwTx) error { + if !br.snapshots.cfg.KeepBlocks { + return nil + } + currentProgress, err := stages.GetStageProgress(tx, stages.Senders) + if err != nil { + return err + } + canDeleteTo := CanDeleteTo(currentProgress, br.snapshots) + if _, _, err := rawdb.DeleteAncientBlocks(tx, canDeleteTo, 100); err != nil { + return nil + } + if err := rawdb.PruneTable(tx, kv.Senders, canDeleteTo, context.Background(), 100); err != nil { + return err + } + return nil +} + func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, forwardProgress uint64, lvl log.Lvl) { if br.working.Load() { // go-routine is still working From d2389a1f26915e47deab2bde686f721406b89600 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Sat, 23 Jul 2022 18:02:31 +0200 Subject: [PATCH 59/72] Sepolia MergeNetsplit block (#4804) --- core/forkid/forkid_test.go | 10 ++++++++++ params/chainspecs/sepolia.json | 2 +- params/config.go | 4 +++- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index f29b109985c..c2c97de8239 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -135,6 +135,16 @@ func TestCreation(t *testing.T) { {6000000, ID{Hash: checksumToBytes(0xB8C6299D), Next: 0}}, // Future London block }, }, + // Sepolia test cases + { + params.SepoliaChainConfig, + params.SepoliaGenesisHash, + []testcase{ + {0, ID{Hash: checksumToBytes(0xfe3366e7), Next: 1735371}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople, Petersburg, Istanbul, Berlin and first London block + {1735370, ID{Hash: checksumToBytes(0xfe3366e7), Next: 1735371}}, // Last pre-MergeNetsplit block + {1735371, ID{Hash: checksumToBytes(0xb96cbd13), Next: 0}}, // First MergeNetsplit block + }, + }, } for i, tt := range tests { for j, ttt := range tt.cases { diff --git a/params/chainspecs/sepolia.json b/params/chainspecs/sepolia.json index ec4f7538390..628a7440ded 100644 --- a/params/chainspecs/sepolia.json +++ b/params/chainspecs/sepolia.json @@ -15,6 +15,6 @@ "berlinBlock": 0, "londonBlock": 0, "terminalTotalDifficulty": 17000000000000000, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "mergeNetsplitBlock": 1735371, "ethash": {} } diff --git a/params/config.go b/params/config.go index faf6fa6fa61..200af283c7f 100644 --- a/params/config.go +++ b/params/config.go @@ -405,7 +405,7 @@ func (c *ChainConfig) String() string { ) } - return fmt.Sprintf("{ChainID: %v, Homestead: %v, DAO: %v, DAO Support: %v, Tangerine Whistle: %v, Spurious Dragon: %v, Byzantium: %v, Constantinople: %v, Petersburg: %v, Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Gray Glacier: %v, Terminal Total Difficulty: %v, Engine: %v}", + return fmt.Sprintf("{ChainID: %v, Homestead: %v, DAO: %v, DAO Support: %v, Tangerine Whistle: %v, Spurious Dragon: %v, Byzantium: %v, Constantinople: %v, Petersburg: %v, Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Gray Glacier: %v, Terminal Total Difficulty: %v, Merge Netsplit: %v, Engine: %v}", c.ChainID, c.HomesteadBlock, c.DAOForkBlock, @@ -422,6 +422,7 @@ func (c *ChainConfig) String() string { c.ArrowGlacierBlock, c.GrayGlacierBlock, 
c.TerminalTotalDifficulty, + c.MergeNetsplitBlock, engine, ) } @@ -615,6 +616,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "londonBlock", block: c.LondonBlock}, {name: "arrowGlacierBlock", block: c.ArrowGlacierBlock, optional: true}, {name: "grayGlacierBlock", block: c.GrayGlacierBlock, optional: true}, + {name: "mergeNetsplitBlock", block: c.MergeNetsplitBlock, optional: true}, } { if lastFork.name != "" { // Next one must be higher number From 1cb6be02a546b72907d35c7b3a3435f3bc3b09dc Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sat, 23 Jul 2022 18:57:23 +0200 Subject: [PATCH 60/72] Avoid constantly triggering stageloop when using Engine API (#4797) * avoid constantly triggering stageloop when using Engine API * fix lint + test * fixed comments * ops * little fixes here and there Co-authored-by: giuliorebuffo --- cmd/rpcdaemon/commands/eth_subscribe_test.go | 2 +- cmd/rpcdaemon/rpcdaemontest/test_util.go | 2 +- cmd/rpcdaemon22/rpcdaemontest/test_util.go | 2 +- eth/backend.go | 3 +- eth/stagedsync/stage_headers.go | 108 ++-------- ethdb/privateapi/engine_test.go | 33 ++-- ethdb/privateapi/ethbackend.go | 187 +++++++++++++----- turbo/engineapi/request_list.go | 11 ++ turbo/stages/headerdownload/header_algos.go | 5 +- .../headerdownload/header_data_struct.go | 23 ++- turbo/stages/mock_sentry.go | 2 +- turbo/stages/sentry_mock_test.go | 9 +- turbo/stages/stageloop.go | 7 +- 13 files changed, 208 insertions(+), 186 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_subscribe_test.go b/cmd/rpcdaemon/commands/eth_subscribe_test.go index b7c8531e3d4..24f38220c45 100644 --- a/cmd/rpcdaemon/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon/commands/eth_subscribe_test.go @@ -40,7 +40,7 @@ func TestEthSubscribe(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed ctx := context.Background() - backendServer := privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, nil, false) + backendServer := privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, false) backendClient := direct.NewEthBackendClientDirect(backendServer) backend := rpcservices.NewRemoteBackend(backendClient, m.DB, snapshotsync.NewBlockReader()) ff := rpchelper.New(ctx, backend, nil, nil, func() {}) diff --git a/cmd/rpcdaemon/rpcdaemontest/test_util.go b/cmd/rpcdaemon/rpcdaemontest/test_util.go index ee53c28a891..f54a8429b29 100644 --- a/cmd/rpcdaemon/rpcdaemontest/test_util.go +++ b/cmd/rpcdaemon/rpcdaemontest/test_util.go @@ -292,7 +292,7 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g ethashApi := apis[1].Service.(*ethash.API) server := grpc.NewServer() - remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, nil, false)) + remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, false)) txpool.RegisterTxpoolServer(server, m.TxPoolGrpcServer) txpool.RegisterMiningServer(server, privateapi.NewMiningServer(ctx, &IsMiningMock{}, ethashApi)) starknet.RegisterCAIROVMServer(server, &starknet.UnimplementedCAIROVMServer{}) diff --git a/cmd/rpcdaemon22/rpcdaemontest/test_util.go b/cmd/rpcdaemon22/rpcdaemontest/test_util.go index ad73c3faad4..a2d6c1eb143 100644 --- a/cmd/rpcdaemon22/rpcdaemontest/test_util.go +++ 
b/cmd/rpcdaemon22/rpcdaemontest/test_util.go @@ -293,7 +293,7 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g ethashApi := apis[1].Service.(*ethash.API) server := grpc.NewServer() - remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, nil, false)) + remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, false)) txpool.RegisterTxpoolServer(server, m.TxPoolGrpcServer) txpool.RegisterMiningServer(server, privateapi.NewMiningServer(ctx, &IsMiningMock{}, ethashApi)) starknet.RegisterCAIROVMServer(server, &starknet.UnimplementedCAIROVMServer{}) diff --git a/eth/backend.go b/eth/backend.go index 293f3989309..89cd9332846 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -411,8 +411,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere // Initialize ethbackend ethBackendRPC := privateapi.NewEthBackendServer(ctx, backend, backend.chainDB, backend.notifications.Events, - blockReader, chainConfig, backend.sentriesClient.Hd.BeaconRequestList, backend.sentriesClient.Hd.PayloadStatusCh, - assembleBlockPOS, config.Miner.EnabledPOS) + blockReader, chainConfig, assembleBlockPOS, backend.sentriesClient.Hd, config.Miner.EnabledPOS) miningRPC = privateapi.NewMiningServer(ctx, backend, ethashApi) if stack.Config().PrivateApiAddr != "" { diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index e8a4b7f863b..8e96b5b9bf6 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -180,7 +180,7 @@ func HeadersPOS( cfg.hd.ClearPendingPayloadHash() cfg.hd.SetPendingPayloadStatus(nil) - var payloadStatus *privateapi.PayloadStatus + var payloadStatus *engineapi.PayloadStatus if forkChoiceInsteadOfNewPayload { payloadStatus, err = startHandlingForkChoice(forkChoiceMessage, requestStatus, requestId, s, u, ctx, tx, cfg, headerInserter) } else { @@ -190,7 +190,7 @@ func HeadersPOS( if err != nil { if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} + cfg.hd.PayloadStatusCh <- engineapi.PayloadStatus{CriticalError: err} } return err } @@ -257,7 +257,7 @@ func startHandlingForkChoice( tx kv.RwTx, cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter, -) (*privateapi.PayloadStatus, error) { +) (*engineapi.PayloadStatus, error) { if cfg.memoryOverlay { defer cfg.forkValidator.ClearWithUnwind(tx, cfg.notifications.Accumulator, cfg.notifications.StateChangesConsumer) } @@ -274,27 +274,17 @@ func startHandlingForkChoice( return nil, err } if canonical { - return &privateapi.PayloadStatus{ + return &engineapi.PayloadStatus{ Status: remote.EngineStatus_VALID, LatestValidHash: currentHeadHash, }, nil } else { - return &privateapi.PayloadStatus{ + return &engineapi.PayloadStatus{ CriticalError: &privateapi.InvalidForkchoiceStateErr, }, nil } } - bad, lastValidHash := cfg.hd.IsBadHeaderPoS(headerHash) - if bad { - log.Warn(fmt.Sprintf("[%s] Fork choice bad head block", s.LogPrefix()), "headerHash", headerHash) - cfg.hd.BeaconRequestList.Remove(requestId) - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: lastValidHash, - }, nil - } - // Header itself may already be in the snapshots, if CL starts off at much earlier state than Erigon header, err := cfg.blockReader.HeaderByHash(ctx, tx, headerHash) if err != 
nil { @@ -307,33 +297,12 @@ func startHandlingForkChoice( log.Info(fmt.Sprintf("[%s] Fork choice missing header with hash %x", s.LogPrefix(), headerHash)) cfg.hd.SetPoSDownloaderTip(headerHash) schedulePoSDownload(requestId, headerHash, 0 /* header height is unknown, setting to 0 */, s, cfg) - return &privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil + return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } cfg.hd.BeaconRequestList.Remove(requestId) headerNumber := header.Number.Uint64() - // If header is canonical, then no reorgs are required - canonicalHash, err := rawdb.ReadCanonicalHash(tx, headerNumber) - if err != nil { - log.Warn(fmt.Sprintf("[%s] Fork choice err (reading canonical hash of %d)", s.LogPrefix(), headerNumber), "err", err) - cfg.hd.BeaconRequestList.Remove(requestId) - return nil, err - } - - if headerHash == canonicalHash { - log.Info(fmt.Sprintf("[%s] Fork choice on previously known block", s.LogPrefix())) - cfg.hd.BeaconRequestList.Remove(requestId) - // Per the Engine API spec: - // Client software MAY skip an update of the forkchoice state and MUST NOT begin a payload build process - // if forkchoiceState.headBlockHash references an ancestor of the head of canonical chain. - // In the case of such an event, client software MUST return - // {payloadStatus: {status: VALID, latestValidHash: forkchoiceState.headBlockHash, validationError: null}, payloadId: null}. - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_VALID, - LatestValidHash: headerHash, - }, nil - } if cfg.memoryOverlay && headerHash == cfg.forkValidator.ExtendingForkHeadHash() { log.Info("Flushing in-memory state") @@ -350,7 +319,7 @@ func startHandlingForkChoice( cfg.hd.SetPendingPayloadHash(headerHash) return nil, nil } else { - return &privateapi.PayloadStatus{ + return &engineapi.PayloadStatus{ CriticalError: &privateapi.InvalidForkchoiceStateErr, }, nil } @@ -369,7 +338,7 @@ func startHandlingForkChoice( // TODO(yperbasis): what if some bodies are missing and we have to download them? 
cfg.hd.SetPendingPayloadHash(headerHash) } else { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING} + cfg.hd.PayloadStatusCh <- engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING} } } @@ -418,7 +387,7 @@ func finishHandlingForkChoice( if !canonical { if cfg.hd.GetPendingPayloadHash() != (common.Hash{}) { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ + cfg.hd.PayloadStatusCh <- engineapi.PayloadStatus{ CriticalError: &privateapi.InvalidForkchoiceStateErr, } } @@ -438,7 +407,7 @@ func handleNewPayload( tx kv.RwTx, cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter, -) (*privateapi.PayloadStatus, error) { +) (*engineapi.PayloadStatus, error) { header := block.Header() headerNumber := header.Number.Uint64() headerHash := block.Hash() @@ -446,40 +415,6 @@ func handleNewPayload( log.Debug(fmt.Sprintf("[%s] Handling new payload", s.LogPrefix()), "height", headerNumber, "hash", headerHash) cfg.hd.UpdateTopSeenHeightPoS(headerNumber) - existingCanonicalHash, err := rawdb.ReadCanonicalHash(tx, headerNumber) - if err != nil { - log.Warn(fmt.Sprintf("[%s] New payload err", s.LogPrefix()), "err", err) - cfg.hd.BeaconRequestList.Remove(requestId) - return nil, err - } - - if existingCanonicalHash != (common.Hash{}) && headerHash == existingCanonicalHash { - log.Info(fmt.Sprintf("[%s] New payload: previously received valid header %d", s.LogPrefix(), headerNumber)) - cfg.hd.BeaconRequestList.Remove(requestId) - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_VALID, - LatestValidHash: headerHash, - }, nil - } - - bad, lastValidHash := cfg.hd.IsBadHeaderPoS(headerHash) - if bad { - log.Warn(fmt.Sprintf("[%s] Previously known bad block", s.LogPrefix()), "height", headerNumber, "hash", headerHash) - } else { - bad, lastValidHash = cfg.hd.IsBadHeaderPoS(header.ParentHash) - if bad { - log.Warn(fmt.Sprintf("[%s] Previously known bad parent", s.LogPrefix()), "height", headerNumber, "hash", headerHash, "parentHash", header.ParentHash) - } - } - if bad { - cfg.hd.BeaconRequestList.Remove(requestId) - cfg.hd.ReportBadHeaderPoS(headerHash, lastValidHash) - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: lastValidHash, - }, nil - } - parent, err := cfg.blockReader.HeaderByHash(ctx, tx, header.ParentHash) if err != nil { return nil, err @@ -488,18 +423,7 @@ func handleNewPayload( log.Info(fmt.Sprintf("[%s] New payload missing parent", s.LogPrefix())) cfg.hd.SetPoSDownloaderTip(headerHash) schedulePoSDownload(requestId, header.ParentHash, headerNumber-1, s, cfg) - return &privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil - } - - if headerNumber != parent.Number.Uint64()+1 { - log.Warn(fmt.Sprintf("[%s] Invalid block number", s.LogPrefix()), "headerNumber", headerNumber, "parentNumber", parent.Number.Uint64()) - cfg.hd.BeaconRequestList.Remove(requestId) - cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: header.ParentHash, - ValidationError: errors.New("invalid block number"), - }, nil + return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } cfg.hd.BeaconRequestList.Remove(requestId) @@ -526,7 +450,7 @@ func verifyAndSaveNewPoSHeader( cfg HeadersCfg, block *types.Block, headerInserter *headerdownload.HeaderInserter, -) (response *privateapi.PayloadStatus, success bool, err error) { +) (response *engineapi.PayloadStatus, success bool, err error) { header := 
block.Header() headerNumber := header.Number.Uint64() headerHash := block.Hash() @@ -534,7 +458,7 @@ func verifyAndSaveNewPoSHeader( if verificationErr := cfg.hd.VerifyHeader(header); verificationErr != nil { log.Warn("Verification failed for header", "hash", headerHash, "height", headerNumber, "err", verificationErr) cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) - return &privateapi.PayloadStatus{ + return &engineapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, LatestValidHash: header.ParentHash, ValidationError: verificationErr, @@ -565,7 +489,7 @@ func verifyAndSaveNewPoSHeader( } else if err := headerInserter.FeedHeaderPoS(tx, header, headerHash); err != nil { return nil, false, err } - return &privateapi.PayloadStatus{ + return &engineapi.PayloadStatus{ Status: status, LatestValidHash: latestValidHash, ValidationError: validationError, @@ -578,7 +502,7 @@ func verifyAndSaveNewPoSHeader( if !canExtendCanonical { log.Info("Side chain", "parentHash", header.ParentHash, "currentHead", currentHeadHash) - return &privateapi.PayloadStatus{Status: remote.EngineStatus_ACCEPTED}, true, nil + return &engineapi.PayloadStatus{Status: remote.EngineStatus_ACCEPTED}, true, nil } // OK, we're on the canonical chain @@ -725,7 +649,7 @@ func forkingPoint( func handleInterrupt(interrupt engineapi.Interrupt, cfg HeadersCfg, tx kv.RwTx, headerInserter *headerdownload.HeaderInserter, useExternalTx bool) (bool, error) { if interrupt != engineapi.None { if interrupt == engineapi.Stopping { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: errors.New("server is stopping")} + cfg.hd.PayloadStatusCh <- engineapi.PayloadStatus{CriticalError: errors.New("server is stopping")} } if interrupt == engineapi.Synced { verifyAndSaveDownloadedPoSHeaders(tx, cfg, headerInserter) diff --git a/ethdb/privateapi/engine_test.go b/ethdb/privateapi/engine_test.go index 42903819ad9..74b0eab1efa 100644 --- a/ethdb/privateapi/engine_test.go +++ b/ethdb/privateapi/engine_test.go @@ -13,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/engineapi" + "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/stretchr/testify/require" ) @@ -89,11 +90,10 @@ func TestMockDownloadRequest(t *testing.T) { require := require.New(t) makeTestDb(ctx, db) - beaconRequestList := engineapi.NewRequestList() - statusCh := make(chan PayloadStatus) + hd := headerdownload.NewHeaderDownload(0, 0, nil, nil) events := NewEvents() - backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, beaconRequestList, statusCh, nil, false) + backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, nil, hd, false) var err error var reply *remote.EnginePayloadStatus @@ -104,8 +104,8 @@ func TestMockDownloadRequest(t *testing.T) { done <- true }() - beaconRequestList.WaitForRequest(true) - statusCh <- PayloadStatus{Status: remote.EngineStatus_SYNCING} + hd.BeaconRequestList.WaitForRequest(true) + hd.PayloadStatusCh <- engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING} <-done require.NoError(err) require.Equal(reply.Status, remote.EngineStatus_SYNCING) @@ -148,11 +148,10 @@ func TestMockValidExecution(t *testing.T) { makeTestDb(ctx, db) - beaconRequestList := engineapi.NewRequestList() - statusCh := make(chan PayloadStatus) + hd := headerdownload.NewHeaderDownload(0, 0, nil, nil) events := NewEvents() - backend := 
NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, beaconRequestList, statusCh, nil, false) + backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, nil, hd, false) var err error var reply *remote.EnginePayloadStatus @@ -163,9 +162,9 @@ func TestMockValidExecution(t *testing.T) { done <- true }() - beaconRequestList.WaitForRequest(true) + hd.BeaconRequestList.WaitForRequest(true) - statusCh <- PayloadStatus{ + hd.PayloadStatusCh <- engineapi.PayloadStatus{ Status: remote.EngineStatus_VALID, LatestValidHash: payload3Hash, } @@ -184,11 +183,10 @@ func TestMockInvalidExecution(t *testing.T) { makeTestDb(ctx, db) - beaconRequestList := engineapi.NewRequestList() - statusCh := make(chan PayloadStatus) + hd := headerdownload.NewHeaderDownload(0, 0, nil, nil) events := NewEvents() - backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, beaconRequestList, statusCh, nil, false) + backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, nil, hd, false) var err error var reply *remote.EnginePayloadStatus @@ -199,9 +197,9 @@ func TestMockInvalidExecution(t *testing.T) { done <- true }() - beaconRequestList.WaitForRequest(true) + hd.BeaconRequestList.WaitForRequest(true) // Simulate invalid status - statusCh <- PayloadStatus{ + hd.PayloadStatusCh <- engineapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, LatestValidHash: startingHeadHash, } @@ -220,11 +218,10 @@ func TestNoTTD(t *testing.T) { makeTestDb(ctx, db) - beaconRequestList := engineapi.NewRequestList() - statusCh := make(chan PayloadStatus) + hd := headerdownload.NewHeaderDownload(0, 0, nil, nil) events := NewEvents() - backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{}, beaconRequestList, statusCh, nil, false) + backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{}, nil, hd, false) var err error diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index f4abc963acf..6e1c22b2dfd 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -24,6 +24,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/builder" "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/log/v3" "golang.org/x/exp/slices" "google.golang.org/protobuf/types/known/emptypb" @@ -55,14 +56,12 @@ type EthBackendServer struct { // Block proposing for proof-of-stake payloadId uint64 builders map[uint64]*builder.BlockBuilder - // Send Beacon Chain requests to staged sync - requestList *engineapi.RequestList - // Replies to newPayload & forkchoice requests - statusCh <-chan PayloadStatus + builderFunc builder.BlockBuilderFunc proposing bool lock sync.Mutex // Engine API is asynchronous, we want to avoid CL to call different APIs at the same time logsFilter *LogsFilterAggregator + hd *headerdownload.HeaderDownload } type EthBackend interface { @@ -73,23 +72,12 @@ type EthBackend interface { Peers(ctx context.Context) (*remote.PeersReply, error) } -// This is the status of a newly execute block. 
-// Hash: Block hash -// Status: block's status -type PayloadStatus struct { - Status remote.EngineStatus - LatestValidHash common.Hash - ValidationError error - CriticalError error -} - func NewEthBackendServer(ctx context.Context, eth EthBackend, db kv.RwDB, events *Events, blockReader services.BlockAndTxnReader, - config *params.ChainConfig, requestList *engineapi.RequestList, statusCh <-chan PayloadStatus, - builderFunc builder.BlockBuilderFunc, proposing bool, + config *params.ChainConfig, builderFunc builder.BlockBuilderFunc, hd *headerdownload.HeaderDownload, proposing bool, ) *EthBackendServer { s := &EthBackendServer{ctx: ctx, eth: eth, events: events, db: db, blockReader: blockReader, config: config, - requestList: requestList, statusCh: statusCh, builders: make(map[uint64]*builder.BlockBuilder), - builderFunc: builderFunc, proposing: proposing, logsFilter: NewLogsFilterAggregator(events), + builders: make(map[uint64]*builder.BlockBuilder), + builderFunc: builderFunc, proposing: proposing, logsFilter: NewLogsFilterAggregator(events), hd: hd, } ch, clean := s.events.AddLogsSubscription() @@ -244,7 +232,7 @@ func (s *EthBackendServer) Block(ctx context.Context, req *remote.BlockRequest) return &remote.BlockReply{BlockRlp: blockRlp, Senders: sendersBytes}, nil } -func convertPayloadStatus(payloadStatus *PayloadStatus) *remote.EnginePayloadStatus { +func convertPayloadStatus(payloadStatus *engineapi.PayloadStatus) *remote.EnginePayloadStatus { reply := remote.EnginePayloadStatus{Status: payloadStatus.Status} if payloadStatus.LatestValidHash != (common.Hash{}) { reply.LatestValidHash = gointerfaces.ConvertHashToH256(payloadStatus.LatestValidHash) @@ -257,7 +245,7 @@ func convertPayloadStatus(payloadStatus *PayloadStatus) *remote.EnginePayloadSta func (s *EthBackendServer) stageLoopIsBusy() bool { for i := 0; i < 20; i++ { - if !s.requestList.IsWaiting() { + if !s.hd.BeaconRequestList.IsWaiting() { // This might happen, for example, in the following scenario: // 1) CL sends NewPayload and immediately after that ForkChoiceUpdated. // 2) We happily process NewPayload and stage loop is at the end. @@ -269,7 +257,7 @@ func (s *EthBackendServer) stageLoopIsBusy() bool { time.Sleep(5 * time.Millisecond) } } - return !s.requestList.IsWaiting() + return !s.hd.BeaconRequestList.IsWaiting() } // EngineNewPayloadV1 validates and possibly executes payload @@ -344,12 +332,21 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E if err != nil { return nil, err } + + tx.Rollback() + if parentTd != nil && parentTd.Cmp(s.config.TerminalTotalDifficulty) < 0 { log.Warn("[NewPayload] TTD not reached yet", "height", header.Number, "hash", common.Hash(blockHash)) return &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{})}, nil } - tx.Rollback() + possibleStatus, err := s.getPayloadStatusFromHashIfPossible(blockHash, req.BlockNumber, header.ParentHash, true) + if err != nil { + return nil, err + } + if possibleStatus != nil { + return convertPayloadStatus(possibleStatus), nil + } // If another payload is already commissioned then we just reply with syncing if s.stageLoopIsBusy() { // We are still syncing a commissioned payload @@ -360,17 +357,13 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E log.Debug("[NewPayload] stage loop is busy") return &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } - - // Lock the thread (We modify shared resources). 
- log.Debug("[NewPayload] acquiring lock") s.lock.Lock() defer s.lock.Unlock() - log.Debug("[NewPayload] lock acquired") log.Debug("[NewPayload] sending block", "height", header.Number, "hash", common.Hash(blockHash)) - s.requestList.AddPayloadRequest(block) + s.hd.BeaconRequestList.AddPayloadRequest(block) - payloadStatus := <-s.statusCh + payloadStatus := <-s.hd.PayloadStatusCh log.Debug("[NewPayload] got reply", "payloadStatus", payloadStatus) if payloadStatus.CriticalError != nil { @@ -380,6 +373,100 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E return convertPayloadStatus(&payloadStatus), nil } +// Check if we can make out a status from the payload hash/head hash. +func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.Hash, blockNumber uint64, parentHash common.Hash, newPayload bool) (*engineapi.PayloadStatus, error) { + if s.hd == nil { + return nil, nil + } + var prefix string + if newPayload { + prefix = "NewPayload" + } else { + prefix = "ForkChoiceUpdated" + } + tx, err := s.db.BeginRo(s.ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + header, err := rawdb.ReadHeaderByHash(tx, blockHash) + if err != nil { + return nil, err + } + var parent *types.Header + if newPayload { + parent, err = rawdb.ReadHeaderByHash(tx, parentHash) + } + if err != nil { + return nil, err + } + + var canonicalHash common.Hash + if header != nil { + canonicalHash, err = rawdb.ReadCanonicalHash(tx, header.Number.Uint64()) + } + if err != nil { + return nil, err + } + + if newPayload && parent != nil && blockNumber != parent.Number.Uint64()+1 { + log.Warn(fmt.Sprintf("[%s] Invalid block number", prefix), "headerNumber", blockNumber, "parentNumber", parent.Number.Uint64()) + s.hd.ReportBadHeaderPoS(blockHash, parent.Hash()) + return &engineapi.PayloadStatus{ + Status: remote.EngineStatus_INVALID, + LatestValidHash: parent.Hash(), + ValidationError: errors.New("invalid block number"), + }, nil + } + // Check if we already determined if the hash is attributed to a previously received invalid header. + bad, lastValidHash := s.hd.IsBadHeaderPoS(blockHash) + if bad { + log.Warn(fmt.Sprintf("[%s] Previously known bad block", prefix), "hash", blockHash) + } else if newPayload { + bad, lastValidHash = s.hd.IsBadHeaderPoS(parentHash) + if bad { + log.Warn(fmt.Sprintf("[%s] Previously known bad block", prefix), "hash", blockHash, "parentHash", parentHash) + } + } + if bad { + s.hd.ReportBadHeaderPoS(blockHash, lastValidHash) + return &engineapi.PayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: lastValidHash}, nil + } + + // If header is already validated or has a missing parent, you can either return VALID or SYNCING. 
+ if newPayload { + if header != nil && canonicalHash == blockHash { + return &engineapi.PayloadStatus{Status: remote.EngineStatus_VALID, LatestValidHash: blockHash}, nil + } + + if parent == nil && s.hd.PosStatus() == headerdownload.Syncing { + return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil + } + + return nil, nil + } + + if header == nil { + if s.hd.PosStatus() == headerdownload.Syncing { + return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil + + } + return nil, nil + } + + headHash := rawdb.ReadHeadBlockHash(tx) + if err != nil { + return nil, err + } + + if blockHash != headHash && canonicalHash == blockHash { + return &engineapi.PayloadStatus{Status: remote.EngineStatus_VALID, LatestValidHash: blockHash}, nil + } + + return nil, nil +} + // EngineGetPayloadV1 retrieves previously assembled payload (Validators only) func (s *EthBackendServer) EngineGetPayloadV1(ctx context.Context, req *remote.EngineGetPayloadRequest) (*types2.ExecutionPayload, error) { if !s.proposing { @@ -451,6 +538,7 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r return nil, err } defer tx1.Rollback() + td, err := rawdb.ReadTdByHash(tx1, forkChoice.HeadBlockHash) tx1.Rollback() if err != nil { @@ -463,31 +551,38 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r }, nil } - if s.stageLoopIsBusy() { - log.Debug("[ForkChoiceUpdated] stage loop is busy") - return &remote.EngineForkChoiceUpdatedReply{ - PayloadStatus: &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, - }, nil + status, err := s.getPayloadStatusFromHashIfPossible(forkChoice.HeadBlockHash, 0, common.Hash{}, false) + if err != nil { + return nil, err } + if status == nil { + if s.stageLoopIsBusy() { + log.Debug("[ForkChoiceUpdated] stage loop is busy") + return &remote.EngineForkChoiceUpdatedReply{ + PayloadStatus: &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, + }, nil + } + s.lock.Lock() + defer s.lock.Unlock() - log.Debug("[ForkChoiceUpdated] acquiring lock") - s.lock.Lock() - defer s.lock.Unlock() - log.Debug("[ForkChoiceUpdated] lock acquired") - - log.Debug("[ForkChoiceUpdated] sending forkChoiceMessage", "head", forkChoice.HeadBlockHash) - s.requestList.AddForkChoiceRequest(&forkChoice) + log.Debug("[ForkChoiceUpdated] sending forkChoiceMessage", "head", forkChoice.HeadBlockHash) + s.hd.BeaconRequestList.AddForkChoiceRequest(&forkChoice) - status := <-s.statusCh - log.Debug("[ForkChoiceUpdated] got reply", "payloadStatus", status) + statusRef := <-s.hd.PayloadStatusCh + status = &statusRef + log.Debug("[ForkChoiceUpdated] got reply", "payloadStatus", status) - if status.CriticalError != nil { - return nil, status.CriticalError + if status.CriticalError != nil { + return nil, status.CriticalError + } + } else { + s.lock.Lock() + defer s.lock.Unlock() } // No need for payload building if req.PayloadAttributes == nil || status.Status != remote.EngineStatus_VALID { - return &remote.EngineForkChoiceUpdatedReply{PayloadStatus: convertPayloadStatus(&status)}, nil + return &remote.EngineForkChoiceUpdatedReply{PayloadStatus: convertPayloadStatus(status)}, nil } if !s.proposing { @@ -514,7 +609,7 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r log.Warn("Skipping payload building because forkchoiceState.headBlockHash is not the head of the canonical chain", "forkChoice.HeadBlockHash", forkChoice.HeadBlockHash, "headHeader.Hash", headHeader.Hash()) - return 
&remote.EngineForkChoiceUpdatedReply{PayloadStatus: convertPayloadStatus(&status)}, nil + return &remote.EngineForkChoiceUpdatedReply{PayloadStatus: convertPayloadStatus(status)}, nil } if headHeader.Time >= req.PayloadAttributes.Timestamp { diff --git a/turbo/engineapi/request_list.go b/turbo/engineapi/request_list.go index 11a2bc0ba13..455a38825c0 100644 --- a/turbo/engineapi/request_list.go +++ b/turbo/engineapi/request_list.go @@ -6,10 +6,21 @@ import ( "github.com/emirpasic/gods/maps/treemap" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/types" ) +// This is the status of a newly execute block. +// Hash: Block hash +// Status: block's status +type PayloadStatus struct { + Status remote.EngineStatus + LatestValidHash common.Hash + ValidationError error + CriticalError error +} + // The message we are going to send to the stage sync in ForkchoiceUpdated type ForkChoiceMessage struct { HeadBlockHash common.Hash diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 1f9d9f83808..53cea0779bc 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -26,7 +26,6 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/engineapi" @@ -1134,13 +1133,13 @@ func (hd *HeaderDownload) ClearPendingPayloadHash() { hd.pendingPayloadHash = common.Hash{} } -func (hd *HeaderDownload) GetPendingPayloadStatus() *privateapi.PayloadStatus { +func (hd *HeaderDownload) GetPendingPayloadStatus() *engineapi.PayloadStatus { hd.lock.RLock() defer hd.lock.RUnlock() return hd.pendingPayloadStatus } -func (hd *HeaderDownload) SetPendingPayloadStatus(response *privateapi.PayloadStatus) { +func (hd *HeaderDownload) SetPendingPayloadStatus(response *engineapi.PayloadStatus) { hd.lock.Lock() defer hd.lock.Unlock() hd.pendingPayloadStatus = response diff --git a/turbo/stages/headerdownload/header_data_struct.go b/turbo/stages/headerdownload/header_data_struct.go index 1a097fcf28b..a858f1554fa 100644 --- a/turbo/stages/headerdownload/header_data_struct.go +++ b/turbo/stages/headerdownload/header_data_struct.go @@ -12,7 +12,6 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" @@ -305,16 +304,16 @@ type HeaderDownload struct { requestId int posAnchor *Anchor posStatus SyncStatus - posSync bool // Whether the chain is syncing in the PoS mode - headersCollector *etl.Collector // ETL collector for headers - BeaconRequestList *engineapi.RequestList // Requests from ethbackend to staged sync - PayloadStatusCh chan privateapi.PayloadStatus // Responses (validation/execution status) - pendingPayloadHash common.Hash // Header whose status we still should send to PayloadStatusCh - pendingPayloadStatus *privateapi.PayloadStatus // Alternatively, there can be an already prepared response to send to PayloadStatusCh - unsettledForkChoice *engineapi.ForkChoiceMessage // Forkchoice to process after unwind - unsettledHeadHeight uint64 
// Height of unsettledForkChoice.headBlockHash - posDownloaderTip common.Hash // See https://hackmd.io/GDc0maGsQeKfP8o2C7L52w - badPoSHeaders map[common.Hash]common.Hash // Invalid Tip -> Last Valid Ancestor + posSync bool // Whether the chain is syncing in the PoS mode + headersCollector *etl.Collector // ETL collector for headers + BeaconRequestList *engineapi.RequestList // Requests from ethbackend to staged sync + PayloadStatusCh chan engineapi.PayloadStatus // Responses (validation/execution status) + pendingPayloadHash common.Hash // Header whose status we still should send to PayloadStatusCh + pendingPayloadStatus *engineapi.PayloadStatus // Alternatively, there can be an already prepared response to send to PayloadStatusCh + unsettledForkChoice *engineapi.ForkChoiceMessage // Forkchoice to process after unwind + unsettledHeadHeight uint64 // Height of unsettledForkChoice.headBlockHash + posDownloaderTip common.Hash // See https://hackmd.io/GDc0maGsQeKfP8o2C7L52w + badPoSHeaders map[common.Hash]common.Hash // Invalid Tip -> Last Valid Ancestor } // HeaderRecord encapsulates two forms of the same header - raw RLP encoding (to avoid duplicated decodings and encodings), and parsed value types.Header @@ -343,7 +342,7 @@ func NewHeaderDownload( DeliveryNotify: make(chan struct{}, 1), QuitPoWMining: make(chan struct{}), BeaconRequestList: engineapi.NewRequestList(), - PayloadStatusCh: make(chan privateapi.PayloadStatus, 1), + PayloadStatusCh: make(chan engineapi.PayloadStatus, 1), headerReader: headerReader, badPoSHeaders: make(map[common.Hash]common.Hash), } diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 774e98fab39..134bdc48ff2 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -530,7 +530,7 @@ func (ms *MockSentry) SendForkChoiceRequest(message *engineapi.ForkChoiceMessage ms.sentriesClient.Hd.BeaconRequestList.AddForkChoiceRequest(message) } -func (ms *MockSentry) ReceivePayloadStatus() privateapi.PayloadStatus { +func (ms *MockSentry) ReceivePayloadStatus() engineapi.PayloadStatus { return <-ms.sentriesClient.Hd.PayloadStatusCh } diff --git a/turbo/stages/sentry_mock_test.go b/turbo/stages/sentry_mock_test.go index bd8e552c1be..6e72276dea8 100644 --- a/turbo/stages/sentry_mock_test.go +++ b/turbo/stages/sentry_mock_test.go @@ -669,11 +669,10 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { FinalizedBlockHash: invalidTip.Hash(), } m.SendForkChoiceRequest(&forkChoiceMessage) - headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) + _, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) require.NoError(t, err) - stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) - payloadStatus2 := m.ReceivePayloadStatus() - require.Equal(t, remote.EngineStatus_INVALID, payloadStatus2.Status) - assert.Equal(t, lastValidHeader.Hash(), payloadStatus2.LatestValidHash) + bad, lastValidHash := m.HeaderDownload().IsBadHeaderPoS(invalidTip.Hash()) + assert.True(t, bad) + assert.Equal(t, lastValidHash, lastValidHeader.Hash()) } diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 5280dc8b7e3..e2552990af9 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -23,7 +23,6 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb/privateapi" 
"github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" @@ -35,13 +34,13 @@ import ( func SendPayloadStatus(hd *headerdownload.HeaderDownload, headBlockHash common.Hash, err error) { if pendingPayloadStatus := hd.GetPendingPayloadStatus(); pendingPayloadStatus != nil { if err != nil { - hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} + hd.PayloadStatusCh <- engineapi.PayloadStatus{CriticalError: err} } else { hd.PayloadStatusCh <- *pendingPayloadStatus } } else if pendingPayloadHash := hd.GetPendingPayloadHash(); pendingPayloadHash != (common.Hash{}) { if err != nil { - hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} + hd.PayloadStatusCh <- engineapi.PayloadStatus{CriticalError: err} } else { var status remote.EngineStatus if headBlockHash == pendingPayloadHash { @@ -50,7 +49,7 @@ func SendPayloadStatus(hd *headerdownload.HeaderDownload, headBlockHash common.H log.Warn("Failed to execute pending payload", "pendingPayload", pendingPayloadHash, "headBlock", headBlockHash) status = remote.EngineStatus_INVALID } - hd.PayloadStatusCh <- privateapi.PayloadStatus{ + hd.PayloadStatusCh <- engineapi.PayloadStatus{ Status: status, LatestValidHash: headBlockHash, } From 81d106bc9df901cf9c5a2cbae6aa695437ecd367 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Sat, 23 Jul 2022 18:39:08 +0100 Subject: [PATCH 61/72] Experiment in parallel execution (#4652) * Restructure tx execution * fixes * Fixes and traces * Tracing * More tracing * Drain the result channel * Intermediate * more efficient parallel exec * Sorted buffer * Fix results size * fix for the recon * Fix compilation * Sort keys in Write and Read sets, fix compilation in rpcdaemon22 * Update to latest erigon-lib * Update to erigon-lib * Remove go.mod replace * Update erigon-lib * Update to erigon-lib main * Fix lint Co-authored-by: Alexey Sharp Co-authored-by: Alex Sharp --- cmd/rpcdaemon22/commands/eth_receipts.go | 12 +- cmd/rpcdaemon22/commands/trace_filtering.go | 7 +- cmd/state/commands/erigon22.go | 12 +- cmd/state/commands/history22.go | 2 +- cmd/state/commands/replay_tx.go | 195 +++++++++++ cmd/state/commands/state_recon.go | 61 ++-- cmd/state/commands/state_recon_1.go | 242 ++++++++++---- core/state/history_reader_22.go | 15 +- core/state/intra_block_state.go | 2 +- core/state/recon_state_1.go | 345 +++++++++++++------- go.mod | 2 +- go.sum | 4 +- 12 files changed, 656 insertions(+), 243 deletions(-) create mode 100644 cmd/state/commands/replay_tx.go diff --git a/cmd/rpcdaemon22/commands/eth_receipts.go b/cmd/rpcdaemon22/commands/eth_receipts.go index 1a72299a2f0..0dd4d4d6efa 100644 --- a/cmd/rpcdaemon22/commands/eth_receipts.go +++ b/cmd/rpcdaemon22/commands/eth_receipts.go @@ -128,7 +128,9 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ txNumbers := roaring64.New() txNumbers.AddRange(fromTxNum, toTxNum) // [min,max) - topicsBitmap, err := getTopicsBitmap(api._agg, tx, crit.Topics, fromTxNum, toTxNum) + ac := api._agg.MakeContext() + + topicsBitmap, err := getTopicsBitmap(ac, tx, crit.Topics, fromTxNum, toTxNum) if err != nil { return nil, err } @@ -139,7 +141,7 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ var addrBitmap *roaring64.Bitmap for _, addr := range crit.Addresses { var bitmapForORing roaring64.Bitmap - it := api._agg.LogAddrIterator(addr.Bytes(), fromTxNum, toTxNum, nil) + it := ac.LogAddrIterator(addr.Bytes(), fromTxNum, toTxNum, nil) for 
it.HasNext() { bitmapForORing.Add(it.Next()) } @@ -162,7 +164,7 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ var lastHeader *types.Header var lastSigner *types.Signer var lastRules *params.Rules - stateReader := state.NewHistoryReader22(api._agg, nil /* ReadIndices */) + stateReader := state.NewHistoryReader22(ac, nil /* ReadIndices */) iter := txNumbers.Iterator() for iter.HasNext() { txNum := iter.Next() @@ -233,12 +235,12 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ // {{}, {B}} matches any topic in first position AND B in second position // {{A}, {B}} matches topic A in first position AND B in second position // {{A, B}, {C, D}} matches topic (A OR B) in first position AND (C OR D) in second position -func getTopicsBitmap(a *libstate.Aggregator, c kv.Tx, topics [][]common.Hash, from, to uint64) (*roaring64.Bitmap, error) { +func getTopicsBitmap(ac *libstate.AggregatorContext, c kv.Tx, topics [][]common.Hash, from, to uint64) (*roaring64.Bitmap, error) { var result *roaring64.Bitmap for _, sub := range topics { var bitmapForORing roaring64.Bitmap for _, topic := range sub { - it := a.LogTopicIterator(topic.Bytes(), from, to, nil) + it := ac.LogTopicIterator(topic.Bytes(), from, to, nil) for it.HasNext() { bitmapForORing.Add(it.Next()) } diff --git a/cmd/rpcdaemon22/commands/trace_filtering.go b/cmd/rpcdaemon22/commands/trace_filtering.go index 7dd35347407..fc456811666 100644 --- a/cmd/rpcdaemon22/commands/trace_filtering.go +++ b/cmd/rpcdaemon22/commands/trace_filtering.go @@ -253,10 +253,11 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str allTxs roaring64.Bitmap txsTo roaring64.Bitmap ) + ac := api._agg.MakeContext() for _, addr := range req.FromAddress { if addr != nil { - it := api._agg.TraceFromIterator(addr.Bytes(), fromTxNum, toTxNum, nil) + it := ac.TraceFromIterator(addr.Bytes(), fromTxNum, toTxNum, nil) for it.HasNext() { allTxs.Add(it.Next()) } @@ -266,7 +267,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str for _, addr := range req.ToAddress { if addr != nil { - it := api._agg.TraceToIterator(addr.Bytes(), fromTxNum, toTxNum, nil) + it := ac.TraceToIterator(addr.Bytes(), fromTxNum, toTxNum, nil) for it.HasNext() { txsTo.Add(it.Next()) } @@ -319,7 +320,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str var lastHeader *types.Header var lastSigner *types.Signer var lastRules *params.Rules - stateReader := state.NewHistoryReader22(api._agg, nil /* ReadIndices */) + stateReader := state.NewHistoryReader22(ac, nil /* ReadIndices */) noop := state.NewNoopWriter() for it.HasNext() { txNum := uint64(it.Next()) diff --git a/cmd/state/commands/erigon22.go b/cmd/state/commands/erigon22.go index 0bcc8538c13..3e0d8202121 100644 --- a/cmd/state/commands/erigon22.go +++ b/cmd/state/commands/erigon22.go @@ -171,7 +171,7 @@ func Erigon22(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log } return h } - readWrapper := &ReaderWrapper22{r: agg, roTx: rwTx} + readWrapper := &ReaderWrapper22{ac: agg.MakeContext(), roTx: rwTx} writeWrapper := &WriterWrapper22{w: agg} for !interrupt { @@ -396,7 +396,7 @@ func processBlock22(startTxNum uint64, trace bool, txNumStart uint64, rw *Reader // Implements StateReader and StateWriter type ReaderWrapper22 struct { roTx kv.Tx - r *libstate.Aggregator + ac *libstate.AggregatorContext blockNum uint64 } @@ -406,7 +406,7 @@ type WriterWrapper22 struct { } func (rw 
*ReaderWrapper22) ReadAccountData(address common.Address) (*accounts.Account, error) { - enc, err := rw.r.ReadAccountData(address.Bytes(), rw.roTx) + enc, err := rw.ac.ReadAccountData(address.Bytes(), rw.roTx) if err != nil { return nil, err } @@ -444,7 +444,7 @@ func (rw *ReaderWrapper22) ReadAccountData(address common.Address) (*accounts.Ac } func (rw *ReaderWrapper22) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) { - enc, err := rw.r.ReadAccountStorage(address.Bytes(), key.Bytes(), rw.roTx) + enc, err := rw.ac.ReadAccountStorage(address.Bytes(), key.Bytes(), rw.roTx) if err != nil { return nil, err } @@ -458,11 +458,11 @@ func (rw *ReaderWrapper22) ReadAccountStorage(address common.Address, incarnatio } func (rw *ReaderWrapper22) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) { - return rw.r.ReadAccountCode(address.Bytes(), rw.roTx) + return rw.ac.ReadAccountCode(address.Bytes(), rw.roTx) } func (rw *ReaderWrapper22) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (int, error) { - return rw.r.ReadAccountCodeSize(address.Bytes(), rw.roTx) + return rw.ac.ReadAccountCodeSize(address.Bytes(), rw.roTx) } func (rw *ReaderWrapper22) ReadAccountIncarnation(address common.Address) (uint64, error) { diff --git a/cmd/state/commands/history22.go b/cmd/state/commands/history22.go index 02890f1f3c3..d1976cd7f75 100644 --- a/cmd/state/commands/history22.go +++ b/cmd/state/commands/history22.go @@ -136,6 +136,7 @@ func History22(genesis *core.Genesis, logger log.Logger) error { return fmt.Errorf("reopen snapshot segments: %w", err) } blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) + readWrapper := state.NewHistoryReader22(h.MakeContext(), ri) for !interrupt { select { @@ -169,7 +170,6 @@ func History22(genesis *core.Genesis, logger log.Logger) error { txNum += uint64(len(b.Transactions())) + 2 // Pre and Post block transaction continue } - readWrapper := state.NewHistoryReader22(h, ri) if traceBlock != 0 { readWrapper.SetTrace(blockNum == uint64(traceBlock)) } diff --git a/cmd/state/commands/replay_tx.go b/cmd/state/commands/replay_tx.go new file mode 100644 index 00000000000..e1c5552592f --- /dev/null +++ b/cmd/state/commands/replay_tx.go @@ -0,0 +1,195 @@ +package commands + +import ( + "context" + "fmt" + "path" + "path/filepath" + "sort" + + "github.com/ledgerwatch/erigon-lib/kv/memdb" + libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/log/v3" + "github.com/spf13/cobra" +) + +var txhash string +var txnum uint64 + +func init() { + withDataDir(replayTxCmd) + rootCmd.AddCommand(replayTxCmd) + replayTxCmd.Flags().StringVar(&txhash, "txhash", "", "hash of the transaction to replay") + replayTxCmd.Flags().Uint64Var(&txnum, "txnum", 0, "tx num for replay") +} + +var replayTxCmd = &cobra.Command{ + Use: "replaytx", + Short: "Experimental command to replay a given transaction using only history", + RunE: func(cmd *cobra.Command, args []string) error { + return ReplayTx(genesis) + }, +} + +func ReplayTx(genesis *core.Genesis) error { + var blockReader 
services.FullBlockReader + var allSnapshots *snapshotsync.RoSnapshots + allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) + defer allSnapshots.Close() + if err := allSnapshots.Reopen(); err != nil { + return fmt.Errorf("reopen snapshot segments: %w", err) + } + blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) + // Compute mapping blockNum -> last TxNum in that block + txNums := make([]uint64, allSnapshots.BlocksAvailable()+1) + if err := allSnapshots.Bodies.View(func(bs []*snapshotsync.BodySegment) error { + for _, b := range bs { + if err := b.Iterate(func(blockNum, baseTxNum, txAmount uint64) { + txNums[blockNum] = baseTxNum + txAmount + }); err != nil { + return err + } + } + return nil + }); err != nil { + return fmt.Errorf("build txNum => blockNum mapping: %w", err) + } + ctx := context.Background() + var txNum uint64 + if txhash != "" { + txnHash := common.HexToHash(txhash) + fmt.Printf("Tx hash = [%x]\n", txnHash) + db := memdb.New() + roTx, err := db.BeginRo(ctx) + if err != nil { + return err + } + defer roTx.Rollback() + bn, ok, err := blockReader.TxnLookup(ctx, roTx, txnHash) + if err != nil { + return err + } + if !ok { + return fmt.Errorf("transaction not found") + } + fmt.Printf("Found in block %d\n", bn) + var header *types.Header + if header, err = blockReader.HeaderByNumber(ctx, nil, bn); err != nil { + return err + } + blockHash := header.Hash() + b, _, err := blockReader.BlockWithSenders(ctx, nil, blockHash, bn) + if err != nil { + return err + } + txs := b.Transactions() + var txIndex int + for txIndex = 0; txIndex < len(txs); txIndex++ { + if txs[txIndex].Hash() == txnHash { + fmt.Printf("txIndex = %d\n", txIndex) + break + } + } + txNum = txNums[bn-1] + 1 + uint64(txIndex) + } else { + txNum = txnum + } + fmt.Printf("txNum = %d\n", txNum) + aggPath := filepath.Join(datadir, "erigon23") + agg, err := libstate.NewAggregator(aggPath, AggregationStep) + if err != nil { + return fmt.Errorf("create history: %w", err) + } + defer agg.Close() + ac := agg.MakeContext() + workCh := make(chan state.TxTask) + rs := state.NewReconState(workCh) + if err = replayTxNum(ctx, allSnapshots, blockReader, txNum, txNums, rs, ac); err != nil { + return err + } + return nil +} + +func replayTxNum(ctx context.Context, allSnapshots *snapshotsync.RoSnapshots, blockReader services.FullBlockReader, + txNum uint64, txNums []uint64, rs *state.ReconState, ac *libstate.AggregatorContext, +) error { + bn := uint64(sort.Search(len(txNums), func(i int) bool { + return txNums[i] > txNum + })) + txIndex := int(txNum - txNums[bn-1] - 1) + fmt.Printf("bn=%d, txIndex=%d\n", bn, txIndex) + var header *types.Header + var err error + if header, err = blockReader.HeaderByNumber(ctx, nil, bn); err != nil { + return err + } + blockHash := header.Hash() + b, _, err := blockReader.BlockWithSenders(ctx, nil, blockHash, bn) + if err != nil { + return err + } + txn := b.Transactions()[txIndex] + stateWriter := state.NewStateReconWriter(ac, rs) + stateReader := state.NewHistoryReaderNoState(ac, rs) + stateReader.SetTxNum(txNum) + stateWriter.SetTxNum(txNum) + noop := state.NewNoopWriter() + rules := chainConfig.Rules(bn) + for { + stateReader.ResetError() + ibs := state.New(stateReader) + gp := new(core.GasPool).AddGas(txn.GetGas()) + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, gas=%d, input=[%x]\n", txNum, blockNum, txIndex, txn.GetGas(), txn.GetData()) + vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: 
core.SkipAnalysis(chainConfig, bn)} + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + getHeader := func(hash common.Hash, number uint64) *types.Header { + h, err := blockReader.Header(ctx, nil, hash, number) + if err != nil { + panic(err) + } + return h + } + getHashFn := core.GetHashFn(header, getHeader) + logger := log.New() + engine := initConsensusEngine(chainConfig, logger, allSnapshots) + txnHash := txn.Hash() + blockContext := core.NewEVMBlockContext(header, getHashFn, engine, nil /* author */, contractHasTEVM) + ibs.Prepare(txnHash, blockHash, txIndex) + msg, err := txn.AsMessage(*types.MakeSigner(chainConfig, bn), header.BaseFee, rules) + if err != nil { + return err + } + txContext := core.NewEVMTxContext(msg) + vmenv := vm.NewEVM(blockContext, txContext, ibs, chainConfig, vmConfig) + + _, err = core.ApplyMessage(vmenv, msg, gp, true /* refunds */, false /* gasBailout */) + if err != nil { + return fmt.Errorf("could not apply tx %d [%x] failed: %w", txIndex, txnHash, err) + } + if err = ibs.FinalizeTx(rules, noop); err != nil { + return err + } + if dependency, ok := stateReader.ReadError(); ok { + fmt.Printf("dependency %d on %d\n", txNum, dependency) + if err = replayTxNum(ctx, allSnapshots, blockReader, dependency, txNums, rs, ac); err != nil { + return err + } + } else { + if err = ibs.CommitBlock(rules, stateWriter); err != nil { + return err + } + break + } + } + rs.CommitTxNum(txNum) + fmt.Printf("commited %d\n", txNum) + return nil +} diff --git a/cmd/state/commands/state_recon.go b/cmd/state/commands/state_recon.go index db5ea046249..12a214838c4 100644 --- a/cmd/state/commands/state_recon.go +++ b/cmd/state/commands/state_recon.go @@ -131,9 +131,7 @@ func (rw *ReconWorker) runTxTask(txTask state.TxTask) { } else if daoForkTx { //fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txNum, blockNum) misc.ApplyDAOHardFork(ibs) - if err := ibs.FinalizeTx(rules, noop); err != nil { - panic(err) - } + ibs.SoftFinalise() } else if txTask.Final { if txTask.BlockNum > 0 { //fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txNum, blockNum) @@ -147,16 +145,25 @@ func (rw *ReconWorker) runTxTask(txTask state.TxTask) { } else { txHash := txTask.Tx.Hash() gp := new(core.GasPool).AddGas(txTask.Tx.GetGas()) - //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, gas=%d, input=[%x]\n", txNum, blockNum, txIndex, txn.GetGas(), txn.GetData()) - usedGas := new(uint64) vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: core.SkipAnalysis(rw.chainConfig, txTask.BlockNum)} contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - ibs.Prepare(txHash, txTask.BlockHash, txTask.TxIndex) getHashFn := core.GetHashFn(txTask.Header, rw.getHeader) - _, _, err = core.ApplyTransaction(rw.chainConfig, getHashFn, rw.engine, nil, gp, ibs, noop, txTask.Header, txTask.Tx, usedGas, vmConfig, contractHasTEVM) + blockContext := core.NewEVMBlockContext(txTask.Header, getHashFn, rw.engine, nil /* author */, contractHasTEVM) + ibs.Prepare(txHash, txTask.BlockHash, txTask.TxIndex) + msg, err := txTask.Tx.AsMessage(*types.MakeSigner(rw.chainConfig, txTask.BlockNum), txTask.Header.BaseFee, rules) + if err != nil { + panic(err) + } + txContext := core.NewEVMTxContext(msg) + vmenv := vm.NewEVM(blockContext, txContext, ibs, rw.chainConfig, vmConfig) + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, evm=%p\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex, vmenv) + _, err = core.ApplyMessage(vmenv, msg, gp, true /* refunds */, false /* 
gasBailout */) if err != nil { panic(fmt.Errorf("could not apply tx %d [%x] failed: %w", txTask.TxIndex, txHash, err)) } + if err = ibs.FinalizeTx(rules, noop); err != nil { + panic(err) + } } if dependency, ok := rw.stateReader.ReadError(); ok { //fmt.Printf("rollback %d\n", txNum) @@ -259,12 +266,8 @@ func (fw *FillWorker) fillStorage(plainStateCollector *etl.Collector) { fw.currentKey = key compositeKey := dbutils.PlainGenerateCompositeStorageKey(key[:20], state.FirstContractIncarnation, key[20:]) if len(val) > 0 { - if len(val) > 1 || val[0] != 0 { - if err := plainStateCollector.Collect(compositeKey, val); err != nil { - panic(err) - } - } else { - fmt.Printf("Storage [%x] => [%x]\n", compositeKey, val) + if err := plainStateCollector.Collect(compositeKey, val); err != nil { + panic(err) } //fmt.Printf("Storage [%x] => [%x]\n", compositeKey, val) } @@ -283,19 +286,15 @@ func (fw *FillWorker) fillCode(codeCollector, plainContractCollector *etl.Collec fw.currentKey = key compositeKey := dbutils.PlainGenerateStoragePrefix(key, state.FirstContractIncarnation) if len(val) > 0 { - if len(val) > 1 || val[0] != 0 { - codeHash, err := common.HashData(val) - if err != nil { - panic(err) - } - if err = codeCollector.Collect(codeHash[:], val); err != nil { - panic(err) - } - if err = plainContractCollector.Collect(compositeKey, codeHash[:]); err != nil { - panic(err) - } - } else { - fmt.Printf("Code [%x] => [%x]\n", compositeKey, val) + codeHash, err := common.HashData(val) + if err != nil { + panic(err) + } + if err = codeCollector.Collect(codeHash[:], val); err != nil { + panic(err) + } + if err = plainContractCollector.Collect(compositeKey, codeHash[:]); err != nil { + panic(err) } //fmt.Printf("Code [%x] => [%x]\n", compositeKey, val) } @@ -600,9 +599,9 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { } }() var inputTxNum uint64 + var header *types.Header for bn := uint64(0); bn < blockNum; bn++ { - header, err := blockReader.HeaderByNumber(ctx, nil, bn) - if err != nil { + if header, err = blockReader.HeaderByNumber(ctx, nil, bn); err != nil { panic(err) } blockHash := header.Hash() @@ -851,11 +850,15 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { if rwTx, err = db.BeginRw(ctx); err != nil { return err } - if _, err = stagedsync.RegenerateIntermediateHashes("recon", rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader, nil /* HeaderDownload */), common.Hash{}, make(chan struct{}, 1)); err != nil { + var rootHash common.Hash + if rootHash, err = stagedsync.RegenerateIntermediateHashes("recon", rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader, nil /* HeaderDownload */), common.Hash{}, make(chan struct{}, 1)); err != nil { return err } if err = rwTx.Commit(); err != nil { return err } + if rootHash != header.Root { + log.Error("Incorrect root hash", "expected", fmt.Sprintf("%x", header.Root)) + } return nil } diff --git a/cmd/state/commands/state_recon_1.go b/cmd/state/commands/state_recon_1.go index b149406a892..0ae69fe40f6 100644 --- a/cmd/state/commands/state_recon_1.go +++ b/cmd/state/commands/state_recon_1.go @@ -11,6 +11,7 @@ import ( "path/filepath" "runtime" "sync" + "sync/atomic" "syscall" "time" @@ -31,6 +32,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" + "golang.org/x/sync/semaphore" ) func init() { @@ -103,11 +105,12 
@@ func (rw *ReconWorker1) run() { } rw.engine = initConsensusEngine(rw.chainConfig, rw.logger, rw.allSnapshots) for txTask, ok := rw.rs.Schedule(); ok; txTask, ok = rw.rs.Schedule() { - rw.runTxTask(txTask) + rw.runTxTask(&txTask) + rw.resultCh <- txTask // Needs to have outside of the lock } } -func (rw *ReconWorker1) runTxTask(txTask state.TxTask) { +func (rw *ReconWorker1) runTxTask(txTask *state.TxTask) { rw.lock.Lock() defer rw.lock.Unlock() txTask.Error = nil @@ -115,74 +118,103 @@ func (rw *ReconWorker1) runTxTask(txTask state.TxTask) { rw.stateWriter.SetTxNum(txTask.TxNum) rw.stateReader.ResetReadSet() rw.stateWriter.ResetWriteSet() - rules := rw.chainConfig.Rules(txTask.BlockNum) ibs := state.New(rw.stateReader) daoForkTx := rw.chainConfig.DAOForkSupport && rw.chainConfig.DAOForkBlock != nil && rw.chainConfig.DAOForkBlock.Uint64() == txTask.BlockNum && txTask.TxIndex == -1 var err error if txTask.BlockNum == 0 && txTask.TxIndex == -1 { - fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) + //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) // Genesis block _, ibs, err = rw.genesis.ToBlock() if err != nil { panic(err) } } else if daoForkTx { - fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txTask.TxNum, txTask.BlockNum) + //fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txTask.TxNum, txTask.BlockNum) misc.ApplyDAOHardFork(ibs) ibs.SoftFinalise() } else if txTask.TxIndex == -1 { // Block initialisation } else if txTask.Final { if txTask.BlockNum > 0 { - fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txTask.TxNum, txTask.BlockNum) + //fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txTask.TxNum, txTask.BlockNum) // End of block transaction in a block if _, _, err := rw.engine.Finalize(rw.chainConfig, txTask.Header, ibs, txTask.Block.Transactions(), txTask.Block.Uncles(), nil /* receipts */, nil, nil, nil); err != nil { panic(fmt.Errorf("finalize of block %d failed: %w", txTask.BlockNum, err)) } } } else { - fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) txHash := txTask.Tx.Hash() gp := new(core.GasPool).AddGas(txTask.Tx.GetGas()) - //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, gas=%d, input=[%x]\n", txNum, blockNum, txIndex, txn.GetGas(), txn.GetData()) - usedGas := new(uint64) vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: core.SkipAnalysis(rw.chainConfig, txTask.BlockNum)} contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } ibs.Prepare(txHash, txTask.BlockHash, txTask.TxIndex) - vmConfig.SkipAnalysis = core.SkipAnalysis(rw.chainConfig, txTask.BlockNum) getHashFn := core.GetHashFn(txTask.Header, rw.getHeader) blockContext := core.NewEVMBlockContext(txTask.Header, getHashFn, rw.engine, nil /* author */, contractHasTEVM) - vmenv := vm.NewEVM(blockContext, vm.TxContext{}, ibs, rw.chainConfig, vmConfig) - msg, err := txTask.Tx.AsMessage(*types.MakeSigner(rw.chainConfig, txTask.BlockNum), txTask.Header.BaseFee, rules) + msg, err := txTask.Tx.AsMessage(*types.MakeSigner(rw.chainConfig, txTask.BlockNum), txTask.Header.BaseFee, txTask.Rules) if err != nil { panic(err) } txContext := core.NewEVMTxContext(msg) - - // Update the evm with the new transaction context. 
- vmenv.Reset(txContext, ibs) - - result, err := core.ApplyMessage(vmenv, msg, gp, true /* refunds */, false /* gasBailout */) - if err != nil { + vmenv := vm.NewEVM(blockContext, txContext, ibs, rw.chainConfig, vmConfig) + if _, err = core.ApplyMessage(vmenv, msg, gp, true /* refunds */, false /* gasBailout */); err != nil { txTask.Error = err + //fmt.Printf("error=%v\n", err) } // Update the state with pending changes ibs.SoftFinalise() - *usedGas += result.UsedGas } // Prepare read set, write set and balanceIncrease set and send for serialisation if txTask.Error == nil { txTask.BalanceIncreaseSet = ibs.BalanceIncreaseSet() - for addr, bal := range txTask.BalanceIncreaseSet { - fmt.Printf("[%x]=>[%d]\n", addr, &bal) - } - if err = ibs.MakeWriteSet(rules, rw.stateWriter); err != nil { + //for addr, bal := range txTask.BalanceIncreaseSet { + // fmt.Printf("[%x]=>[%d]\n", addr, &bal) + //} + if err = ibs.MakeWriteSet(txTask.Rules, rw.stateWriter); err != nil { panic(err) } - txTask.ReadKeys, txTask.ReadVals = rw.stateReader.ReadSet() - txTask.WriteKeys, txTask.WriteVals = rw.stateWriter.WriteSet() + txTask.ReadLists = rw.stateReader.ReadSet() + txTask.WriteLists = rw.stateWriter.WriteSet() + size := (20 + 32) * len(txTask.BalanceIncreaseSet) + for _, list := range txTask.ReadLists { + for _, b := range list.Keys { + size += len(b) + } + for _, b := range list.Vals { + size += len(b) + } + } + for _, list := range txTask.WriteLists { + for _, b := range list.Keys { + size += len(b) + } + for _, b := range list.Vals { + size += len(b) + } + } + txTask.ResultsSize = int64(size) + } +} + +func processResultQueue(rws *state.TxTaskQueue, outputTxNum *uint64, rs *state.ReconState1, applyTx kv.Tx, + triggerCount *uint64, outputBlockNum *uint64, repeatCount *uint64, resultsSize *int64) { + for rws.Len() > 0 && (*rws)[0].TxNum == *outputTxNum { + txTask := heap.Pop(rws).(state.TxTask) + atomic.AddInt64(resultsSize, -txTask.ResultsSize) + if txTask.Error == nil && rs.ReadsValid(txTask.ReadLists) { + if err := rs.Apply(txTask.Rules.IsSpuriousDragon, applyTx, txTask); err != nil { + panic(err) + } + *triggerCount += rs.CommitTxNum(txTask.Sender, txTask.TxNum) + *outputTxNum++ + *outputBlockNum = txTask.BlockNum + //fmt.Printf("Applied %d block %d txIndex %d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) + } else { + rs.AddWork(txTask) + *repeatCount++ + //fmt.Printf("Rolled back %d block %d txIndex %d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) + } } - rw.resultCh <- txTask } func Recon1(genesis *core.Genesis, logger log.Logger) error { @@ -204,7 +236,8 @@ func Recon1(genesis *core.Genesis, logger log.Logger) error { } else if err = os.RemoveAll(reconDbPath); err != nil { return err } - db, err := kv2.NewMDBX(logger).Path(reconDbPath).WriteMap().Open() + limiter := semaphore.NewWeighted(int64(runtime.NumCPU() + 1)) + db, err := kv2.NewMDBX(logger).Path(reconDbPath).RoTxsLimiter(limiter).Open() if err != nil { return err } @@ -236,9 +269,18 @@ func Recon1(genesis *core.Genesis, logger log.Logger) error { fmt.Printf("Corresponding block num = %d, txNum = %d\n", blockNum, txNum) workerCount := runtime.NumCPU() workCh := make(chan state.TxTask, 128) - rs := state.NewReconState1(workCh) + rs := state.NewReconState1() var lock sync.RWMutex reconWorkers := make([]*ReconWorker1, workerCount) + var applyTx kv.Tx + defer func() { + if applyTx != nil { + applyTx.Rollback() + } + }() + if applyTx, err = db.BeginRo(ctx); err != nil { + return err + } roTxs := make([]kv.Tx, workerCount) defer func() { for 
i := 0; i < workerCount; i++ { @@ -263,71 +305,118 @@ func Recon1(genesis *core.Genesis, logger log.Logger) error { for i := 0; i < workerCount; i++ { go reconWorkers[i].run() } - commitThreshold := uint64(256 * 1024 * 1024) + commitThreshold := uint64(1024 * 1024 * 1024) + resultsThreshold := int64(1024 * 1024 * 1024) count := uint64(0) - rollbackCount := uint64(0) + repeatCount := uint64(0) + triggerCount := uint64(0) total := txNum prevCount := uint64(0) - prevRollbackCount := uint64(0) + prevRepeatCount := uint64(0) + //prevTriggerCount := uint64(0) + resultsSize := int64(0) prevTime := time.Now() logEvery := time.NewTicker(logInterval) defer logEvery.Stop() var rws state.TxTaskQueue + var rwsLock sync.Mutex + rwsReceiveCond := sync.NewCond(&rwsLock) heap.Init(&rws) var outputTxNum uint64 + var inputBlockNum, outputBlockNum uint64 + var prevOutputBlockNum uint64 // Go-routine gathering results from the workers go func() { + defer rs.Finish() for outputTxNum < txNum { select { case txTask := <-resultCh: - if txTask.TxNum == outputTxNum { - // Try to apply without placing on the queue first - if txTask.Error == nil && rs.ReadsValid(txTask.ReadKeys, txTask.ReadVals) { - rs.Apply(txTask.WriteKeys, txTask.WriteVals, txTask.BalanceIncreaseSet) - rs.CommitTxNum(txTask.Sender, txTask.TxNum) - outputTxNum++ - } else { - rs.RollbackTx(txTask) - } - } else { + //fmt.Printf("Saved %d block %d txIndex %d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) + func() { + rwsLock.Lock() + defer rwsLock.Unlock() + atomic.AddInt64(&resultsSize, txTask.ResultsSize) heap.Push(&rws, txTask) - } - for rws.Len() > 0 && rws[0].TxNum == outputTxNum { - txTask = heap.Pop(&rws).(state.TxTask) - if txTask.Error == nil && rs.ReadsValid(txTask.ReadKeys, txTask.ReadVals) { - rs.Apply(txTask.WriteKeys, txTask.WriteVals, txTask.BalanceIncreaseSet) - rs.CommitTxNum(txTask.Sender, txTask.TxNum) - outputTxNum++ - } else { - rs.RollbackTx(txTask) - } - } + processResultQueue(&rws, &outputTxNum, rs, applyTx, &triggerCount, &outputBlockNum, &repeatCount, &resultsSize) + rwsReceiveCond.Signal() + }() case <-logEvery.C: var m runtime.MemStats libcommon.ReadMemStats(&m) sizeEstimate := rs.SizeEstimate() count = rs.DoneCount() - rollbackCount = rs.RollbackCount() currentTime := time.Now() interval := currentTime.Sub(prevTime) speedTx := float64(count-prevCount) / (float64(interval) / float64(time.Second)) + speedBlock := float64(outputBlockNum-prevOutputBlockNum) / (float64(interval) / float64(time.Second)) progress := 100.0 * float64(count) / float64(total) var repeatRatio float64 if count > prevCount { - repeatRatio = 100.0 * float64(rollbackCount-prevRollbackCount) / float64(count-prevCount) + repeatRatio = 100.0 * float64(repeatCount-prevRepeatCount) / float64(count-prevCount) } - prevTime = currentTime - prevCount = count - prevRollbackCount = rollbackCount - log.Info("Transaction replay", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", progress), "tx/s", fmt.Sprintf("%.1f", speedTx), "repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), "buffer", libcommon.ByteCount(sizeEstimate), + log.Info("Transaction replay", + //"workers", workerCount, + "at block", outputBlockNum, + "input block", atomic.LoadUint64(&inputBlockNum), + "progress", fmt.Sprintf("%.2f%%", progress), + "blk/s", fmt.Sprintf("%.1f", speedBlock), + "tx/s", fmt.Sprintf("%.1f", speedTx), + //"repeats", repeatCount-prevRepeatCount, + //"triggered", triggerCount-prevTriggerCount, + "result queue", rws.Len(), + "results size", 
libcommon.ByteCount(uint64(atomic.LoadInt64(&resultsSize))), + "repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), + "buffer", libcommon.ByteCount(sizeEstimate), "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), ) + prevTime = currentTime + prevCount = count + prevOutputBlockNum = outputBlockNum + prevRepeatCount = repeatCount + //prevTriggerCount = triggerCount if sizeEstimate >= commitThreshold { commitStart := time.Now() log.Info("Committing...") err := func() error { - lock.Lock() + rwsLock.Lock() + defer rwsLock.Unlock() + // Drain results (and process) channel because read sets do not carry over + for { + var drained bool + for !drained { + select { + case txTask := <-resultCh: + atomic.AddInt64(&resultsSize, txTask.ResultsSize) + heap.Push(&rws, txTask) + default: + drained = true + } + } + processResultQueue(&rws, &outputTxNum, rs, applyTx, &triggerCount, &outputBlockNum, &repeatCount, &resultsSize) + if rws.Len() == 0 { + break + } + } + rwsReceiveCond.Signal() + lock.Lock() // This is to prevent workers from starting work on any new txTask defer lock.Unlock() + // Drain results channel because read sets do not carry over + var drained bool + for !drained { + select { + case txTask := <-resultCh: + rs.AddWork(txTask) + default: + drained = true + } + } + // Drain results queue as well + for rws.Len() > 0 { + txTask := heap.Pop(&rws).(state.TxTask) + atomic.AddInt64(&resultsSize, -txTask.ResultsSize) + rs.AddWork(txTask) + } + applyTx.Rollback() for i := 0; i < workerCount; i++ { roTxs[i].Rollback() } @@ -341,6 +430,9 @@ func Recon1(genesis *core.Genesis, logger log.Logger) error { if err = rwTx.Commit(); err != nil { return err } + if applyTx, err = db.BeginRo(ctx); err != nil { + return err + } for i := 0; i < workerCount; i++ { if roTxs[i], err = db.BeginRo(ctx); err != nil { return err @@ -358,21 +450,32 @@ func Recon1(genesis *core.Genesis, logger log.Logger) error { } }() var inputTxNum uint64 + var header *types.Header for blockNum := uint64(0); blockNum <= block; blockNum++ { - header, err := blockReader.HeaderByNumber(ctx, nil, blockNum) - if err != nil { - panic(err) + atomic.StoreUint64(&inputBlockNum, blockNum) + rules := chainConfig.Rules(blockNum) + if header, err = blockReader.HeaderByNumber(ctx, nil, blockNum); err != nil { + return err } blockHash := header.Hash() b, _, err := blockReader.BlockWithSenders(ctx, nil, blockHash, blockNum) if err != nil { - panic(err) + return err } txs := b.Transactions() for txIndex := -1; txIndex <= len(txs); txIndex++ { + // Do not oversend, wait for the result heap to go under certain size + func() { + rwsLock.Lock() + defer rwsLock.Unlock() + for rws.Len() > 128 || atomic.LoadInt64(&resultsSize) >= resultsThreshold || rs.SizeEstimate() >= commitThreshold { + rwsReceiveCond.Wait() + } + }() txTask := state.TxTask{ Header: header, BlockNum: blockNum, + Rules: rules, Block: b, TxNum: inputTxNum, TxIndex: txIndex, @@ -384,13 +487,18 @@ func Recon1(genesis *core.Genesis, logger log.Logger) error { if sender, ok := txs[txIndex].GetSender(); ok { txTask.Sender = &sender } + if ok := rs.RegisterSender(txTask); ok { + rs.AddWork(txTask) + } + } else { + rs.AddWork(txTask) } - workCh <- txTask inputTxNum++ } } close(workCh) wg.Wait() + applyTx.Rollback() for i := 0; i < workerCount; i++ { roTxs[i].Rollback() } @@ -424,11 +532,15 @@ func Recon1(genesis *core.Genesis, logger log.Logger) error { if rwTx, err = db.BeginRw(ctx); err != nil { return err } - if _, err = stagedsync.RegenerateIntermediateHashes("recon", 
rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader, nil /* HeaderDownload */), common.Hash{}, make(chan struct{}, 1)); err != nil { + var rootHash common.Hash + if rootHash, err = stagedsync.RegenerateIntermediateHashes("recon", rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader, nil /* HeaderDownload */), common.Hash{}, make(chan struct{}, 1)); err != nil { return err } if err = rwTx.Commit(); err != nil { return err } + if rootHash != header.Root { + log.Error("Incorrect root hash", "expected", fmt.Sprintf("%x", header.Root)) + } return nil } diff --git a/core/state/history_reader_22.go b/core/state/history_reader_22.go index be89d3bf43b..d0844f868de 100644 --- a/core/state/history_reader_22.go +++ b/core/state/history_reader_22.go @@ -21,14 +21,14 @@ func bytesToUint64(buf []byte) (x uint64) { // Implements StateReader and StateWriter type HistoryReader22 struct { - a *libstate.Aggregator + ac *libstate.AggregatorContext ri *libstate.ReadIndices txNum uint64 trace bool } -func NewHistoryReader22(a *libstate.Aggregator, ri *libstate.ReadIndices) *HistoryReader22 { - return &HistoryReader22{a: a, ri: ri} +func NewHistoryReader22(ac *libstate.AggregatorContext, ri *libstate.ReadIndices) *HistoryReader22 { + return &HistoryReader22{ac: ac, ri: ri} } func (hr *HistoryReader22) SetTx(tx kv.RwTx) { @@ -37,7 +37,6 @@ func (hr *HistoryReader22) SetTx(tx kv.RwTx) { func (hr *HistoryReader22) SetTxNum(txNum uint64) { hr.txNum = txNum - hr.a.SetTxNum(txNum) if hr.ri != nil { hr.ri.SetTxNum(txNum) } @@ -57,7 +56,7 @@ func (hr *HistoryReader22) ReadAccountData(address common.Address) (*accounts.Ac return nil, err } } - enc, err := hr.a.ReadAccountDataBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) + enc, err := hr.ac.ReadAccountDataBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) if err != nil { return nil, err } @@ -108,7 +107,7 @@ func (hr *HistoryReader22) ReadAccountStorage(address common.Address, incarnatio return nil, err } } - enc, err := hr.a.ReadAccountStorageBeforeTxNum(address.Bytes(), key.Bytes(), hr.txNum, nil /* roTx */) + enc, err := hr.ac.ReadAccountStorageBeforeTxNum(address.Bytes(), key.Bytes(), hr.txNum, nil /* roTx */) if err != nil { return nil, err } @@ -131,7 +130,7 @@ func (hr *HistoryReader22) ReadAccountCode(address common.Address, incarnation u return nil, err } } - enc, err := hr.a.ReadAccountCodeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) + enc, err := hr.ac.ReadAccountCodeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) if err != nil { return nil, err } @@ -147,7 +146,7 @@ func (hr *HistoryReader22) ReadAccountCodeSize(address common.Address, incarnati return 0, err } } - size, err := hr.a.ReadAccountCodeSizeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) + size, err := hr.ac.ReadAccountCodeSizeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) if err != nil { return 0, err } diff --git a/core/state/intra_block_state.go b/core/state/intra_block_state.go index 4c8dc91db7b..ba4859c9103 100644 --- a/core/state/intra_block_state.go +++ b/core/state/intra_block_state.go @@ -172,7 +172,7 @@ func (sdb *IntraBlockState) AddRefund(gas uint64) { func (sdb *IntraBlockState) SubRefund(gas uint64) { sdb.journal.append(refundChange{prev: sdb.refund}) if gas > sdb.refund { - panic("Refund counter below zero") + sdb.setErrorUnsafe(fmt.Errorf("Refund counter below zero")) } sdb.refund -= 
gas } diff --git a/core/state/recon_state_1.go b/core/state/recon_state_1.go index 3487a7e4ab8..7572a3a61cf 100644 --- a/core/state/recon_state_1.go +++ b/core/state/recon_state_1.go @@ -5,14 +5,19 @@ import ( "container/heap" "encoding/binary" "fmt" + "sort" "sync" + "unsafe" + "github.com/google/btree" "github.com/holiman/uint256" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" + "github.com/ledgerwatch/erigon/params" ) // ReadWriteSet contains ReadSet, WriteSet and BalanceIncrease of a transaction, @@ -21,6 +26,7 @@ import ( type TxTask struct { TxNum uint64 BlockNum uint64 + Rules *params.Rules Header *types.Header Block *types.Block BlockHash common.Hash @@ -29,10 +35,9 @@ type TxTask struct { Final bool Tx types.Transaction BalanceIncreaseSet map[common.Address]uint256.Int - ReadKeys map[string][][]byte - ReadVals map[string][][]byte - WriteKeys map[string][][]byte - WriteVals map[string][][]byte + ReadLists map[string]*KvList + WriteLists map[string]*KvList + ResultsSize int64 Error error } @@ -60,48 +65,50 @@ func (h *TxTaskQueue) Pop() interface{} { return c[len(c)-1] } +const CodeSizeTable = "CodeSize" + type ReconState1 struct { - lock sync.RWMutex - triggers map[uint64]TxTask - senderTxNums map[common.Address]uint64 - workCh chan TxTask - queue TxTaskQueue - changes map[string]map[string][]byte - sizeEstimate uint64 - rollbackCount uint64 - txsDone uint64 -} - -func NewReconState1(workCh chan TxTask) *ReconState1 { + lock sync.RWMutex + receiveWork *sync.Cond + triggers map[uint64]TxTask + senderTxNums map[common.Address]uint64 + triggerLock sync.RWMutex + queue TxTaskQueue + queueLock sync.Mutex + changes map[string]*btree.BTreeG[ReconStateItem1] + sizeEstimate uint64 + txsDone uint64 + finished bool +} + +type ReconStateItem1 struct { + key []byte + val []byte +} + +func reconStateItem1Less(i, j ReconStateItem1) bool { + return bytes.Compare(i.key, j.key) < 0 +} + +func NewReconState1() *ReconState1 { rs := &ReconState1{ - workCh: workCh, triggers: map[uint64]TxTask{}, senderTxNums: map[common.Address]uint64{}, - changes: map[string]map[string][]byte{}, + changes: map[string]*btree.BTreeG[ReconStateItem1]{}, } + rs.receiveWork = sync.NewCond(&rs.queueLock) return rs } func (rs *ReconState1) put(table string, key, val []byte) { t, ok := rs.changes[table] if !ok { - t = map[string][]byte{} - rs.changes[table] = t - } - t[string(key)] = val - rs.sizeEstimate += uint64(len(key)) + uint64(len(val)) -} - -func (rs *ReconState1) Delete(table string, key []byte) { - rs.lock.Lock() - defer rs.lock.Unlock() - t, ok := rs.changes[table] - if !ok { - t = map[string][]byte{} + t = btree.NewG[ReconStateItem1](32, reconStateItem1Less) rs.changes[table] = t } - t[string(key)] = nil - rs.sizeEstimate += uint64(len(key)) + item := ReconStateItem1{key: libcommon.Copy(key), val: libcommon.Copy(val)} + t.ReplaceOrInsert(item) + rs.sizeEstimate += uint64(unsafe.Sizeof(item)) + uint64(len(key)) + uint64(len(val)) } func (rs *ReconState1) Get(table string, key []byte) []byte { @@ -115,40 +122,45 @@ func (rs *ReconState1) get(table string, key []byte) []byte { if !ok { return nil } - return t[string(key)] + if i, ok := t.Get(ReconStateItem1{key: key}); ok { + return i.val + } + return nil } func (rs *ReconState1) Flush(rwTx kv.RwTx) error { rs.lock.Lock() defer rs.lock.Unlock() 
for table, t := range rs.changes { - for ks, val := range t { - if len(val) > 0 { - if err := rwTx.Put(table, []byte(ks), val); err != nil { - return err + var err error + t.Ascend(func(item ReconStateItem1) bool { + if len(item.val) == 0 { + if err = rwTx.Delete(table, item.key, nil); err != nil { + return false + } + //fmt.Printf("Flush [%x]=>\n", ks) + } else { + if err = rwTx.Put(table, item.key, item.val); err != nil { + return false } + //fmt.Printf("Flush [%x]=>[%x]\n", ks, val) } + return true + }) + if err != nil { + return err } + t.Clear(true) } - rs.changes = map[string]map[string][]byte{} rs.sizeEstimate = 0 return nil } func (rs *ReconState1) Schedule() (TxTask, bool) { - rs.lock.Lock() - defer rs.lock.Unlock() - for rs.queue.Len() < 16 { - txTask, ok := <-rs.workCh - if !ok { - // No more work, channel is closed - break - } - if txTask.Sender == nil { - heap.Push(&rs.queue, txTask) - } else if rs.registerSender(txTask) { - heap.Push(&rs.queue, txTask) - } + rs.queueLock.Lock() + defer rs.queueLock.Unlock() + for !rs.finished && rs.queue.Len() == 0 { + rs.receiveWork.Wait() } if rs.queue.Len() > 0 { return heap.Pop(&rs.queue).(TxTask), true @@ -156,7 +168,9 @@ func (rs *ReconState1) Schedule() (TxTask, bool) { return TxTask{}, false } -func (rs *ReconState1) registerSender(txTask TxTask) bool { +func (rs *ReconState1) RegisterSender(txTask TxTask) bool { + rs.triggerLock.Lock() + defer rs.triggerLock.Unlock() lastTxNum, deferral := rs.senderTxNums[*txTask.Sender] if deferral { // Transactions with the same sender have obvious data dependency, no point running it before lastTxNum @@ -169,11 +183,16 @@ func (rs *ReconState1) registerSender(txTask TxTask) bool { return !deferral } -func (rs *ReconState1) CommitTxNum(sender *common.Address, txNum uint64) { - rs.lock.Lock() - defer rs.lock.Unlock() +func (rs *ReconState1) CommitTxNum(sender *common.Address, txNum uint64) uint64 { + rs.queueLock.Lock() + defer rs.queueLock.Unlock() + rs.triggerLock.Lock() + defer rs.triggerLock.Unlock() + count := uint64(0) if triggered, ok := rs.triggers[txNum]; ok { heap.Push(&rs.queue, triggered) + rs.receiveWork.Signal() + count++ delete(rs.triggers, txNum) } if sender != nil { @@ -183,37 +202,65 @@ func (rs *ReconState1) CommitTxNum(sender *common.Address, txNum uint64) { } } rs.txsDone++ + return count } -func (rs *ReconState1) RollbackTx(txTask TxTask) { - rs.lock.Lock() - defer rs.lock.Unlock() +func (rs *ReconState1) AddWork(txTask TxTask) { + txTask.BalanceIncreaseSet = nil + txTask.ReadLists = nil + txTask.WriteLists = nil + txTask.ResultsSize = 0 + rs.queueLock.Lock() + defer rs.queueLock.Unlock() heap.Push(&rs.queue, txTask) - rs.rollbackCount++ + rs.receiveWork.Signal() } -func (rs *ReconState1) Apply(writeKeys, writeVals map[string][][]byte, balanceIncreaseSet map[common.Address]uint256.Int) { +func (rs *ReconState1) Finish() { + rs.queueLock.Lock() + defer rs.queueLock.Unlock() + rs.finished = true + rs.receiveWork.Broadcast() +} + +func (rs *ReconState1) Apply(emptyRemoval bool, roTx kv.Tx, txTask TxTask) error { rs.lock.Lock() defer rs.lock.Unlock() - for table, keyList := range writeKeys { - valList := writeVals[table] - for i, key := range keyList { - val := valList[i] - rs.put(table, key, val) + if txTask.WriteLists != nil { + for table, list := range txTask.WriteLists { + for i, key := range list.Keys { + val := list.Vals[i] + rs.put(table, key, val) + } } } - for addr, increase := range balanceIncreaseSet { + for addr, increase := range txTask.BalanceIncreaseSet { + //if 
increase.IsZero() { + // continue + //} enc := rs.get(kv.PlainState, addr.Bytes()) + if enc == nil { + var err error + enc, err = roTx.GetOne(kv.PlainState, addr.Bytes()) + if err != nil { + return err + } + } var a accounts.Account if err := a.DecodeForStorage(enc); err != nil { - panic(err) + return err } a.Balance.Add(&a.Balance, &increase) - l := a.EncodingLengthForStorage() - enc = make([]byte, l) - a.EncodeForStorage(enc) + if emptyRemoval && a.Nonce == 0 && a.Balance.IsZero() && a.IsEmptyCodeHash() { + enc = []byte{} + } else { + l := a.EncodingLengthForStorage() + enc = make([]byte, l) + a.EncodeForStorage(enc) + } rs.put(kv.PlainState, addr.Bytes(), enc) } + return nil } func (rs *ReconState1) DoneCount() uint64 { @@ -222,49 +269,80 @@ func (rs *ReconState1) DoneCount() uint64 { return rs.txsDone } -func (rs *ReconState1) RollbackCount() uint64 { - rs.lock.RLock() - defer rs.lock.RUnlock() - return rs.rollbackCount -} - func (rs *ReconState1) SizeEstimate() uint64 { rs.lock.RLock() defer rs.lock.RUnlock() return rs.sizeEstimate } -func (rs *ReconState1) ReadsValid(readKeys, readVals map[string][][]byte) bool { +func (rs *ReconState1) ReadsValid(readLists map[string]*KvList) bool { rs.lock.RLock() defer rs.lock.RUnlock() - for table, keyList := range readKeys { - t, ok := rs.changes[table] + //fmt.Printf("ValidReads\n") + for table, list := range readLists { + //fmt.Printf("Table %s\n", table) + var t *btree.BTreeG[ReconStateItem1] + var ok bool + if table == CodeSizeTable { + t, ok = rs.changes[kv.Code] + } else { + t, ok = rs.changes[table] + } if !ok { continue } - valList := readVals[table] - for i, key := range keyList { - val := valList[i] - if rereadVal, ok := t[string(key)]; ok { - if !bytes.Equal(val, rereadVal) { + for i, key := range list.Keys { + val := list.Vals[i] + if item, ok := t.Get(ReconStateItem1{key: key}); ok { + //fmt.Printf("key [%x] => [%x] vs [%x]\n", key, val, rereadVal) + if table == CodeSizeTable { + if binary.BigEndian.Uint64(val) != uint64(len(item.val)) { + return false + } + } else if !bytes.Equal(val, item.val) { return false } + } else { + //fmt.Printf("key [%x] => [%x] not present in changes\n", key, val) } } } return true } +// KvList sort.Interface to sort write list by keys +type KvList struct { + Keys, Vals [][]byte +} + +func (l KvList) Len() int { + return len(l.Keys) +} + +func (l KvList) Less(i, j int) bool { + return bytes.Compare(l.Keys[i], l.Keys[j]) < 0 +} + +func (l *KvList) Swap(i, j int) { + l.Keys[i], l.Keys[j] = l.Keys[j], l.Keys[i] + l.Vals[i], l.Vals[j] = l.Vals[j], l.Vals[i] +} + type StateReconWriter1 struct { - rs *ReconState1 - txNum uint64 - writeKeys map[string][][]byte - writeVals map[string][][]byte + rs *ReconState1 + txNum uint64 + writeLists map[string]*KvList } func NewStateReconWriter1(rs *ReconState1) *StateReconWriter1 { return &StateReconWriter1{ rs: rs, + writeLists: map[string]*KvList{ + kv.PlainState: {}, + kv.Code: {}, + kv.PlainContractCode: {}, + kv.IncarnationMap: {}, + }, } } @@ -273,42 +351,49 @@ func (w *StateReconWriter1) SetTxNum(txNum uint64) { } func (w *StateReconWriter1) ResetWriteSet() { - w.writeKeys = map[string][][]byte{} - w.writeVals = map[string][][]byte{} + w.writeLists = map[string]*KvList{ + kv.PlainState: {}, + kv.Code: {}, + kv.PlainContractCode: {}, + kv.IncarnationMap: {}, + } } -func (w *StateReconWriter1) WriteSet() (map[string][][]byte, map[string][][]byte) { - return w.writeKeys, w.writeVals +func (w *StateReconWriter1) WriteSet() map[string]*KvList { + for _, list := range 
w.writeLists { + sort.Sort(list) + } + return w.writeLists } func (w *StateReconWriter1) UpdateAccountData(address common.Address, original, account *accounts.Account) error { value := make([]byte, account.EncodingLengthForStorage()) account.EncodeForStorage(value) //fmt.Printf("account [%x]=>{Balance: %d, Nonce: %d, Root: %x, CodeHash: %x} txNum: %d\n", address, &account.Balance, account.Nonce, account.Root, account.CodeHash, w.txNum) - w.writeKeys[kv.PlainState] = append(w.writeKeys[kv.PlainState], address.Bytes()) - w.writeVals[kv.PlainState] = append(w.writeVals[kv.PlainState], value) + w.writeLists[kv.PlainState].Keys = append(w.writeLists[kv.PlainState].Keys, address.Bytes()) + w.writeLists[kv.PlainState].Vals = append(w.writeLists[kv.PlainState].Vals, value) return nil } func (w *StateReconWriter1) UpdateAccountCode(address common.Address, incarnation uint64, codeHash common.Hash, code []byte) error { - w.writeKeys[kv.Code] = append(w.writeKeys[kv.Code], codeHash.Bytes()) - w.writeVals[kv.Code] = append(w.writeVals[kv.Code], code) + w.writeLists[kv.Code].Keys = append(w.writeLists[kv.Code].Keys, codeHash.Bytes()) + w.writeLists[kv.Code].Vals = append(w.writeLists[kv.Code].Vals, code) if len(code) > 0 { //fmt.Printf("code [%x] => [%x] CodeHash: %x, txNum: %d\n", address, code, codeHash, w.txNum) - w.writeKeys[kv.PlainContractCode] = append(w.writeKeys[kv.PlainContractCode], dbutils.PlainGenerateStoragePrefix(address[:], incarnation)) - w.writeVals[kv.PlainContractCode] = append(w.writeVals[kv.PlainContractCode], codeHash.Bytes()) + w.writeLists[kv.PlainContractCode].Keys = append(w.writeLists[kv.PlainContractCode].Keys, dbutils.PlainGenerateStoragePrefix(address[:], incarnation)) + w.writeLists[kv.PlainContractCode].Vals = append(w.writeLists[kv.PlainContractCode].Vals, codeHash.Bytes()) } return nil } func (w *StateReconWriter1) DeleteAccount(address common.Address, original *accounts.Account) error { - w.writeKeys[kv.PlainState] = append(w.writeKeys[kv.PlainState], address.Bytes()) - w.writeVals[kv.PlainState] = append(w.writeVals[kv.PlainState], nil) + w.writeLists[kv.PlainState].Keys = append(w.writeLists[kv.PlainState].Keys, address.Bytes()) + w.writeLists[kv.PlainState].Vals = append(w.writeLists[kv.PlainState].Vals, []byte{}) if original.Incarnation > 0 { var b [8]byte binary.BigEndian.PutUint64(b[:], original.Incarnation) - w.writeKeys[kv.IncarnationMap] = append(w.writeKeys[kv.IncarnationMap], address.Bytes()) - w.writeVals[kv.IncarnationMap] = append(w.writeVals[kv.IncarnationMap], b[:]) + w.writeLists[kv.IncarnationMap].Keys = append(w.writeLists[kv.IncarnationMap].Keys, address.Bytes()) + w.writeLists[kv.IncarnationMap].Vals = append(w.writeLists[kv.IncarnationMap].Vals, b[:]) } return nil } @@ -317,8 +402,8 @@ func (w *StateReconWriter1) WriteAccountStorage(address common.Address, incarnat if *original == *value { return nil } - w.writeKeys[kv.PlainState] = append(w.writeKeys[kv.PlainState], dbutils.PlainGenerateCompositeStorageKey(address.Bytes(), incarnation, key.Bytes())) - w.writeVals[kv.PlainState] = append(w.writeVals[kv.PlainState], value.Bytes()) + w.writeLists[kv.PlainState].Keys = append(w.writeLists[kv.PlainState].Keys, dbutils.PlainGenerateCompositeStorageKey(address.Bytes(), incarnation, key.Bytes())) + w.writeLists[kv.PlainState].Vals = append(w.writeLists[kv.PlainState].Vals, value.Bytes()) //fmt.Printf("storage [%x] [%x] => [%x], txNum: %d\n", address, *key, v, w.txNum) return nil } @@ -335,12 +420,19 @@ type StateReconReader1 struct { readError bool 
stateTxNum uint64 composite []byte - readKeys map[string][][]byte - readVals map[string][][]byte + readLists map[string]*KvList } func NewStateReconReader1(rs *ReconState1) *StateReconReader1 { - return &StateReconReader1{rs: rs} + return &StateReconReader1{ + rs: rs, + readLists: map[string]*KvList{ + kv.PlainState: {}, + kv.Code: {}, + CodeSizeTable: {}, + kv.IncarnationMap: {}, + }, + } } func (r *StateReconReader1) SetTxNum(txNum uint64) { @@ -352,12 +444,19 @@ func (r *StateReconReader1) SetTx(tx kv.Tx) { } func (r *StateReconReader1) ResetReadSet() { - r.readKeys = map[string][][]byte{} - r.readVals = map[string][][]byte{} + r.readLists = map[string]*KvList{ + kv.PlainState: {}, + kv.Code: {}, + CodeSizeTable: {}, + kv.IncarnationMap: {}, + } } -func (r *StateReconReader1) ReadSet() (map[string][][]byte, map[string][][]byte) { - return r.readKeys, r.readVals +func (r *StateReconReader1) ReadSet() map[string]*KvList { + for _, list := range r.readLists { + sort.Sort(list) + } + return r.readLists } func (r *StateReconReader1) SetTrace(trace bool) { @@ -373,8 +472,8 @@ func (r *StateReconReader1) ReadAccountData(address common.Address) (*accounts.A return nil, err } } - r.readKeys[kv.PlainState] = append(r.readKeys[kv.PlainState], address.Bytes()) - r.readVals[kv.PlainState] = append(r.readVals[kv.PlainState], enc) + r.readLists[kv.PlainState].Keys = append(r.readLists[kv.PlainState].Keys, address.Bytes()) + r.readLists[kv.PlainState].Vals = append(r.readLists[kv.PlainState].Vals, common.CopyBytes(enc)) if len(enc) == 0 { return nil, nil } @@ -406,8 +505,8 @@ func (r *StateReconReader1) ReadAccountStorage(address common.Address, incarnati return nil, err } } - r.readKeys[kv.PlainState] = append(r.readKeys[kv.PlainState], r.composite) - r.readVals[kv.PlainState] = append(r.readVals[kv.PlainState], enc) + r.readLists[kv.PlainState].Keys = append(r.readLists[kv.PlainState].Keys, common.CopyBytes(r.composite)) + r.readLists[kv.PlainState].Vals = append(r.readLists[kv.PlainState].Vals, common.CopyBytes(enc)) if r.trace { if enc == nil { fmt.Printf("ReadAccountStorage [%x] [%x] => [], txNum: %d\n", address, key.Bytes(), r.txNum) @@ -430,8 +529,8 @@ func (r *StateReconReader1) ReadAccountCode(address common.Address, incarnation return nil, err } } - r.readKeys[kv.Code] = append(r.readKeys[kv.Code], address.Bytes()) - r.readVals[kv.Code] = append(r.readVals[kv.Code], enc) + r.readLists[kv.Code].Keys = append(r.readLists[kv.Code].Keys, address.Bytes()) + r.readLists[kv.Code].Vals = append(r.readLists[kv.Code].Vals, common.CopyBytes(enc)) if r.trace { fmt.Printf("ReadAccountCode [%x] => [%x], txNum: %d\n", address, enc, r.txNum) } @@ -447,8 +546,10 @@ func (r *StateReconReader1) ReadAccountCodeSize(address common.Address, incarnat return 0, err } } - r.readKeys[kv.Code] = append(r.readKeys[kv.Code], address.Bytes()) - r.readVals[kv.Code] = append(r.readVals[kv.Code], enc) + var sizebuf [8]byte + binary.BigEndian.PutUint64(sizebuf[:], uint64(len(enc))) + r.readLists[CodeSizeTable].Keys = append(r.readLists[CodeSizeTable].Keys, address.Bytes()) + r.readLists[CodeSizeTable].Vals = append(r.readLists[CodeSizeTable].Vals, sizebuf[:]) size := len(enc) if r.trace { fmt.Printf("ReadAccountCodeSize [%x] => [%d], txNum: %d\n", address, size, r.txNum) @@ -465,8 +566,8 @@ func (r *StateReconReader1) ReadAccountIncarnation(address common.Address) (uint return 0, err } } - r.readKeys[kv.IncarnationMap] = append(r.readKeys[kv.IncarnationMap], address.Bytes()) - r.readVals[kv.IncarnationMap] = 
append(r.readVals[kv.IncarnationMap], enc) + r.readLists[kv.IncarnationMap].Keys = append(r.readLists[kv.IncarnationMap].Keys, address.Bytes()) + r.readLists[kv.IncarnationMap].Vals = append(r.readLists[kv.IncarnationMap].Vals, common.CopyBytes(enc)) if len(enc) == 0 { return 0, nil } diff --git a/go.mod b/go.mod index c733dbefb05..f24623b10a0 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220723031125-6f7794e88b5e + github.com/ledgerwatch/erigon-lib v0.0.0-20220723080652-596d10ea2e13 github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/go.sum b/go.sum index 38d9a79d686..1ea3734179d 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723031125-6f7794e88b5e h1:4tZnz9FCTIalm6VtGXBZX713Y+lcHqpMK6L3wP7OSHY= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723031125-6f7794e88b5e/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723080652-596d10ea2e13 h1:GsmPUJO6xeifKSxxnG+BUwGEFggljkchaYm/HomvIQs= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723080652-596d10ea2e13/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= From 4c0ab19bc672acfbe97f903abf961d4ccd2cdc47 Mon Sep 17 00:00:00 2001 From: banteg <4562643+banteg@users.noreply.github.com> Date: Sun, 24 Jul 2022 09:21:31 +0400 Subject: [PATCH 62/72] fix(vmtrace): return value pushed by smod (#4806) --- cmd/rpcdaemon/commands/trace_adhoc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/rpcdaemon/commands/trace_adhoc.go b/cmd/rpcdaemon/commands/trace_adhoc.go index 2493edf0daa..c7f958e30f1 100644 --- a/cmd/rpcdaemon/commands/trace_adhoc.go +++ b/cmd/rpcdaemon/commands/trace_adhoc.go @@ -448,7 +448,7 @@ func (ot *OeTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost vm.ADD, vm.EXP, vm.CALLER, vm.SHA3, vm.SUB, vm.ADDRESS, vm.GAS, vm.MUL, vm.RETURNDATASIZE, vm.NOT, vm.SHR, vm.SHL, vm.EXTCODESIZE, vm.SLT, vm.OR, vm.NUMBER, vm.PC, vm.TIMESTAMP, vm.BALANCE, vm.SELFBALANCE, vm.MULMOD, vm.ADDMOD, vm.BASEFEE, vm.BLOCKHASH, vm.BYTE, vm.XOR, vm.ORIGIN, vm.CODESIZE, vm.MOD, vm.SIGNEXTEND, vm.GASLIMIT, vm.DIFFICULTY, vm.SGT, vm.GASPRICE, - vm.MSIZE, vm.EXTCODEHASH: + vm.MSIZE, vm.EXTCODEHASH, vm.SMOD: showStack = 1 } for i := showStack - 1; i >= 0; i-- { From 1533bea3f68b357d35b904c2393fbb4c467158cf Mon Sep 17 00:00:00 2001 From: banteg <4562643+banteg@users.noreply.github.com> Date: Sun, 24 Jul 2022 11:56:37 +0400 Subject: [PATCH 63/72] fix(vmtrace): missing pushes (#4808) * fix(vmtrace): add chainid stack value * fix(vmtrace): add coinbase stack value --- cmd/rpcdaemon/commands/trace_adhoc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/rpcdaemon/commands/trace_adhoc.go 
b/cmd/rpcdaemon/commands/trace_adhoc.go index c7f958e30f1..b180f26951f 100644 --- a/cmd/rpcdaemon/commands/trace_adhoc.go +++ b/cmd/rpcdaemon/commands/trace_adhoc.go @@ -448,7 +448,7 @@ func (ot *OeTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost vm.ADD, vm.EXP, vm.CALLER, vm.SHA3, vm.SUB, vm.ADDRESS, vm.GAS, vm.MUL, vm.RETURNDATASIZE, vm.NOT, vm.SHR, vm.SHL, vm.EXTCODESIZE, vm.SLT, vm.OR, vm.NUMBER, vm.PC, vm.TIMESTAMP, vm.BALANCE, vm.SELFBALANCE, vm.MULMOD, vm.ADDMOD, vm.BASEFEE, vm.BLOCKHASH, vm.BYTE, vm.XOR, vm.ORIGIN, vm.CODESIZE, vm.MOD, vm.SIGNEXTEND, vm.GASLIMIT, vm.DIFFICULTY, vm.SGT, vm.GASPRICE, - vm.MSIZE, vm.EXTCODEHASH, vm.SMOD: + vm.MSIZE, vm.EXTCODEHASH, vm.SMOD, vm.CHAINID, vm.COINBASE: showStack = 1 } for i := showStack - 1; i >= 0; i-- { From e85796a38c6f25943bb7969e32c68269c1dcb705 Mon Sep 17 00:00:00 2001 From: dmitriyselivanov Date: Sun, 24 Jul 2022 11:50:00 +0300 Subject: [PATCH 64/72] rpcdaemon: added test for eth_call in case of a pruned block (#4776) --- cmd/rpcdaemon/commands/eth_call_test.go | 118 ++++++++++++++++++++++++ 1 file changed, 118 insertions(+) diff --git a/cmd/rpcdaemon/commands/eth_call_test.go b/cmd/rpcdaemon/commands/eth_call_test.go index fc204365a4f..6812cca6889 100644 --- a/cmd/rpcdaemon/commands/eth_call_test.go +++ b/cmd/rpcdaemon/commands/eth_call_test.go @@ -3,14 +3,28 @@ package commands import ( "context" "fmt" + "math/big" "testing" + "time" + + "github.com/holiman/uint256" + "github.com/stretchr/testify/assert" "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/common/math" + "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/internal/ethapi" + "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/snapshotsync" @@ -50,6 +64,29 @@ func TestEthCallNonCanonical(t *testing.T) { } } +func TestEthCallToPrunedBlock(t *testing.T) { + pruneTo := uint64(3) + ethCallBlockNumber := rpc.BlockNumber(2) + + db, bankAddress, contractAddress := chainWithDeployedContract(t) + + prune(t, db, pruneTo) + + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + + callData := hexutil.MustDecode("0x2e64cec1") + callDataBytes := hexutil.Bytes(callData) + + if _, err := api.Call(context.Background(), ethapi.CallArgs{ + From: &bankAddress, + To: &contractAddress, + Data: &callDataBytes, + }, rpc.BlockNumberOrHashWithNumber(ethCallBlockNumber), nil); err != nil { + t.Errorf("unexpected error: %v", err) + } +} + func TestGetBlockByTimestampLatestTime(t *testing.T) { ctx := context.Background() db := rpcdaemontest.CreateTestKV(t) @@ -261,3 +298,84 @@ func TestGetBlockByTimestamp(t *testing.T) { t.Errorf("Retrieved the wrong block.\nexpected block hash: %s expected timestamp: %d\nblock hash retrieved: %s timestamp retrieved: %d", response["hash"], response["timestamp"], block["hash"], block["timestamp"]) } } + +func 
chainWithDeployedContract(t *testing.T) (kv.RwDB, common.Address, common.Address) { + var ( + signer = types.LatestSignerForChainID(nil) + bankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + bankAddress = crypto.PubkeyToAddress(bankKey.PublicKey) + bankFunds = big.NewInt(1e9) + contract = hexutil.MustDecode("0x608060405234801561001057600080fd5b50610150806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c80632e64cec11461003b5780636057361d14610059575b600080fd5b610043610075565b60405161005091906100d9565b60405180910390f35b610073600480360381019061006e919061009d565b61007e565b005b60008054905090565b8060008190555050565b60008135905061009781610103565b92915050565b6000602082840312156100b3576100b26100fe565b5b60006100c184828501610088565b91505092915050565b6100d3816100f4565b82525050565b60006020820190506100ee60008301846100ca565b92915050565b6000819050919050565b600080fd5b61010c816100f4565b811461011757600080fd5b5056fea26469706673582212209a159a4f3847890f10bfb87871a61eba91c5dbf5ee3cf6398207e292eee22a1664736f6c63430008070033") + gspec = &core.Genesis{ + Config: params.AllEthashProtocolChanges, + Alloc: core.GenesisAlloc{bankAddress: {Balance: bankFunds}}, + } + ) + m := stages.MockWithGenesis(t, gspec, bankKey) + db := m.DB + + var contractAddr common.Address + + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 2, func(i int, block *core.BlockGen) { + nonce := block.TxNonce(bankAddress) + switch i { + case 0: + tx, err := types.SignTx(types.NewContractCreation(nonce, new(uint256.Int), 1e6, new(uint256.Int), contract), *signer, bankKey) + assert.NoError(t, err) + block.AddTx(tx) + contractAddr = crypto.CreateAddress(bankAddress, nonce) + case 1: + txn, err := types.SignTx(types.NewTransaction(nonce, contractAddr, new(uint256.Int), 90000, new(uint256.Int), nil), *signer, bankKey) + assert.NoError(t, err) + block.AddTx(txn) + } + }, false /* intermediateHashes */) + if err != nil { + t.Fatalf("generate blocks: %v", err) + } + + err = m.InsertChain(chain) + assert.NoError(t, err) + + tx, err := db.BeginRo(context.Background()) + if err != nil { + t.Fatalf("read only db tx to read state: %v", err) + } + defer tx.Rollback() + + st := state.New(state.NewPlainState(tx, 1)) + assert.NoError(t, err) + assert.False(t, st.Exist(contractAddr), "Contract should not exist at block #1") + + st = state.New(state.NewPlainState(tx, 2)) + assert.NoError(t, err) + assert.True(t, st.Exist(contractAddr), "Contract should exist at block #2") + + return db, bankAddress, contractAddr +} + +func prune(t *testing.T, db kv.RwDB, pruneTo uint64) { + ctx := context.Background() + tx, err := db.BeginRw(ctx) + assert.NoError(t, err) + + logEvery := time.NewTicker(20 * time.Second) + + err = stagedsync.PruneTableDupSort(tx, kv.AccountChangeSet, "", pruneTo, logEvery, ctx) + assert.NoError(t, err) + + err = stagedsync.PruneTableDupSort(tx, kv.StorageChangeSet, "", pruneTo, logEvery, ctx) + assert.NoError(t, err) + + err = stagedsync.PruneTable(tx, kv.Receipts, pruneTo, ctx, math.MaxInt32) + assert.NoError(t, err) + + err = stagedsync.PruneTable(tx, kv.Log, pruneTo, ctx, math.MaxInt32) + assert.NoError(t, err) + + err = stagedsync.PruneTableDupSort(tx, kv.CallTraceSet, "", pruneTo, logEvery, ctx) + assert.NoError(t, err) + + err = tx.Commit() + assert.NoError(t, err) +} From 648175717590fd76fa10afbe8894f2f691f62a73 Mon Sep 17 00:00:00 2001 From: bgelb Date: Sun, 24 Jul 2022 01:50:43 -0700 Subject: [PATCH 65/72] fix regressions in trace_call 
and eth_createAccessList introduced by PR #3517 (#4807) --- cmd/rpcdaemon/commands/eth_call.go | 2 +- cmd/rpcdaemon/commands/trace_adhoc.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_call.go b/cmd/rpcdaemon/commands/eth_call.go index 714c0ecc195..e32d7d814bb 100644 --- a/cmd/rpcdaemon/commands/eth_call.go +++ b/cmd/rpcdaemon/commands/eth_call.go @@ -320,7 +320,7 @@ func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi.CallArgs, } stateReader = state.NewCachedReader2(cacheView, tx) } else { - stateReader = state.NewPlainState(tx, blockNumber) + stateReader = state.NewPlainState(tx, blockNumber+1) } header := block.Header() diff --git a/cmd/rpcdaemon/commands/trace_adhoc.go b/cmd/rpcdaemon/commands/trace_adhoc.go index b180f26951f..acb42089017 100644 --- a/cmd/rpcdaemon/commands/trace_adhoc.go +++ b/cmd/rpcdaemon/commands/trace_adhoc.go @@ -879,7 +879,7 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp } stateReader = state.NewCachedReader2(cacheView, tx) } else { - stateReader = state.NewPlainState(tx, blockNumber) + stateReader = state.NewPlainState(tx, blockNumber+1) } ibs := state.New(stateReader) From 6f53d1ef4d71d467c9bc641ff5bf5c2228154bf2 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Sun, 24 Jul 2022 10:44:52 +0100 Subject: [PATCH 66/72] Fix test compilation error (#4809) Co-authored-by: Alexey Sharp --- cmd/rpcdaemon/commands/eth_call_test.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_call_test.go b/cmd/rpcdaemon/commands/eth_call_test.go index 6812cca6889..7f20bc8daca 100644 --- a/cmd/rpcdaemon/commands/eth_call_test.go +++ b/cmd/rpcdaemon/commands/eth_call_test.go @@ -22,7 +22,6 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" @@ -361,19 +360,19 @@ func prune(t *testing.T, db kv.RwDB, pruneTo uint64) { logEvery := time.NewTicker(20 * time.Second) - err = stagedsync.PruneTableDupSort(tx, kv.AccountChangeSet, "", pruneTo, logEvery, ctx) + err = rawdb.PruneTableDupSort(tx, kv.AccountChangeSet, "", pruneTo, logEvery, ctx) assert.NoError(t, err) - err = stagedsync.PruneTableDupSort(tx, kv.StorageChangeSet, "", pruneTo, logEvery, ctx) + err = rawdb.PruneTableDupSort(tx, kv.StorageChangeSet, "", pruneTo, logEvery, ctx) assert.NoError(t, err) - err = stagedsync.PruneTable(tx, kv.Receipts, pruneTo, ctx, math.MaxInt32) + err = rawdb.PruneTable(tx, kv.Receipts, pruneTo, ctx, math.MaxInt32) assert.NoError(t, err) - err = stagedsync.PruneTable(tx, kv.Log, pruneTo, ctx, math.MaxInt32) + err = rawdb.PruneTable(tx, kv.Log, pruneTo, ctx, math.MaxInt32) assert.NoError(t, err) - err = stagedsync.PruneTableDupSort(tx, kv.CallTraceSet, "", pruneTo, logEvery, ctx) + err = rawdb.PruneTableDupSort(tx, kv.CallTraceSet, "", pruneTo, logEvery, ctx) assert.NoError(t, err) err = tx.Commit() From a3727463974072dc8c09d599e2320f2e66982a22 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 24 Jul 2022 16:20:08 +0200 Subject: [PATCH 67/72] Move some checks out enginePayload and fcu (#4805) * made in refactoring * test Co-authored-by: giuliorebuffo --- ethdb/privateapi/engine_test.go | 2 +- ethdb/privateapi/ethbackend.go | 74 +++++++++++---------------------- 2 files changed, 26 
insertions(+), 50 deletions(-) diff --git a/ethdb/privateapi/engine_test.go b/ethdb/privateapi/engine_test.go index 74b0eab1efa..499dd602890 100644 --- a/ethdb/privateapi/engine_test.go +++ b/ethdb/privateapi/engine_test.go @@ -230,7 +230,7 @@ func TestNoTTD(t *testing.T) { go func() { _, err = backend.EngineNewPayloadV1(ctx, &types2.ExecutionPayload{ ParentHash: gointerfaces.ConvertHashToH256(common.HexToHash("0x2")), - BlockHash: gointerfaces.ConvertHashToH256(common.HexToHash("0x3")), + BlockHash: gointerfaces.ConvertHashToH256(common.HexToHash("0xe6a580606b065e08034dcd6eea026cfdcbd3b41918d98b41cb9bf797d0c27033")), ReceiptRoot: gointerfaces.ConvertHashToH256(common.HexToHash("0x4")), StateRoot: gointerfaces.ConvertHashToH256(common.HexToHash("0x4")), PrevRandao: gointerfaces.ConvertHashToH256(common.HexToHash("0x0b3")), diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index 6e1c22b2dfd..e9c2e448113 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -234,7 +234,7 @@ func (s *EthBackendServer) Block(ctx context.Context, req *remote.BlockRequest) func convertPayloadStatus(payloadStatus *engineapi.PayloadStatus) *remote.EnginePayloadStatus { reply := remote.EnginePayloadStatus{Status: payloadStatus.Status} - if payloadStatus.LatestValidHash != (common.Hash{}) { + if payloadStatus.Status != remote.EngineStatus_SYNCING { reply.LatestValidHash = gointerfaces.ConvertHashToH256(payloadStatus.LatestValidHash) } if payloadStatus.ValidationError != nil { @@ -262,11 +262,6 @@ func (s *EthBackendServer) stageLoopIsBusy() bool { // EngineNewPayloadV1 validates and possibly executes payload func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.ExecutionPayload) (*remote.EnginePayloadStatus, error) { - if s.config.TerminalTotalDifficulty == nil { - log.Error("[NewPayload] not a proof-of-stake chain") - return nil, fmt.Errorf("not a proof-of-stake chain") - } - var baseFee *big.Int eip1559 := false @@ -322,24 +317,6 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E } block := types.NewBlockFromStorage(blockHash, &header, transactions, nil) - tx, err := s.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - parentTd, err := rawdb.ReadTd(tx, header.ParentHash, req.BlockNumber-1) - if err != nil { - return nil, err - } - - tx.Rollback() - - if parentTd != nil && parentTd.Cmp(s.config.TerminalTotalDifficulty) < 0 { - log.Warn("[NewPayload] TTD not reached yet", "height", header.Number, "hash", common.Hash(blockHash)) - return &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{})}, nil - } - possibleStatus, err := s.getPayloadStatusFromHashIfPossible(blockHash, req.BlockNumber, header.ParentHash, true) if err != nil { return nil, err @@ -375,15 +352,22 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E // Check if we can make out a status from the payload hash/head hash. 
func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.Hash, blockNumber uint64, parentHash common.Hash, newPayload bool) (*engineapi.PayloadStatus, error) { - if s.hd == nil { - return nil, nil - } + // Determine which prefix to use for logs var prefix string if newPayload { prefix = "NewPayload" } else { prefix = "ForkChoiceUpdated" } + if s.config.TerminalTotalDifficulty == nil { + log.Error(fmt.Sprintf("[%s] not a proof-of-stake chain", prefix)) + return nil, fmt.Errorf("not a proof-of-stake chain") + } + + if s.hd == nil { + return nil, nil + } + tx, err := s.db.BeginRo(s.ctx) if err != nil { return nil, err } @@ -394,13 +378,27 @@ func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.H if err != nil { return nil, err } + // Retrieve parent and total difficulty. var parent *types.Header + var td *big.Int if newPayload { + // Obtain TD parent, err = rawdb.ReadHeaderByHash(tx, parentHash) + if err != nil { + return nil, err + } + td, err = rawdb.ReadTdByHash(tx, parentHash) + } else { + td, err = rawdb.ReadTdByHash(tx, blockHash) } if err != nil { return nil, err } + // Check if we already reached TTD. + if td != nil && td.Cmp(s.config.TerminalTotalDifficulty) < 0 { + log.Warn(fmt.Sprintf("[%s] TTD not reached yet", prefix), "hash", common.Hash(blockHash)) + return &engineapi.PayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: common.Hash{}}, nil + } var canonicalHash common.Hash if header != nil { @@ -523,34 +521,12 @@ func (s *EthBackendServer) EngineGetPayloadV1(ctx context.Context, req *remote.E // EngineForkChoiceUpdatedV1 either states new block head or request the assembling of a new block func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *remote.EngineForkChoiceUpdatedRequest) (*remote.EngineForkChoiceUpdatedReply, error) { - if s.config.TerminalTotalDifficulty == nil { - return nil, fmt.Errorf("not a proof-of-stake chain") - } - forkChoice := engineapi.ForkChoiceMessage{ HeadBlockHash: gointerfaces.ConvertH256ToHash(req.ForkchoiceState.HeadBlockHash), SafeBlockHash: gointerfaces.ConvertH256ToHash(req.ForkchoiceState.SafeBlockHash), FinalizedBlockHash: gointerfaces.ConvertH256ToHash(req.ForkchoiceState.FinalizedBlockHash), } - tx1, err := s.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx1.Rollback() - - td, err := rawdb.ReadTdByHash(tx1, forkChoice.HeadBlockHash) - tx1.Rollback() - if err != nil { - return nil, err - } - if td != nil && td.Cmp(s.config.TerminalTotalDifficulty) < 0 { - log.Warn("[ForkChoiceUpdated] TTD not reached yet", "forkChoice", forkChoice) - return &remote.EngineForkChoiceUpdatedReply{ - PayloadStatus: &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{})}, - }, nil - } - status, err := s.getPayloadStatusFromHashIfPossible(forkChoice.HeadBlockHash, 0, common.Hash{}, false) if err != nil { return nil, err } From 7826a33b876d6681c4ae52f93e00d9b9cf7dc4e0 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 25 Jul 2022 11:26:42 +0700 Subject: [PATCH 68/72] save (#4817) fix "grafana user format" #4817 --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 0c6cf567c46..1d5567d8817 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -77,7 +77,7 @@ services: grafana: image: grafana/grafana:9.0.3 - user: 472:0 # required for grafana version >= 7.3 + user: "472:0" # required for grafana version >= 7.3
ports: [ "3000:3000" ] volumes: - ${ERIGON_GRAFANA_CONFIG:-./cmd/prometheus/grafana.ini}:/etc/grafana/grafana.ini From b231856c1cb38a53efe7b1a957a7d137d2f691e4 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 25 Jul 2022 11:29:34 +0700 Subject: [PATCH 69/72] avoid sudo in makefile #4818 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c666796d954..360f3d0bb0f 100644 --- a/Makefile +++ b/Makefile @@ -73,7 +73,7 @@ xdg_data_home_subdirs = $(xdg_data_home)/erigon $(xdg_data_home)/erigon-grafana ## setup_xdg_data_home: TODO setup_xdg_data_home: mkdir -p $(xdg_data_home_subdirs) - ls -aln $(xdg_data_home) | grep -E "472.*0.*erigon-grafana" || sudo chown -R 472:0 $(xdg_data_home)/erigon-grafana + ls -aln $(xdg_data_home) | grep -E "472.*0.*erigon-grafana" || chown -R 472:0 $(xdg_data_home)/erigon-grafana @echo "✔️ xdg_data_home setup" @ls -al $(xdg_data_home) From 9e371fef5ce22c8be559c630cc3c415e89848db1 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 25 Jul 2022 11:31:57 +0700 Subject: [PATCH 70/72] remove only etl-tmp content, but not dir itself #4816 --- eth/backend.go | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/eth/backend.go b/eth/backend.go index 89cd9332846..ef965703603 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -149,7 +149,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere } tmpdir := stack.Config().Dirs.Tmp - if err := os.RemoveAll(tmpdir); err != nil { // clean it on startup + if err := RemoveContents(tmpdir); err != nil { // clean it on startup return nil, fmt.Errorf("clean tmp dir: %s, %w", tmpdir, err) } @@ -912,3 +912,23 @@ func (s *Ethereum) SentryCtx() context.Context { func (s *Ethereum) SentryControlServer() *sentry.MultiClient { return s.sentriesClient } + +// RemoveContents is like os.RemoveAll, but preserve dir itself +func RemoveContents(dir string) error { + d, err := os.Open(dir) + if err != nil { + return err + } + defer d.Close() + names, err := d.Readdirnames(-1) + if err != nil { + return err + } + for _, name := range names { + err = os.RemoveAll(filepath.Join(dir, name)) + if err != nil { + return err + } + } + return nil +} From b20f7ecdd12039bde65d596ed98f7113ee2b73fe Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 25 Jul 2022 12:49:29 +0700 Subject: [PATCH 71/72] docker_hub_default_pid (#4819) --- hooks/build | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hooks/build b/hooks/build index c242d1ab85f..8598f66b14c 100755 --- a/hooks/build +++ b/hooks/build @@ -10,9 +10,9 @@ set -o pipefail # fail if anything in pipe fails # $(id -u) and $(id -g) will be 0 # # so we need to specify the erigon user uid/gid in the image -# choose 3473 matching defaults in .env.example +# choose 1000 matching defaults in .env.example DOCKER_FLAGS="-t ${IMAGE_NAME}" \ -DOCKER_UID=3473 \ -DOCKER_GID=3473 \ +DOCKER_UID=1000 \ +DOCKER_GID=1000 \ GIT_TAG=$(git describe --tags '--match=v*' --dirty) \ make docker From 6faf337b27bad35c0ccb323478567b6d502d12a8 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 25 Jul 2022 13:06:56 +0700 Subject: [PATCH 72/72] pool: allow non-parsable txs in db, skip them with warning --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f24623b10a0..a3a7d1d77e7 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib 
v0.0.0-20220723080652-596d10ea2e13 + github.com/ledgerwatch/erigon-lib v0.0.0-20220725060110-41265c634d13 github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/go.sum b/go.sum index 1ea3734179d..aff84e1bf89 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723080652-596d10ea2e13 h1:GsmPUJO6xeifKSxxnG+BUwGEFggljkchaYm/HomvIQs= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723080652-596d10ea2e13/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220725060110-41265c634d13 h1:wms8ybhc0kbaOro5eO0wj+yLVRj21W0ocszsusFV+lY= +github.com/ledgerwatch/erigon-lib v0.0.0-20220725060110-41265c634d13/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc=