From 9f2f0cf6ffa3100200084db98e566333404c71aa Mon Sep 17 00:00:00 2001
From: Adrian Dobrita
Date: Thu, 19 Sep 2024 18:12:01 +0300
Subject: [PATCH 01/30] versioning for consensus with and without equivalent messages

---
 consensus/spos/bls/v1/blsSubroundsFactory.go  |  298 +++
 .../spos/bls/v1/blsSubroundsFactory_test.go   |  631 ++++++
 consensus/spos/bls/{ => v1}/blsWorker.go      |    2 +-
 consensus/spos/bls/v1/blsWorker_test.go       |  417 ++++
 consensus/spos/bls/{ => v1}/constants.go      |    5 +-
 consensus/spos/bls/{ => v1}/errors.go         |    2 +-
 consensus/spos/bls/v1/export_test.go          |  359 ++++
 consensus/spos/bls/v1/subroundBlock.go        |  686 +++++++
 consensus/spos/bls/v1/subroundBlock_test.go   | 1125 +++++++++++
 consensus/spos/bls/v1/subroundEndRound.go     |  942 +++++++++
 .../spos/bls/v1/subroundEndRound_test.go      | 1769 +++++++++++++++++
 consensus/spos/bls/v1/subroundSignature.go    |  409 ++++
 .../spos/bls/v1/subroundSignature_test.go     |  776 ++++++++
 consensus/spos/bls/v1/subroundStartRound.go   |  374 ++++
 .../spos/bls/v1/subroundStartRound_test.go    |  835 ++++++++
 consensus/spos/bls/{ => v2}/benchmark_test.go |    2 +-
 .../benchmark_verify_signatures_test.go       |    2 +-
 .../spos/bls/{ => v2}/blsSubroundsFactory.go  |    2 +-
 .../bls/{ => v2}/blsSubroundsFactory_test.go  |    2 +-
 consensus/spos/bls/v2/blsWorker.go            |  163 ++
 consensus/spos/bls/{ => v2}/blsWorker_test.go |    2 +-
 consensus/spos/bls/v2/constants.go            |  126 ++
 consensus/spos/bls/v2/errors.go               |    6 +
 consensus/spos/bls/{ => v2}/export_test.go    |    2 +-
 consensus/spos/bls/{ => v2}/subroundBlock.go  |    3 +-
 .../spos/bls/{ => v2}/subroundBlock_test.go   |    2 +-
 .../spos/bls/{ => v2}/subroundEndRound.go     |    2 +-
 .../bls/{ => v2}/subroundEndRound_test.go     |    2 +-
 .../spos/bls/{ => v2}/subroundSignature.go    |    2 +-
 .../bls/{ => v2}/subroundSignature_test.go    |    2 +-
 .../spos/bls/{ => v2}/subroundStartRound.go   |    2 +-
 .../bls/{ => v2}/subroundStartRound_test.go   |    2 +-
 32 files changed, 8936 insertions(+), 18 deletions(-)
 create mode 100644 consensus/spos/bls/v1/blsSubroundsFactory.go
 create mode 100644 consensus/spos/bls/v1/blsSubroundsFactory_test.go
 rename consensus/spos/bls/{ => v1}/blsWorker.go (99%)
 create mode 100644 consensus/spos/bls/v1/blsWorker_test.go
 rename consensus/spos/bls/{ => v1}/constants.go (99%)
 rename consensus/spos/bls/{ => v1}/errors.go (93%)
 create mode 100644 consensus/spos/bls/v1/export_test.go
 create mode 100644 consensus/spos/bls/v1/subroundBlock.go
 create mode 100644 consensus/spos/bls/v1/subroundBlock_test.go
 create mode 100644 consensus/spos/bls/v1/subroundEndRound.go
 create mode 100644 consensus/spos/bls/v1/subroundEndRound_test.go
 create mode 100644 consensus/spos/bls/v1/subroundSignature.go
 create mode 100644 consensus/spos/bls/v1/subroundSignature_test.go
 create mode 100644 consensus/spos/bls/v1/subroundStartRound.go
 create mode 100644 consensus/spos/bls/v1/subroundStartRound_test.go
 rename consensus/spos/bls/{ => v2}/benchmark_test.go (99%)
 rename consensus/spos/bls/{ => v2}/benchmark_verify_signatures_test.go (99%)
 rename consensus/spos/bls/{ => v2}/blsSubroundsFactory.go (99%)
 rename consensus/spos/bls/{ => v2}/blsSubroundsFactory_test.go (99%)
 create mode 100644 consensus/spos/bls/v2/blsWorker.go
 rename consensus/spos/bls/{ => v2}/blsWorker_test.go (99%)
 create mode 100644 consensus/spos/bls/v2/constants.go
 create mode 100644 consensus/spos/bls/v2/errors.go
 rename consensus/spos/bls/{ => v2}/export_test.go (99%)
 rename consensus/spos/bls/{ => v2}/subroundBlock.go (99%)
 rename consensus/spos/bls/{ => v2}/subroundBlock_test.go (99%)
 rename consensus/spos/bls/{ =>
v2}/subroundEndRound.go (99%) rename consensus/spos/bls/{ => v2}/subroundEndRound_test.go (99%) rename consensus/spos/bls/{ => v2}/subroundSignature.go (99%) rename consensus/spos/bls/{ => v2}/subroundSignature_test.go (99%) rename consensus/spos/bls/{ => v2}/subroundStartRound.go (99%) rename consensus/spos/bls/{ => v2}/subroundStartRound_test.go (99%) diff --git a/consensus/spos/bls/v1/blsSubroundsFactory.go b/consensus/spos/bls/v1/blsSubroundsFactory.go new file mode 100644 index 00000000000..8f6f7c1822d --- /dev/null +++ b/consensus/spos/bls/v1/blsSubroundsFactory.go @@ -0,0 +1,298 @@ +package v1 + +import ( + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/outport" +) + +// factory defines the data needed by this factory to create all the subrounds and give them their specific +// functionality +type factory struct { + consensusCore spos.ConsensusCoreHandler + consensusState *spos.ConsensusState + worker spos.WorkerHandler + + appStatusHandler core.AppStatusHandler + outportHandler outport.OutportHandler + sentSignaturesTracker spos.SentSignaturesTracker + chainID []byte + currentPid core.PeerID +} + +// NewSubroundsFactory creates a new consensusState object +func NewSubroundsFactory( + consensusDataContainer spos.ConsensusCoreHandler, + consensusState *spos.ConsensusState, + worker spos.WorkerHandler, + chainID []byte, + currentPid core.PeerID, + appStatusHandler core.AppStatusHandler, + sentSignaturesTracker spos.SentSignaturesTracker, +) (*factory, error) { + err := checkNewFactoryParams( + consensusDataContainer, + consensusState, + worker, + chainID, + appStatusHandler, + sentSignaturesTracker, + ) + if err != nil { + return nil, err + } + + fct := factory{ + consensusCore: consensusDataContainer, + consensusState: consensusState, + worker: worker, + appStatusHandler: appStatusHandler, + chainID: chainID, + currentPid: currentPid, + sentSignaturesTracker: sentSignaturesTracker, + } + + return &fct, nil +} + +func checkNewFactoryParams( + container spos.ConsensusCoreHandler, + state *spos.ConsensusState, + worker spos.WorkerHandler, + chainID []byte, + appStatusHandler core.AppStatusHandler, + sentSignaturesTracker spos.SentSignaturesTracker, +) error { + err := spos.ValidateConsensusCore(container) + if err != nil { + return err + } + if state == nil { + return spos.ErrNilConsensusState + } + if check.IfNil(worker) { + return spos.ErrNilWorker + } + if check.IfNil(appStatusHandler) { + return spos.ErrNilAppStatusHandler + } + if check.IfNil(sentSignaturesTracker) { + return ErrNilSentSignatureTracker + } + if len(chainID) == 0 { + return spos.ErrInvalidChainID + } + + return nil +} + +// SetOutportHandler method will update the value of the factory's outport +func (fct *factory) SetOutportHandler(driver outport.OutportHandler) { + fct.outportHandler = driver +} + +// GenerateSubrounds will generate the subrounds used in BLS Cns +func (fct *factory) GenerateSubrounds() error { + fct.initConsensusThreshold() + fct.consensusCore.Chronology().RemoveAllSubrounds() + fct.worker.RemoveAllReceivedMessagesCalls() + + err := fct.generateStartRoundSubround() + if err != nil { + return err + } + + err = fct.generateBlockSubround() + if err != nil { + return err + } + + err = fct.generateSignatureSubround() + if err != nil { + return err + } + + err = fct.generateEndRoundSubround() + if err != nil { + return err + } + + return nil +} + 
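// Illustrative sketch (annotation, not part of this patch): how a caller might
// wire this v1 subrounds factory, based on the constructor above and on the
// expectations in blsSubroundsFactory_test.go below. consensusCore,
// consensusState, worker, appStatusHandler, tracker and outportDriver are
// placeholder names.
//
//	fct, err := v1.NewSubroundsFactory(
//		consensusCore,    // spos.ConsensusCoreHandler
//		consensusState,   // *spos.ConsensusState
//		worker,           // spos.WorkerHandler
//		[]byte("chain ID"),
//		core.PeerID("pid"),
//		appStatusHandler, // core.AppStatusHandler
//		tracker,          // spos.SentSignaturesTracker
//	)
//	if err != nil {
//		return err
//	}
//	// the outport driver has to be set before generating the subrounds,
//	// otherwise GenerateSubrounds fails with outport.ErrNilDriver
//	fct.SetOutportHandler(outportDriver)
//	err = fct.GenerateSubrounds() // registers StartRound, Block, Signature and EndRound with the chronology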
+func (fct *factory) getTimeDuration() time.Duration { + return fct.consensusCore.RoundHandler().TimeDuration() +} + +func (fct *factory) generateStartRoundSubround() error { + subround, err := spos.NewSubround( + -1, + SrStartRound, + SrBlock, + int64(float64(fct.getTimeDuration())*srStartStartTime), + int64(float64(fct.getTimeDuration())*srStartEndTime), + getSubroundName(SrStartRound), + fct.consensusState, + fct.worker.GetConsensusStateChangedChannel(), + fct.worker.ExecuteStoredMessages, + fct.consensusCore, + fct.chainID, + fct.currentPid, + fct.appStatusHandler, + ) + if err != nil { + return err + } + + subroundStartRoundInstance, err := NewSubroundStartRound( + subround, + fct.worker.Extend, + processingThresholdPercent, + fct.worker.ExecuteStoredMessages, + fct.worker.ResetConsensusMessages, + fct.sentSignaturesTracker, + ) + if err != nil { + return err + } + + err = subroundStartRoundInstance.SetOutportHandler(fct.outportHandler) + if err != nil { + return err + } + + fct.consensusCore.Chronology().AddSubround(subroundStartRoundInstance) + + return nil +} + +func (fct *factory) generateBlockSubround() error { + subround, err := spos.NewSubround( + SrStartRound, + SrBlock, + SrSignature, + int64(float64(fct.getTimeDuration())*srBlockStartTime), + int64(float64(fct.getTimeDuration())*srBlockEndTime), + getSubroundName(SrBlock), + fct.consensusState, + fct.worker.GetConsensusStateChangedChannel(), + fct.worker.ExecuteStoredMessages, + fct.consensusCore, + fct.chainID, + fct.currentPid, + fct.appStatusHandler, + ) + if err != nil { + return err + } + + subroundBlockInstance, err := NewSubroundBlock( + subround, + fct.worker.Extend, + processingThresholdPercent, + ) + if err != nil { + return err + } + + fct.worker.AddReceivedMessageCall(MtBlockBodyAndHeader, subroundBlockInstance.receivedBlockBodyAndHeader) + fct.worker.AddReceivedMessageCall(MtBlockBody, subroundBlockInstance.receivedBlockBody) + fct.worker.AddReceivedMessageCall(MtBlockHeader, subroundBlockInstance.receivedBlockHeader) + fct.consensusCore.Chronology().AddSubround(subroundBlockInstance) + + return nil +} + +func (fct *factory) generateSignatureSubround() error { + subround, err := spos.NewSubround( + SrBlock, + SrSignature, + SrEndRound, + int64(float64(fct.getTimeDuration())*srSignatureStartTime), + int64(float64(fct.getTimeDuration())*srSignatureEndTime), + getSubroundName(SrSignature), + fct.consensusState, + fct.worker.GetConsensusStateChangedChannel(), + fct.worker.ExecuteStoredMessages, + fct.consensusCore, + fct.chainID, + fct.currentPid, + fct.appStatusHandler, + ) + if err != nil { + return err + } + + subroundSignatureObject, err := NewSubroundSignature( + subround, + fct.worker.Extend, + fct.appStatusHandler, + fct.sentSignaturesTracker, + ) + if err != nil { + return err + } + + fct.worker.AddReceivedMessageCall(MtSignature, subroundSignatureObject.receivedSignature) + fct.consensusCore.Chronology().AddSubround(subroundSignatureObject) + + return nil +} + +func (fct *factory) generateEndRoundSubround() error { + subround, err := spos.NewSubround( + SrSignature, + SrEndRound, + -1, + int64(float64(fct.getTimeDuration())*srEndStartTime), + int64(float64(fct.getTimeDuration())*srEndEndTime), + getSubroundName(SrEndRound), + fct.consensusState, + fct.worker.GetConsensusStateChangedChannel(), + fct.worker.ExecuteStoredMessages, + fct.consensusCore, + fct.chainID, + fct.currentPid, + fct.appStatusHandler, + ) + if err != nil { + return err + } + + subroundEndRoundObject, err := NewSubroundEndRound( + 
subround, + fct.worker.Extend, + spos.MaxThresholdPercent, + fct.worker.DisplayStatistics, + fct.appStatusHandler, + fct.sentSignaturesTracker, + ) + if err != nil { + return err + } + + fct.worker.AddReceivedMessageCall(MtBlockHeaderFinalInfo, subroundEndRoundObject.receivedBlockHeaderFinalInfo) + fct.worker.AddReceivedMessageCall(MtInvalidSigners, subroundEndRoundObject.receivedInvalidSignersInfo) + fct.worker.AddReceivedHeaderHandler(subroundEndRoundObject.receivedHeader) + fct.consensusCore.Chronology().AddSubround(subroundEndRoundObject) + + return nil +} + +func (fct *factory) initConsensusThreshold() { + pBFTThreshold := core.GetPBFTThreshold(fct.consensusState.ConsensusGroupSize()) + pBFTFallbackThreshold := core.GetPBFTFallbackThreshold(fct.consensusState.ConsensusGroupSize()) + fct.consensusState.SetThreshold(SrBlock, 1) + fct.consensusState.SetThreshold(SrSignature, pBFTThreshold) + fct.consensusState.SetFallbackThreshold(SrBlock, 1) + fct.consensusState.SetFallbackThreshold(SrSignature, pBFTFallbackThreshold) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (fct *factory) IsInterfaceNil() bool { + return fct == nil +} diff --git a/consensus/spos/bls/v1/blsSubroundsFactory_test.go b/consensus/spos/bls/v1/blsSubroundsFactory_test.go new file mode 100644 index 00000000000..9a8acd85d67 --- /dev/null +++ b/consensus/spos/bls/v1/blsSubroundsFactory_test.go @@ -0,0 +1,631 @@ +package v1_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/stretchr/testify/assert" + + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/consensus/spos" + v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" + "github.com/multiversx/mx-chain-go/outport" + "github.com/multiversx/mx-chain-go/testscommon" + consensusMock "github.com/multiversx/mx-chain-go/testscommon/consensus" + testscommonOutport "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +var chainID = []byte("chain ID") + +const currentPid = core.PeerID("pid") + +const roundTimeDuration = 100 * time.Millisecond + +func displayStatistics() { +} + +func extend(subroundId int) { + fmt.Println(subroundId) +} + +// executeStoredMessages tries to execute all the messages received which are valid for execution +func executeStoredMessages() { +} + +// resetConsensusMessages resets at the start of each round, all the previous consensus messages received +func resetConsensusMessages() { +} + +func initRoundHandlerMock() *mock.RoundHandlerMock { + return &mock.RoundHandlerMock{ + RoundIndex: 0, + TimeStampCalled: func() time.Time { + return time.Unix(0, 0) + }, + TimeDurationCalled: func() time.Duration { + return roundTimeDuration + }, + } +} + +func initWorker() spos.WorkerHandler { + sposWorker := &mock.SposWorkerMock{} + sposWorker.GetConsensusStateChangedChannelsCalled = func() chan bool { + return make(chan bool) + } + sposWorker.RemoveAllReceivedMessagesCallsCalled = func() {} + + sposWorker.AddReceivedMessageCallCalled = + func(messageType consensus.MessageType, receivedMessageCall func(ctx context.Context, cnsDta *consensus.Message) bool) { + } + + return sposWorker +} + +func initFactoryWithContainer(container *consensusMock.ConsensusCoreMock) v1.Factory { + worker := initWorker() + consensusState := initConsensusState() + + fct, 
_ := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + return fct +} + +func initFactory() v1.Factory { + container := consensusMock.InitConsensusCore() + return initFactoryWithContainer(container) +} + +func TestFactory_GetMessageTypeName(t *testing.T) { + t.Parallel() + + r := v1.GetStringValue(v1.MtBlockBodyAndHeader) + assert.Equal(t, "(BLOCK_BODY_AND_HEADER)", r) + + r = v1.GetStringValue(v1.MtBlockBody) + assert.Equal(t, "(BLOCK_BODY)", r) + + r = v1.GetStringValue(v1.MtBlockHeader) + assert.Equal(t, "(BLOCK_HEADER)", r) + + r = v1.GetStringValue(v1.MtSignature) + assert.Equal(t, "(SIGNATURE)", r) + + r = v1.GetStringValue(v1.MtBlockHeaderFinalInfo) + assert.Equal(t, "(FINAL_INFO)", r) + + r = v1.GetStringValue(v1.MtUnknown) + assert.Equal(t, "(UNKNOWN)", r) + + r = v1.GetStringValue(consensus.MessageType(-1)) + assert.Equal(t, "Undefined message type", r) +} + +func TestFactory_NewFactoryNilContainerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + worker := initWorker() + + fct, err := v1.NewSubroundsFactory( + nil, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilConsensusCore, err) +} + +func TestFactory_NewFactoryNilConsensusStateShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + worker := initWorker() + + fct, err := v1.NewSubroundsFactory( + container, + nil, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilConsensusState, err) +} + +func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetBlockchain(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilBlockChain, err) +} + +func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetBlockProcessor(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilBlockProcessor, err) +} + +func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetBootStrapper(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilBootstrapper, err) +} + +func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + 
worker := initWorker() + container.SetChronology(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilChronologyHandler, err) +} + +func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetHasher(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilHasher, err) +} + +func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetMarshalizer(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilMarshalizer, err) +} + +func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetMultiSignerContainer(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilMultiSignerContainer, err) +} + +func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetRoundHandler(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilRoundHandler, err) +} + +func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetShardCoordinator(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilShardCoordinator, err) +} + +func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetSyncTimer(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + 
worker := initWorker() + container.SetValidatorGroupSelector(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilNodesCoordinator, err) +} + +func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + nil, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilWorker, err) +} + +func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + nil, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilAppStatusHandler, err) +} + +func TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, v1.ErrNilSentSignatureTracker, err) +} + +func TestFactory_NewFactoryShouldWork(t *testing.T) { + t.Parallel() + + fct := *initFactory() + + assert.False(t, check.IfNil(&fct)) +} + +func TestFactory_NewFactoryEmptyChainIDShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + nil, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrInvalidChainID, err) +} + +func TestFactory_GenerateSubroundStartRoundShouldFailWhenNewSubroundFail(t *testing.T) { + t.Parallel() + + fct := *initFactory() + fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + return nil + } + + err := fct.GenerateStartRoundSubround() + + assert.Equal(t, spos.ErrNilChannel, err) +} + +func TestFactory_GenerateSubroundStartRoundShouldFailWhenNewSubroundStartRoundFail(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + fct := *initFactoryWithContainer(container) + container.SetSyncTimer(nil) + + err := fct.GenerateStartRoundSubround() + + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestFactory_GenerateSubroundBlockShouldFailWhenNewSubroundFail(t *testing.T) { + t.Parallel() + + fct := *initFactory() + fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + return nil + } + + err := fct.GenerateBlockSubround() + + assert.Equal(t, spos.ErrNilChannel, err) +} + +func TestFactory_GenerateSubroundBlockShouldFailWhenNewSubroundBlockFail(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + fct := *initFactoryWithContainer(container) + container.SetSyncTimer(nil) + + err := fct.GenerateBlockSubround() + + 
assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestFactory_GenerateSubroundSignatureShouldFailWhenNewSubroundFail(t *testing.T) { + t.Parallel() + + fct := *initFactory() + fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + return nil + } + + err := fct.GenerateSignatureSubround() + + assert.Equal(t, spos.ErrNilChannel, err) +} + +func TestFactory_GenerateSubroundSignatureShouldFailWhenNewSubroundSignatureFail(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + fct := *initFactoryWithContainer(container) + container.SetSyncTimer(nil) + + err := fct.GenerateSignatureSubround() + + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestFactory_GenerateSubroundEndRoundShouldFailWhenNewSubroundFail(t *testing.T) { + t.Parallel() + + fct := *initFactory() + fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + return nil + } + + err := fct.GenerateEndRoundSubround() + + assert.Equal(t, spos.ErrNilChannel, err) +} + +func TestFactory_GenerateSubroundEndRoundShouldFailWhenNewSubroundEndRoundFail(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + fct := *initFactoryWithContainer(container) + container.SetSyncTimer(nil) + + err := fct.GenerateEndRoundSubround() + + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestFactory_GenerateSubroundsShouldWork(t *testing.T) { + t.Parallel() + + subroundHandlers := 0 + + chrm := &consensusMock.ChronologyHandlerMock{} + chrm.AddSubroundCalled = func(subroundHandler consensus.SubroundHandler) { + subroundHandlers++ + } + container := consensusMock.InitConsensusCore() + container.SetChronology(chrm) + fct := *initFactoryWithContainer(container) + fct.SetOutportHandler(&testscommonOutport.OutportStub{}) + + err := fct.GenerateSubrounds() + assert.Nil(t, err) + + assert.Equal(t, 4, subroundHandlers) +} + +func TestFactory_GenerateSubroundsNilOutportShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + fct := *initFactoryWithContainer(container) + + err := fct.GenerateSubrounds() + assert.Equal(t, outport.ErrNilDriver, err) +} + +func TestFactory_SetIndexerShouldWork(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + fct := *initFactoryWithContainer(container) + + outportHandler := &testscommonOutport.OutportStub{} + fct.SetOutportHandler(outportHandler) + + assert.Equal(t, outportHandler, fct.Outport()) +} diff --git a/consensus/spos/bls/blsWorker.go b/consensus/spos/bls/v1/blsWorker.go similarity index 99% rename from consensus/spos/bls/blsWorker.go rename to consensus/spos/bls/v1/blsWorker.go index 456d4e8b1d8..602ae0e8305 100644 --- a/consensus/spos/bls/blsWorker.go +++ b/consensus/spos/bls/v1/blsWorker.go @@ -1,4 +1,4 @@ -package bls +package v1 import ( "github.com/multiversx/mx-chain-go/consensus" diff --git a/consensus/spos/bls/v1/blsWorker_test.go b/consensus/spos/bls/v1/blsWorker_test.go new file mode 100644 index 00000000000..15e5f5b03cd --- /dev/null +++ b/consensus/spos/bls/v1/blsWorker_test.go @@ -0,0 +1,417 @@ +package v1_test + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/stretchr/testify/assert" + + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" + "github.com/multiversx/mx-chain-go/testscommon" +) + +func createEligibleList(size int) []string { + 
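	// build `size` single-character public keys ("A", "B", "C", ...) that stand in
	// for validator keys in the mocked consensus group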
eligibleList := make([]string, 0) + for i := 0; i < size; i++ { + eligibleList = append(eligibleList, string([]byte{byte(i + 65)})) + } + return eligibleList +} + +func initConsensusState() *spos.ConsensusState { + return initConsensusStateWithKeysHandler(&testscommon.KeysHandlerStub{}) +} + +func initConsensusStateWithKeysHandler(keysHandler consensus.KeysHandler) *spos.ConsensusState { + consensusGroupSize := 9 + eligibleList := createEligibleList(consensusGroupSize) + + eligibleNodesPubKeys := make(map[string]struct{}) + for _, key := range eligibleList { + eligibleNodesPubKeys[key] = struct{}{} + } + + indexLeader := 1 + rcns, _ := spos.NewRoundConsensus( + eligibleNodesPubKeys, + consensusGroupSize, + eligibleList[indexLeader], + keysHandler, + ) + + rcns.SetConsensusGroup(eligibleList) + rcns.ResetRoundState() + + pBFTThreshold := consensusGroupSize*2/3 + 1 + pBFTFallbackThreshold := consensusGroupSize*1/2 + 1 + + rthr := spos.NewRoundThreshold() + rthr.SetThreshold(1, 1) + rthr.SetThreshold(2, pBFTThreshold) + rthr.SetFallbackThreshold(1, 1) + rthr.SetFallbackThreshold(2, pBFTFallbackThreshold) + + rstatus := spos.NewRoundStatus() + rstatus.ResetRoundStatus() + + cns := spos.NewConsensusState( + rcns, + rthr, + rstatus, + ) + + cns.Data = []byte("X") + cns.RoundIndex = 0 + return cns +} + +func TestWorker_NewConsensusServiceShouldWork(t *testing.T) { + t.Parallel() + + service, err := v1.NewConsensusService() + assert.Nil(t, err) + assert.False(t, check.IfNil(service)) +} + +func TestWorker_InitReceivedMessagesShouldWork(t *testing.T) { + t.Parallel() + + bnService, _ := v1.NewConsensusService() + messages := bnService.InitReceivedMessages() + + receivedMessages := make(map[consensus.MessageType][]*consensus.Message) + receivedMessages[v1.MtBlockBodyAndHeader] = make([]*consensus.Message, 0) + receivedMessages[v1.MtBlockBody] = make([]*consensus.Message, 0) + receivedMessages[v1.MtBlockHeader] = make([]*consensus.Message, 0) + receivedMessages[v1.MtSignature] = make([]*consensus.Message, 0) + receivedMessages[v1.MtBlockHeaderFinalInfo] = make([]*consensus.Message, 0) + receivedMessages[v1.MtInvalidSigners] = make([]*consensus.Message, 0) + + assert.Equal(t, len(receivedMessages), len(messages)) + assert.NotNil(t, messages[v1.MtBlockBodyAndHeader]) + assert.NotNil(t, messages[v1.MtBlockBody]) + assert.NotNil(t, messages[v1.MtBlockHeader]) + assert.NotNil(t, messages[v1.MtSignature]) + assert.NotNil(t, messages[v1.MtBlockHeaderFinalInfo]) + assert.NotNil(t, messages[v1.MtInvalidSigners]) +} + +func TestWorker_GetMessageRangeShouldWork(t *testing.T) { + t.Parallel() + + v := make([]consensus.MessageType, 0) + blsService, _ := v1.NewConsensusService() + + messagesRange := blsService.GetMessageRange() + assert.NotNil(t, messagesRange) + + for i := v1.MtBlockBodyAndHeader; i <= v1.MtInvalidSigners; i++ { + v = append(v, i) + } + assert.NotNil(t, v) + + for i, val := range messagesRange { + assert.Equal(t, v[i], val) + } +} + +func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyAndHeaderShouldWork(t *testing.T) { + t.Parallel() + + blsService, _ := v1.NewConsensusService() + + consensusState := initConsensusState() + consensusState.SetStatus(v1.SrStartRound, spos.SsFinished) + + canProceed := blsService.CanProceed(consensusState, v1.MtBlockBodyAndHeader) + assert.True(t, canProceed) +} + +func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyAndHeaderShouldNotWork(t *testing.T) { + t.Parallel() + + blsService, _ := v1.NewConsensusService() + + consensusState := 
initConsensusState() + consensusState.SetStatus(v1.SrStartRound, spos.SsNotFinished) + + canProceed := blsService.CanProceed(consensusState, v1.MtBlockBodyAndHeader) + assert.False(t, canProceed) +} + +func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyShouldWork(t *testing.T) { + t.Parallel() + + blsService, _ := v1.NewConsensusService() + + consensusState := initConsensusState() + consensusState.SetStatus(v1.SrStartRound, spos.SsFinished) + + canProceed := blsService.CanProceed(consensusState, v1.MtBlockBody) + assert.True(t, canProceed) +} + +func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyShouldNotWork(t *testing.T) { + t.Parallel() + + blsService, _ := v1.NewConsensusService() + + consensusState := initConsensusState() + consensusState.SetStatus(v1.SrStartRound, spos.SsNotFinished) + + canProceed := blsService.CanProceed(consensusState, v1.MtBlockBody) + assert.False(t, canProceed) +} + +func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockHeaderShouldWork(t *testing.T) { + t.Parallel() + + blsService, _ := v1.NewConsensusService() + + consensusState := initConsensusState() + consensusState.SetStatus(v1.SrStartRound, spos.SsFinished) + + canProceed := blsService.CanProceed(consensusState, v1.MtBlockHeader) + assert.True(t, canProceed) +} + +func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockHeaderShouldNotWork(t *testing.T) { + t.Parallel() + + blsService, _ := v1.NewConsensusService() + + consensusState := initConsensusState() + consensusState.SetStatus(v1.SrStartRound, spos.SsNotFinished) + + canProceed := blsService.CanProceed(consensusState, v1.MtBlockHeader) + assert.False(t, canProceed) +} + +func TestWorker_CanProceedWithSrBlockFinishedForMtBlockHeaderShouldWork(t *testing.T) { + t.Parallel() + + blsService, _ := v1.NewConsensusService() + + consensusState := initConsensusState() + consensusState.SetStatus(v1.SrBlock, spos.SsFinished) + + canProceed := blsService.CanProceed(consensusState, v1.MtSignature) + assert.True(t, canProceed) +} + +func TestWorker_CanProceedWithSrBlockRoundNotFinishedForMtBlockHeaderShouldNotWork(t *testing.T) { + t.Parallel() + + blsService, _ := v1.NewConsensusService() + + consensusState := initConsensusState() + consensusState.SetStatus(v1.SrBlock, spos.SsNotFinished) + + canProceed := blsService.CanProceed(consensusState, v1.MtSignature) + assert.False(t, canProceed) +} + +func TestWorker_CanProceedWithSrSignatureFinishedForMtBlockHeaderFinalInfoShouldWork(t *testing.T) { + t.Parallel() + + blsService, _ := v1.NewConsensusService() + + consensusState := initConsensusState() + consensusState.SetStatus(v1.SrSignature, spos.SsFinished) + + canProceed := blsService.CanProceed(consensusState, v1.MtBlockHeaderFinalInfo) + assert.True(t, canProceed) +} + +func TestWorker_CanProceedWithSrSignatureRoundNotFinishedForMtBlockHeaderFinalInfoShouldNotWork(t *testing.T) { + t.Parallel() + + blsService, _ := v1.NewConsensusService() + + consensusState := initConsensusState() + consensusState.SetStatus(v1.SrSignature, spos.SsNotFinished) + + canProceed := blsService.CanProceed(consensusState, v1.MtBlockHeaderFinalInfo) + assert.False(t, canProceed) +} + +func TestWorker_CanProceedWitUnkownMessageTypeShouldNotWork(t *testing.T) { + t.Parallel() + + blsService, _ := v1.NewConsensusService() + consensusState := initConsensusState() + + canProceed := blsService.CanProceed(consensusState, -1) + assert.False(t, canProceed) +} + +func TestWorker_GetSubroundName(t *testing.T) { + t.Parallel() + + service, _ := 
v1.NewConsensusService() + + r := service.GetSubroundName(v1.SrStartRound) + assert.Equal(t, "(START_ROUND)", r) + r = service.GetSubroundName(v1.SrBlock) + assert.Equal(t, "(BLOCK)", r) + r = service.GetSubroundName(v1.SrSignature) + assert.Equal(t, "(SIGNATURE)", r) + r = service.GetSubroundName(v1.SrEndRound) + assert.Equal(t, "(END_ROUND)", r) + r = service.GetSubroundName(-1) + assert.Equal(t, "Undefined subround", r) +} + +func TestWorker_GetStringValue(t *testing.T) { + t.Parallel() + + service, _ := v1.NewConsensusService() + + r := service.GetStringValue(v1.MtBlockBodyAndHeader) + assert.Equal(t, v1.BlockBodyAndHeaderStringValue, r) + r = service.GetStringValue(v1.MtBlockBody) + assert.Equal(t, v1.BlockBodyStringValue, r) + r = service.GetStringValue(v1.MtBlockHeader) + assert.Equal(t, v1.BlockHeaderStringValue, r) + r = service.GetStringValue(v1.MtSignature) + assert.Equal(t, v1.BlockSignatureStringValue, r) + r = service.GetStringValue(v1.MtBlockHeaderFinalInfo) + assert.Equal(t, v1.BlockHeaderFinalInfoStringValue, r) + r = service.GetStringValue(v1.MtUnknown) + assert.Equal(t, v1.BlockUnknownStringValue, r) + r = service.GetStringValue(-1) + assert.Equal(t, v1.BlockDefaultStringValue, r) +} + +func TestWorker_IsMessageWithBlockBodyAndHeader(t *testing.T) { + t.Parallel() + + service, _ := v1.NewConsensusService() + + ret := service.IsMessageWithBlockBodyAndHeader(v1.MtBlockBody) + assert.False(t, ret) + + ret = service.IsMessageWithBlockBodyAndHeader(v1.MtBlockHeader) + assert.False(t, ret) + + ret = service.IsMessageWithBlockBodyAndHeader(v1.MtBlockBodyAndHeader) + assert.True(t, ret) +} + +func TestWorker_IsMessageWithBlockBody(t *testing.T) { + t.Parallel() + + service, _ := v1.NewConsensusService() + + ret := service.IsMessageWithBlockBody(v1.MtBlockHeader) + assert.False(t, ret) + + ret = service.IsMessageWithBlockBody(v1.MtBlockBody) + assert.True(t, ret) +} + +func TestWorker_IsMessageWithBlockHeader(t *testing.T) { + t.Parallel() + + service, _ := v1.NewConsensusService() + + ret := service.IsMessageWithBlockHeader(v1.MtBlockBody) + assert.False(t, ret) + + ret = service.IsMessageWithBlockHeader(v1.MtBlockHeader) + assert.True(t, ret) +} + +func TestWorker_IsMessageWithSignature(t *testing.T) { + t.Parallel() + + service, _ := v1.NewConsensusService() + + ret := service.IsMessageWithSignature(v1.MtBlockBodyAndHeader) + assert.False(t, ret) + + ret = service.IsMessageWithSignature(v1.MtSignature) + assert.True(t, ret) +} + +func TestWorker_IsMessageWithFinalInfo(t *testing.T) { + t.Parallel() + + service, _ := v1.NewConsensusService() + + ret := service.IsMessageWithFinalInfo(v1.MtSignature) + assert.False(t, ret) + + ret = service.IsMessageWithFinalInfo(v1.MtBlockHeaderFinalInfo) + assert.True(t, ret) +} + +func TestWorker_IsMessageWithInvalidSigners(t *testing.T) { + t.Parallel() + + service, _ := v1.NewConsensusService() + + ret := service.IsMessageWithInvalidSigners(v1.MtBlockHeaderFinalInfo) + assert.False(t, ret) + + ret = service.IsMessageWithInvalidSigners(v1.MtInvalidSigners) + assert.True(t, ret) +} + +func TestWorker_IsSubroundSignature(t *testing.T) { + t.Parallel() + + service, _ := v1.NewConsensusService() + + ret := service.IsSubroundSignature(v1.SrEndRound) + assert.False(t, ret) + + ret = service.IsSubroundSignature(v1.SrSignature) + assert.True(t, ret) +} + +func TestWorker_IsSubroundStartRound(t *testing.T) { + t.Parallel() + + service, _ := v1.NewConsensusService() + + ret := service.IsSubroundStartRound(v1.SrSignature) + assert.False(t, ret) + + ret 
= service.IsSubroundStartRound(v1.SrStartRound) + assert.True(t, ret) +} + +func TestWorker_IsMessageTypeValid(t *testing.T) { + t.Parallel() + + service, _ := v1.NewConsensusService() + + ret := service.IsMessageTypeValid(v1.MtBlockBody) + assert.True(t, ret) + + ret = service.IsMessageTypeValid(666) + assert.False(t, ret) +} + +func TestWorker_GetMaxNumOfMessageTypeAccepted(t *testing.T) { + t.Parallel() + + service, _ := v1.NewConsensusService() + t.Run("message type signature", func(t *testing.T) { + t.Parallel() + + assert.Equal(t, v1.MaxNumOfMessageTypeSignatureAccepted, service.GetMaxNumOfMessageTypeAccepted(v1.MtSignature)) + }) + t.Run("other message types", func(t *testing.T) { + t.Parallel() + + assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(v1.MtUnknown)) + assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(v1.MtBlockBody)) + assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(v1.MtBlockHeader)) + assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(v1.MtBlockBodyAndHeader)) + assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(v1.MtBlockHeaderFinalInfo)) + }) +} diff --git a/consensus/spos/bls/constants.go b/consensus/spos/bls/v1/constants.go similarity index 99% rename from consensus/spos/bls/constants.go rename to consensus/spos/bls/v1/constants.go index 166abe70b65..1b80740483f 100644 --- a/consensus/spos/bls/constants.go +++ b/consensus/spos/bls/v1/constants.go @@ -1,8 +1,9 @@ -package bls +package v1 import ( - "github.com/multiversx/mx-chain-go/consensus" logger "github.com/multiversx/mx-chain-logger-go" + + "github.com/multiversx/mx-chain-go/consensus" ) var log = logger.GetOrCreate("consensus/spos/bls") diff --git a/consensus/spos/bls/errors.go b/consensus/spos/bls/v1/errors.go similarity index 93% rename from consensus/spos/bls/errors.go rename to consensus/spos/bls/v1/errors.go index b840f9e2c85..05c55b9592c 100644 --- a/consensus/spos/bls/errors.go +++ b/consensus/spos/bls/v1/errors.go @@ -1,4 +1,4 @@ -package bls +package v1 import "errors" diff --git a/consensus/spos/bls/v1/export_test.go b/consensus/spos/bls/v1/export_test.go new file mode 100644 index 00000000000..2eedd84cd95 --- /dev/null +++ b/consensus/spos/bls/v1/export_test.go @@ -0,0 +1,359 @@ +package v1 + +import ( + "context" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/marshal" + + cryptoCommon "github.com/multiversx/mx-chain-go/common/crypto" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/ntp" + "github.com/multiversx/mx-chain-go/outport" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) + +const ProcessingThresholdPercent = processingThresholdPercent +const DefaultMaxNumOfMessageTypeAccepted = defaultMaxNumOfMessageTypeAccepted +const MaxNumOfMessageTypeSignatureAccepted = maxNumOfMessageTypeSignatureAccepted + +// factory + +// Factory defines a type for the factory structure +type Factory *factory + +// BlockChain gets the chain handler object +func (fct *factory) BlockChain() data.ChainHandler { + return fct.consensusCore.Blockchain() +} 
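// Note: export_test.go is declared in package v1 but, by Go convention, files
// ending in _test.go are compiled only during tests. The wrappers in this file
// therefore expose unexported members of the factory and subround types to the
// external v1_test package without widening the production API.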
+ +// BlockProcessor gets the block processor object +func (fct *factory) BlockProcessor() process.BlockProcessor { + return fct.consensusCore.BlockProcessor() +} + +// Bootstrapper gets the bootstrapper object +func (fct *factory) Bootstrapper() process.Bootstrapper { + return fct.consensusCore.BootStrapper() +} + +// ChronologyHandler gets the chronology handler object +func (fct *factory) ChronologyHandler() consensus.ChronologyHandler { + return fct.consensusCore.Chronology() +} + +// ConsensusState gets the consensus state struct pointer +func (fct *factory) ConsensusState() *spos.ConsensusState { + return fct.consensusState +} + +// Hasher gets the hasher object +func (fct *factory) Hasher() hashing.Hasher { + return fct.consensusCore.Hasher() +} + +// Marshalizer gets the marshalizer object +func (fct *factory) Marshalizer() marshal.Marshalizer { + return fct.consensusCore.Marshalizer() +} + +// MultiSigner gets the multi signer object +func (fct *factory) MultiSignerContainer() cryptoCommon.MultiSignerContainer { + return fct.consensusCore.MultiSignerContainer() +} + +// RoundHandler gets the roundHandler object +func (fct *factory) RoundHandler() consensus.RoundHandler { + return fct.consensusCore.RoundHandler() +} + +// ShardCoordinator gets the shard coordinator object +func (fct *factory) ShardCoordinator() sharding.Coordinator { + return fct.consensusCore.ShardCoordinator() +} + +// SyncTimer gets the sync timer object +func (fct *factory) SyncTimer() ntp.SyncTimer { + return fct.consensusCore.SyncTimer() +} + +// NodesCoordinator gets the nodes coordinator object +func (fct *factory) NodesCoordinator() nodesCoordinator.NodesCoordinator { + return fct.consensusCore.NodesCoordinator() +} + +// Worker gets the worker object +func (fct *factory) Worker() spos.WorkerHandler { + return fct.worker +} + +// SetWorker sets the worker object +func (fct *factory) SetWorker(worker spos.WorkerHandler) { + fct.worker = worker +} + +// GenerateStartRoundSubround generates the instance of subround StartRound and added it to the chronology subrounds list +func (fct *factory) GenerateStartRoundSubround() error { + return fct.generateStartRoundSubround() +} + +// GenerateBlockSubround generates the instance of subround Block and added it to the chronology subrounds list +func (fct *factory) GenerateBlockSubround() error { + return fct.generateBlockSubround() +} + +// GenerateSignatureSubround generates the instance of subround Signature and added it to the chronology subrounds list +func (fct *factory) GenerateSignatureSubround() error { + return fct.generateSignatureSubround() +} + +// GenerateEndRoundSubround generates the instance of subround EndRound and added it to the chronology subrounds list +func (fct *factory) GenerateEndRoundSubround() error { + return fct.generateEndRoundSubround() +} + +// AppStatusHandler gets the app status handler object +func (fct *factory) AppStatusHandler() core.AppStatusHandler { + return fct.appStatusHandler +} + +// Outport gets the outport object +func (fct *factory) Outport() outport.OutportHandler { + return fct.outportHandler +} + +// subroundStartRound + +// SubroundStartRound defines a type for the subroundStartRound structure +type SubroundStartRound *subroundStartRound + +// DoStartRoundJob method does the job of the subround StartRound +func (sr *subroundStartRound) DoStartRoundJob() bool { + return sr.doStartRoundJob(context.Background()) +} + +// DoStartRoundConsensusCheck method checks if the consensus is achieved in the subround StartRound 
+func (sr *subroundStartRound) DoStartRoundConsensusCheck() bool { + return sr.doStartRoundConsensusCheck() +} + +// GenerateNextConsensusGroup generates the next consensu group based on current (random seed, shard id and round) +func (sr *subroundStartRound) GenerateNextConsensusGroup(roundIndex int64) error { + return sr.generateNextConsensusGroup(roundIndex) +} + +// InitCurrentRound inits all the stuff needed in the current round +func (sr *subroundStartRound) InitCurrentRound() bool { + return sr.initCurrentRound() +} + +// GetSentSignatureTracker returns the subroundStartRound's SentSignaturesTracker instance +func (sr *subroundStartRound) GetSentSignatureTracker() spos.SentSignaturesTracker { + return sr.sentSignatureTracker +} + +// subroundBlock + +// SubroundBlock defines a type for the subroundBlock structure +type SubroundBlock *subroundBlock + +// Blockchain gets the ChainHandler stored in the ConsensusCore +func (sr *subroundBlock) BlockChain() data.ChainHandler { + return sr.Blockchain() +} + +// DoBlockJob method does the job of the subround Block +func (sr *subroundBlock) DoBlockJob() bool { + return sr.doBlockJob(context.Background()) +} + +// ProcessReceivedBlock method processes the received proposed block in the subround Block +func (sr *subroundBlock) ProcessReceivedBlock(cnsDta *consensus.Message) bool { + return sr.processReceivedBlock(context.Background(), cnsDta) +} + +// DoBlockConsensusCheck method checks if the consensus in the subround Block is achieved +func (sr *subroundBlock) DoBlockConsensusCheck() bool { + return sr.doBlockConsensusCheck() +} + +// IsBlockReceived method checks if the block was received from the leader in the current round +func (sr *subroundBlock) IsBlockReceived(threshold int) bool { + return sr.isBlockReceived(threshold) +} + +// CreateHeader method creates the proposed block header in the subround Block +func (sr *subroundBlock) CreateHeader() (data.HeaderHandler, error) { + return sr.createHeader() +} + +// CreateBody method creates the proposed block body in the subround Block +func (sr *subroundBlock) CreateBlock(hdr data.HeaderHandler) (data.HeaderHandler, data.BodyHandler, error) { + return sr.createBlock(hdr) +} + +// SendBlockBody method sends the proposed block body in the subround Block +func (sr *subroundBlock) SendBlockBody(body data.BodyHandler, marshalizedBody []byte) bool { + return sr.sendBlockBody(body, marshalizedBody) +} + +// SendBlockHeader method sends the proposed block header in the subround Block +func (sr *subroundBlock) SendBlockHeader(header data.HeaderHandler, marshalizedHeader []byte) bool { + return sr.sendBlockHeader(header, marshalizedHeader) +} + +// ComputeSubroundProcessingMetric computes processing metric related to the subround Block +func (sr *subroundBlock) ComputeSubroundProcessingMetric(startTime time.Time, metric string) { + sr.computeSubroundProcessingMetric(startTime, metric) +} + +// ReceivedBlockBody method is called when a block body is received through the block body channel +func (sr *subroundBlock) ReceivedBlockBody(cnsDta *consensus.Message) bool { + return sr.receivedBlockBody(context.Background(), cnsDta) +} + +// ReceivedBlockHeader method is called when a block header is received through the block header channel +func (sr *subroundBlock) ReceivedBlockHeader(cnsDta *consensus.Message) bool { + return sr.receivedBlockHeader(context.Background(), cnsDta) +} + +// ReceivedBlockBodyAndHeader is called when both a header and block body have been received +func (sr *subroundBlock) 
ReceivedBlockBodyAndHeader(cnsDta *consensus.Message) bool { + return sr.receivedBlockBodyAndHeader(context.Background(), cnsDta) +} + +// subroundSignature + +// SubroundSignature defines a type for the subroundSignature structure +type SubroundSignature *subroundSignature + +// DoSignatureJob method does the job of the subround Signature +func (sr *subroundSignature) DoSignatureJob() bool { + return sr.doSignatureJob(context.Background()) +} + +// ReceivedSignature method is called when a signature is received through the signature channel +func (sr *subroundSignature) ReceivedSignature(cnsDta *consensus.Message) bool { + return sr.receivedSignature(context.Background(), cnsDta) +} + +// DoSignatureConsensusCheck method checks if the consensus in the subround Signature is achieved +func (sr *subroundSignature) DoSignatureConsensusCheck() bool { + return sr.doSignatureConsensusCheck() +} + +// AreSignaturesCollected method checks if the number of signatures received from the nodes are more than the given threshold +func (sr *subroundSignature) AreSignaturesCollected(threshold int) (bool, int) { + return sr.areSignaturesCollected(threshold) +} + +// subroundEndRound + +// SubroundEndRound defines a type for the subroundEndRound structure +type SubroundEndRound *subroundEndRound + +// DoEndRoundJob method does the job of the subround EndRound +func (sr *subroundEndRound) DoEndRoundJob() bool { + return sr.doEndRoundJob(context.Background()) +} + +// DoEndRoundConsensusCheck method checks if the consensus is achieved +func (sr *subroundEndRound) DoEndRoundConsensusCheck() bool { + return sr.doEndRoundConsensusCheck() +} + +// CheckSignaturesValidity method checks the signature validity for the nodes included in bitmap +func (sr *subroundEndRound) CheckSignaturesValidity(bitmap []byte) error { + return sr.checkSignaturesValidity(bitmap) +} + +// DoEndRoundJobByParticipant calls the unexported doEndRoundJobByParticipant function +func (sr *subroundEndRound) DoEndRoundJobByParticipant(cnsDta *consensus.Message) bool { + return sr.doEndRoundJobByParticipant(cnsDta) +} + +// DoEndRoundJobByLeader calls the unexported doEndRoundJobByLeader function +func (sr *subroundEndRound) DoEndRoundJobByLeader() bool { + return sr.doEndRoundJobByLeader() +} + +// HaveConsensusHeaderWithFullInfo calls the unexported haveConsensusHeaderWithFullInfo function +func (sr *subroundEndRound) HaveConsensusHeaderWithFullInfo(cnsDta *consensus.Message) (bool, data.HeaderHandler) { + return sr.haveConsensusHeaderWithFullInfo(cnsDta) +} + +// CreateAndBroadcastHeaderFinalInfo calls the unexported createAndBroadcastHeaderFinalInfo function +func (sr *subroundEndRound) CreateAndBroadcastHeaderFinalInfo() { + sr.createAndBroadcastHeaderFinalInfo() +} + +// ReceivedBlockHeaderFinalInfo calls the unexported receivedBlockHeaderFinalInfo function +func (sr *subroundEndRound) ReceivedBlockHeaderFinalInfo(cnsDta *consensus.Message) bool { + return sr.receivedBlockHeaderFinalInfo(context.Background(), cnsDta) +} + +// IsBlockHeaderFinalInfoValid calls the unexported isBlockHeaderFinalInfoValid function +func (sr *subroundEndRound) IsBlockHeaderFinalInfoValid(cnsDta *consensus.Message) bool { + return sr.isBlockHeaderFinalInfoValid(cnsDta) +} + +// IsConsensusHeaderReceived calls the unexported isConsensusHeaderReceived function +func (sr *subroundEndRound) IsConsensusHeaderReceived() (bool, data.HeaderHandler) { + return sr.isConsensusHeaderReceived() +} + +// IsOutOfTime calls the unexported isOutOfTime function +func (sr 
*subroundEndRound) IsOutOfTime() bool { + return sr.isOutOfTime() +} + +// VerifyNodesOnAggSigFail calls the unexported verifyNodesOnAggSigFail function +func (sr *subroundEndRound) VerifyNodesOnAggSigFail() ([]string, error) { + return sr.verifyNodesOnAggSigFail() +} + +// ComputeAggSigOnValidNodes calls the unexported computeAggSigOnValidNodes function +func (sr *subroundEndRound) ComputeAggSigOnValidNodes() ([]byte, []byte, error) { + return sr.computeAggSigOnValidNodes() +} + +// ReceivedInvalidSignersInfo calls the unexported receivedInvalidSignersInfo function +func (sr *subroundEndRound) ReceivedInvalidSignersInfo(cnsDta *consensus.Message) bool { + return sr.receivedInvalidSignersInfo(context.Background(), cnsDta) +} + +// VerifyInvalidSigners calls the unexported verifyInvalidSigners function +func (sr *subroundEndRound) VerifyInvalidSigners(invalidSigners []byte) error { + return sr.verifyInvalidSigners(invalidSigners) +} + +// GetMinConsensusGroupIndexOfManagedKeys calls the unexported getMinConsensusGroupIndexOfManagedKeys function +func (sr *subroundEndRound) GetMinConsensusGroupIndexOfManagedKeys() int { + return sr.getMinConsensusGroupIndexOfManagedKeys() +} + +// CreateAndBroadcastInvalidSigners calls the unexported createAndBroadcastInvalidSigners function +func (sr *subroundEndRound) CreateAndBroadcastInvalidSigners(invalidSigners []byte) { + sr.createAndBroadcastInvalidSigners(invalidSigners) +} + +// GetFullMessagesForInvalidSigners calls the unexported getFullMessagesForInvalidSigners function +func (sr *subroundEndRound) GetFullMessagesForInvalidSigners(invalidPubKeys []string) ([]byte, error) { + return sr.getFullMessagesForInvalidSigners(invalidPubKeys) +} + +// GetSentSignatureTracker returns the subroundEndRound's SentSignaturesTracker instance +func (sr *subroundEndRound) GetSentSignatureTracker() spos.SentSignaturesTracker { + return sr.sentSignatureTracker +} + +// GetStringValue calls the unexported getStringValue function +func GetStringValue(messageType consensus.MessageType) string { + return getStringValue(messageType) +} diff --git a/consensus/spos/bls/v1/subroundBlock.go b/consensus/spos/bls/v1/subroundBlock.go new file mode 100644 index 00000000000..8b88c5a02a8 --- /dev/null +++ b/consensus/spos/bls/v1/subroundBlock.go @@ -0,0 +1,686 @@ +package v1 + +import ( + "context" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" +) + +// maxAllowedSizeInBytes defines how many bytes are allowed as payload in a message +const maxAllowedSizeInBytes = uint32(core.MegabyteSize * 95 / 100) + +// subroundBlock defines the data needed by the subround Block +type subroundBlock struct { + *spos.Subround + + processingThresholdPercentage int +} + +// NewSubroundBlock creates a subroundBlock object +func NewSubroundBlock( + baseSubround *spos.Subround, + extend func(subroundId int), + processingThresholdPercentage int, +) (*subroundBlock, error) { + err := checkNewSubroundBlockParams(baseSubround) + if err != nil { + return nil, err + } + + srBlock := subroundBlock{ + Subround: baseSubround, + processingThresholdPercentage: processingThresholdPercentage, + } + + srBlock.Job = srBlock.doBlockJob + srBlock.Check = srBlock.doBlockConsensusCheck + srBlock.Extend = extend + + return &srBlock, nil +} + +func 
checkNewSubroundBlockParams( + baseSubround *spos.Subround, +) error { + if baseSubround == nil { + return spos.ErrNilSubround + } + + if baseSubround.ConsensusState == nil { + return spos.ErrNilConsensusState + } + + err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) + + return err +} + +// doBlockJob method does the job of the subround Block +func (sr *subroundBlock) doBlockJob(ctx context.Context) bool { + isSelfLeader := sr.IsSelfLeaderInCurrentRound() && sr.ShouldConsiderSelfKeyInConsensus() + if !isSelfLeader && !sr.IsMultiKeyLeaderInCurrentRound() { // is NOT self leader in this round? + return false + } + + if sr.RoundHandler().Index() <= sr.getRoundInLastCommittedBlock() { + return false + } + + if sr.IsLeaderJobDone(sr.Current()) { + return false + } + + if sr.IsSubroundFinished(sr.Current()) { + return false + } + + metricStatTime := time.Now() + defer sr.computeSubroundProcessingMetric(metricStatTime, common.MetricCreatedProposedBlock) + + header, err := sr.createHeader() + if err != nil { + printLogMessage(ctx, "doBlockJob.createHeader", err) + return false + } + + header, body, err := sr.createBlock(header) + if err != nil { + printLogMessage(ctx, "doBlockJob.createBlock", err) + return false + } + + sentWithSuccess := sr.sendBlock(header, body) + if !sentWithSuccess { + return false + } + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("doBlockJob.GetLeader", "error", errGetLeader) + return false + } + + err = sr.SetJobDone(leader, sr.Current(), true) + if err != nil { + log.Debug("doBlockJob.SetSelfJobDone", "error", err.Error()) + return false + } + + // placeholder for subroundBlock.doBlockJob script + + sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(header, body, sr.RoundTimeStamp) + + return true +} + +func printLogMessage(ctx context.Context, baseMessage string, err error) { + if common.IsContextDone(ctx) { + log.Debug(baseMessage + " context is closing") + return + } + + log.Debug(baseMessage, "error", err.Error()) +} + +func (sr *subroundBlock) sendBlock(header data.HeaderHandler, body data.BodyHandler) bool { + marshalizedBody, err := sr.Marshalizer().Marshal(body) + if err != nil { + log.Debug("sendBlock.Marshal: body", "error", err.Error()) + return false + } + + marshalizedHeader, err := sr.Marshalizer().Marshal(header) + if err != nil { + log.Debug("sendBlock.Marshal: header", "error", err.Error()) + return false + } + + if sr.couldBeSentTogether(marshalizedBody, marshalizedHeader) { + return sr.sendHeaderAndBlockBody(header, body, marshalizedBody, marshalizedHeader) + } + + if !sr.sendBlockBody(body, marshalizedBody) || !sr.sendBlockHeader(header, marshalizedHeader) { + return false + } + + return true +} + +func (sr *subroundBlock) couldBeSentTogether(marshalizedBody []byte, marshalizedHeader []byte) bool { + bodyAndHeaderSize := uint32(len(marshalizedBody) + len(marshalizedHeader)) + log.Debug("couldBeSentTogether", + "body size", len(marshalizedBody), + "header size", len(marshalizedHeader), + "body and header size", bodyAndHeaderSize, + "max allowed size in bytes", maxAllowedSizeInBytes) + return bodyAndHeaderSize <= maxAllowedSizeInBytes +} + +func (sr *subroundBlock) createBlock(header data.HeaderHandler) (data.HeaderHandler, data.BodyHandler, error) { + startTime := sr.RoundTimeStamp + maxTime := time.Duration(sr.EndTime()) + haveTimeInCurrentSubround := func() bool { + return sr.RoundHandler().RemainingTime(startTime, maxTime) > 0 + } + + finalHeader, blockBody, err := 
sr.BlockProcessor().CreateBlock( + header, + haveTimeInCurrentSubround, + ) + if err != nil { + return nil, nil, err + } + + return finalHeader, blockBody, nil +} + +// sendHeaderAndBlockBody method sends the proposed header and block body in the subround Block +func (sr *subroundBlock) sendHeaderAndBlockBody( + headerHandler data.HeaderHandler, + bodyHandler data.BodyHandler, + marshalizedBody []byte, + marshalizedHeader []byte, +) bool { + headerHash := sr.Hasher().Compute(string(marshalizedHeader)) + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("sendBlockBodyAndHeader.GetLeader", "error", errGetLeader) + return false + } + + cnsMsg := consensus.NewConsensusMessage( + headerHash, + nil, + marshalizedBody, + marshalizedHeader, + []byte(leader), + nil, + int(MtBlockBodyAndHeader), + sr.RoundHandler().Index(), + sr.ChainID(), + nil, + nil, + nil, + sr.GetAssociatedPid([]byte(leader)), + nil, + ) + + err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) + if err != nil { + log.Debug("sendHeaderAndBlockBody.BroadcastConsensusMessage", "error", err.Error()) + return false + } + + log.Debug("step 1: block body and header have been sent", + "nonce", headerHandler.GetNonce(), + "hash", headerHash) + + sr.Data = headerHash + sr.Body = bodyHandler + sr.Header = headerHandler + + return true +} + +// sendBlockBody method sends the proposed block body in the subround Block +func (sr *subroundBlock) sendBlockBody(bodyHandler data.BodyHandler, marshalizedBody []byte) bool { + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("sendBlockBody.GetLeader", "error", errGetLeader) + return false + } + + cnsMsg := consensus.NewConsensusMessage( + nil, + nil, + marshalizedBody, + nil, + []byte(leader), + nil, + int(MtBlockBody), + sr.RoundHandler().Index(), + sr.ChainID(), + nil, + nil, + nil, + sr.GetAssociatedPid([]byte(leader)), + nil, + ) + + err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) + if err != nil { + log.Debug("sendBlockBody.BroadcastConsensusMessage", "error", err.Error()) + return false + } + + log.Debug("step 1: block body has been sent") + + sr.Body = bodyHandler + + return true +} + +// sendBlockHeader method sends the proposed block header in the subround Block +func (sr *subroundBlock) sendBlockHeader(headerHandler data.HeaderHandler, marshalizedHeader []byte) bool { + headerHash := sr.Hasher().Compute(string(marshalizedHeader)) + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("sendBlockBody.GetLeader", "error", errGetLeader) + return false + } + + cnsMsg := consensus.NewConsensusMessage( + headerHash, + nil, + nil, + marshalizedHeader, + []byte(leader), + nil, + int(MtBlockHeader), + sr.RoundHandler().Index(), + sr.ChainID(), + nil, + nil, + nil, + sr.GetAssociatedPid([]byte(leader)), + nil, + ) + + err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) + if err != nil { + log.Debug("sendBlockHeader.BroadcastConsensusMessage", "error", err.Error()) + return false + } + + log.Debug("step 1: block header has been sent", + "nonce", headerHandler.GetNonce(), + "hash", headerHash) + + sr.Data = headerHash + sr.Header = headerHandler + + return true +} + +func (sr *subroundBlock) createHeader() (data.HeaderHandler, error) { + var nonce uint64 + var prevHash []byte + var prevRandSeed []byte + + currentHeader := sr.Blockchain().GetCurrentBlockHeader() + if check.IfNil(currentHeader) { + nonce = sr.Blockchain().GetGenesisHeader().GetNonce() + 1 + prevHash = 
sr.Blockchain().GetGenesisHeaderHash() + prevRandSeed = sr.Blockchain().GetGenesisHeader().GetRandSeed() + } else { + nonce = currentHeader.GetNonce() + 1 + prevHash = sr.Blockchain().GetCurrentBlockHeaderHash() + prevRandSeed = currentHeader.GetRandSeed() + } + + round := uint64(sr.RoundHandler().Index()) + hdr, err := sr.BlockProcessor().CreateNewHeader(round, nonce) + if err != nil { + return nil, err + } + + err = hdr.SetPrevHash(prevHash) + if err != nil { + return nil, err + } + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + return nil, errGetLeader + } + + randSeed, err := sr.SigningHandler().CreateSignatureForPublicKey(prevRandSeed, []byte(leader)) + if err != nil { + return nil, err + } + + err = hdr.SetShardID(sr.ShardCoordinator().SelfId()) + if err != nil { + return nil, err + } + + err = hdr.SetTimeStamp(uint64(sr.RoundHandler().TimeStamp().Unix())) + if err != nil { + return nil, err + } + + err = hdr.SetPrevRandSeed(prevRandSeed) + if err != nil { + return nil, err + } + + err = hdr.SetRandSeed(randSeed) + if err != nil { + return nil, err + } + + err = hdr.SetChainID(sr.ChainID()) + if err != nil { + return nil, err + } + + return hdr, nil +} + +// receivedBlockBodyAndHeader method is called when a block body and a block header is received +func (sr *subroundBlock) receivedBlockBodyAndHeader(ctx context.Context, cnsDta *consensus.Message) bool { + sw := core.NewStopWatch() + sw.Start("receivedBlockBodyAndHeader") + + defer func() { + sw.Stop("receivedBlockBodyAndHeader") + log.Debug("time measurements of receivedBlockBodyAndHeader", sw.GetMeasurements()...) + }() + + node := string(cnsDta.PubKey) + + if sr.IsConsensusDataSet() { + return false + } + + if !sr.IsNodeLeaderInCurrentRound(node) { // is NOT this node leader in current round? + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyDecreaseFactor, + ) + + return false + } + + if sr.IsBlockBodyAlreadyReceived() { + return false + } + + if sr.IsHeaderAlreadyReceived() { + return false + } + + if !sr.CanProcessReceivedMessage(cnsDta, sr.RoundHandler().Index(), sr.Current()) { + return false + } + + sr.Data = cnsDta.BlockHeaderHash + sr.Body = sr.BlockProcessor().DecodeBlockBody(cnsDta.Body) + sr.Header = sr.BlockProcessor().DecodeBlockHeader(cnsDta.Header) + + isInvalidData := check.IfNil(sr.Body) || sr.isInvalidHeaderOrData() + if isInvalidData { + return false + } + + log.Debug("step 1: block body and header have been received", + "nonce", sr.Header.GetNonce(), + "hash", cnsDta.BlockHeaderHash) + + sw.Start("processReceivedBlock") + blockProcessedWithSuccess := sr.processReceivedBlock(ctx, cnsDta) + sw.Stop("processReceivedBlock") + + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyIncreaseFactor, + ) + + return blockProcessedWithSuccess +} + +func (sr *subroundBlock) isInvalidHeaderOrData() bool { + return sr.Data == nil || check.IfNil(sr.Header) || sr.Header.CheckFieldsForNil() != nil +} + +// receivedBlockBody method is called when a block body is received through the block body channel +func (sr *subroundBlock) receivedBlockBody(ctx context.Context, cnsDta *consensus.Message) bool { + node := string(cnsDta.PubKey) + + if !sr.IsNodeLeaderInCurrentRound(node) { // is NOT this node leader in current round? 
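+	// the block body did not come from the current round's leader, so the sender's peer honesty score is decreased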
+ sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyDecreaseFactor, + ) + + return false + } + + if sr.IsBlockBodyAlreadyReceived() { + return false + } + + if !sr.CanProcessReceivedMessage(cnsDta, sr.RoundHandler().Index(), sr.Current()) { + return false + } + + sr.Body = sr.BlockProcessor().DecodeBlockBody(cnsDta.Body) + + if check.IfNil(sr.Body) { + return false + } + + log.Debug("step 1: block body has been received") + + blockProcessedWithSuccess := sr.processReceivedBlock(ctx, cnsDta) + + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyIncreaseFactor, + ) + + return blockProcessedWithSuccess +} + +// receivedBlockHeader method is called when a block header is received through the block header channel. +// If the block header is valid, then the validatorRoundStates map corresponding to the node which sent it, +// is set on true for the subround Block +func (sr *subroundBlock) receivedBlockHeader(ctx context.Context, cnsDta *consensus.Message) bool { + node := string(cnsDta.PubKey) + + if sr.IsConsensusDataSet() { + return false + } + + if !sr.IsNodeLeaderInCurrentRound(node) { // is NOT this node leader in current round? + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyDecreaseFactor, + ) + + return false + } + + if sr.IsHeaderAlreadyReceived() { + return false + } + + if !sr.CanProcessReceivedMessage(cnsDta, sr.RoundHandler().Index(), sr.Current()) { + return false + } + + sr.Data = cnsDta.BlockHeaderHash + sr.Header = sr.BlockProcessor().DecodeBlockHeader(cnsDta.Header) + + if sr.isInvalidHeaderOrData() { + return false + } + + log.Debug("step 1: block header has been received", + "nonce", sr.Header.GetNonce(), + "hash", cnsDta.BlockHeaderHash) + blockProcessedWithSuccess := sr.processReceivedBlock(ctx, cnsDta) + + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyIncreaseFactor, + ) + + return blockProcessedWithSuccess +} + +func (sr *subroundBlock) processReceivedBlock(ctx context.Context, cnsDta *consensus.Message) bool { + if check.IfNil(sr.Body) { + return false + } + if check.IfNil(sr.Header) { + return false + } + + defer func() { + sr.SetProcessingBlock(false) + }() + + sr.SetProcessingBlock(true) + + shouldNotProcessBlock := sr.ExtendedCalled || cnsDta.RoundIndex < sr.RoundHandler().Index() + if shouldNotProcessBlock { + log.Debug("canceled round, extended has been called or round index has been changed", + "round", sr.RoundHandler().Index(), + "subround", sr.Name(), + "cnsDta round", cnsDta.RoundIndex, + "extended called", sr.ExtendedCalled, + ) + return false + } + + node := string(cnsDta.PubKey) + + startTime := sr.RoundTimeStamp + maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 + remainingTimeInCurrentRound := func() time.Duration { + return sr.RoundHandler().RemainingTime(startTime, maxTime) + } + + metricStatTime := time.Now() + defer sr.computeSubroundProcessingMetric(metricStatTime, common.MetricProcessedProposedBlock) + + err := sr.BlockProcessor().ProcessBlock( + sr.Header, + sr.Body, + remainingTimeInCurrentRound, + ) + + if cnsDta.RoundIndex < sr.RoundHandler().Index() { + log.Debug("canceled round, round index has been changed", + "round", sr.RoundHandler().Index(), + "subround", sr.Name(), + "cnsDta round", cnsDta.RoundIndex, + ) 
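+	// the consensus message refers to an already passed round, so the processed block is discarded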
+ return false + } + + if err != nil { + sr.printCancelRoundLogMessage(ctx, err) + sr.RoundCanceled = true + + return false + } + + err = sr.SetJobDone(node, sr.Current(), true) + if err != nil { + sr.printCancelRoundLogMessage(ctx, err) + return false + } + + sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(sr.Header, sr.Body, sr.RoundTimeStamp) + + return true +} + +func (sr *subroundBlock) printCancelRoundLogMessage(ctx context.Context, err error) { + if common.IsContextDone(ctx) { + log.Debug("canceled round as the context is closing") + return + } + + log.Debug("canceled round", + "round", sr.RoundHandler().Index(), + "subround", sr.Name(), + "error", err.Error()) +} + +func (sr *subroundBlock) computeSubroundProcessingMetric(startTime time.Time, metric string) { + subRoundDuration := sr.EndTime() - sr.StartTime() + if subRoundDuration == 0 { + // can not do division by 0 + return + } + + percent := uint64(time.Since(startTime)) * 100 / uint64(subRoundDuration) + sr.AppStatusHandler().SetUInt64Value(metric, percent) +} + +// doBlockConsensusCheck method checks if the consensus in the subround Block is achieved +func (sr *subroundBlock) doBlockConsensusCheck() bool { + if sr.RoundCanceled { + return false + } + + if sr.IsSubroundFinished(sr.Current()) { + return true + } + + threshold := sr.Threshold(sr.Current()) + if sr.isBlockReceived(threshold) { + log.Debug("step 1: subround has been finished", + "subround", sr.Name()) + sr.SetStatus(sr.Current(), spos.SsFinished) + return true + } + + return false +} + +// isBlockReceived method checks if the block was received from the leader in the current round +func (sr *subroundBlock) isBlockReceived(threshold int) bool { + n := 0 + + for i := 0; i < len(sr.ConsensusGroup()); i++ { + node := sr.ConsensusGroup()[i] + isJobDone, err := sr.JobDone(node, sr.Current()) + if err != nil { + log.Debug("isBlockReceived.JobDone", + "node", node, + "subround", sr.Name(), + "error", err.Error()) + continue + } + + if isJobDone { + n++ + } + } + + return n >= threshold +} + +func (sr *subroundBlock) getRoundInLastCommittedBlock() int64 { + roundInLastCommittedBlock := int64(0) + currentHeader := sr.Blockchain().GetCurrentBlockHeader() + if !check.IfNil(currentHeader) { + roundInLastCommittedBlock = int64(currentHeader.GetRound()) + } + + return roundInLastCommittedBlock +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sr *subroundBlock) IsInterfaceNil() bool { + return sr == nil +} diff --git a/consensus/spos/bls/v1/subroundBlock_test.go b/consensus/spos/bls/v1/subroundBlock_test.go new file mode 100644 index 00000000000..8a3289b4d5d --- /dev/null +++ b/consensus/spos/bls/v1/subroundBlock_test.go @@ -0,0 +1,1125 @@ +package v1_test + +import ( + "errors" + "fmt" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +func defaultSubroundForSRBlock(consensusState *spos.ConsensusState, ch chan bool, + container *mock.ConsensusCoreMock, appStatusHandler 
core.AppStatusHandler) (*spos.Subround, error) { + return spos.NewSubround( + v1.SrStartRound, + v1.SrBlock, + v1.SrSignature, + int64(5*roundTimeDuration/100), + int64(25*roundTimeDuration/100), + "(BLOCK)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) +} + +func createDefaultHeader() *block.Header { + return &block.Header{ + Nonce: 1, + PrevHash: []byte("prev hash"), + PrevRandSeed: []byte("prev rand seed"), + RandSeed: []byte("rand seed"), + RootHash: []byte("roothash"), + TxCount: 0, + ChainID: []byte("chain ID"), + SoftwareVersion: []byte("software version"), + AccumulatedFees: big.NewInt(0), + DeveloperFees: big.NewInt(0), + } +} + +func defaultSubroundBlockFromSubround(sr *spos.Subround) (v1.SubroundBlock, error) { + srBlock, err := v1.NewSubroundBlock( + sr, + extend, + v1.ProcessingThresholdPercent, + ) + + return srBlock, err +} + +func defaultSubroundBlockWithoutErrorFromSubround(sr *spos.Subround) v1.SubroundBlock { + srBlock, _ := v1.NewSubroundBlock( + sr, + extend, + v1.ProcessingThresholdPercent, + ) + + return srBlock +} + +func initSubroundBlock( + blockChain data.ChainHandler, + container *mock.ConsensusCoreMock, + appStatusHandler core.AppStatusHandler, +) v1.SubroundBlock { + if blockChain == nil { + blockChain = &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &block.Header{} + }, + GetGenesisHeaderCalled: func() data.HeaderHandler { + return &block.Header{ + Nonce: uint64(0), + Signature: []byte("genesis signature"), + RandSeed: []byte{0}, + } + }, + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis header hash") + }, + } + } + + consensusState := initConsensusState() + ch := make(chan bool, 1) + + container.SetBlockchain(blockChain) + + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, appStatusHandler) + srBlock, _ := defaultSubroundBlockFromSubround(sr) + return srBlock +} + +func createConsensusContainers() []*mock.ConsensusCoreMock { + consensusContainers := make([]*mock.ConsensusCoreMock, 0) + container := mock.InitConsensusCore() + consensusContainers = append(consensusContainers, container) + container = mock.InitConsensusCoreHeaderV2() + consensusContainers = append(consensusContainers, container) + return consensusContainers +} + +func initSubroundBlockWithBlockProcessor( + bp *testscommon.BlockProcessorStub, + container *mock.ConsensusCoreMock, +) v1.SubroundBlock { + blockChain := &testscommon.ChainHandlerStub{ + GetGenesisHeaderCalled: func() data.HeaderHandler { + return &block.Header{ + Nonce: uint64(0), + Signature: []byte("genesis signature"), + } + }, + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis header hash") + }, + } + blockProcessorMock := bp + + container.SetBlockchain(blockChain) + container.SetBlockProcessor(blockProcessorMock) + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + srBlock, _ := defaultSubroundBlockFromSubround(sr) + return srBlock +} + +func TestSubroundBlock_NewSubroundBlockNilSubroundShouldFail(t *testing.T) { + t.Parallel() + + srBlock, err := v1.NewSubroundBlock( + nil, + extend, + v1.ProcessingThresholdPercent, + ) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilSubround, err) +} + +func TestSubroundBlock_NewSubroundBlockNilBlockchainShouldFail(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + + 
consensusState := initConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetBlockchain(nil) + + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilBlockChain, err) +} + +func TestSubroundBlock_NewSubroundBlockNilBlockProcessorShouldFail(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetBlockProcessor(nil) + + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilBlockProcessor, err) +} + +func TestSubroundBlock_NewSubroundBlockNilConsensusStateShouldFail(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + sr.ConsensusState = nil + + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilConsensusState, err) +} + +func TestSubroundBlock_NewSubroundBlockNilHasherShouldFail(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetHasher(nil) + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilHasher, err) +} + +func TestSubroundBlock_NewSubroundBlockNilMarshalizerShouldFail(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetMarshalizer(nil) + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilMarshalizer, err) +} + +func TestSubroundBlock_NewSubroundBlockNilMultiSignerContainerShouldFail(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetMultiSignerContainer(nil) + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilMultiSignerContainer, err) +} + +func TestSubroundBlock_NewSubroundBlockNilRoundHandlerShouldFail(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetRoundHandler(nil) + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilRoundHandler, err) +} + +func TestSubroundBlock_NewSubroundBlockNilShardCoordinatorShouldFail(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, 
&statusHandler.AppStatusHandlerStub{}) + + container.SetShardCoordinator(nil) + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilShardCoordinator, err) +} + +func TestSubroundBlock_NewSubroundBlockNilSyncTimerShouldFail(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetSyncTimer(nil) + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestSubroundBlock_NewSubroundBlockShouldWork(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.NotNil(t, srBlock) + assert.Nil(t, err) +} + +func TestSubroundBlock_DoBlockJob(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + r := sr.DoBlockJob() + assert.False(t, r) + + sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + _ = sr.SetJobDone(sr.SelfPubKey(), v1.SrBlock, true) + r = sr.DoBlockJob() + assert.False(t, r) + + _ = sr.SetJobDone(sr.SelfPubKey(), v1.SrBlock, false) + sr.SetStatus(v1.SrBlock, spos.SsFinished) + r = sr.DoBlockJob() + assert.False(t, r) + + sr.SetStatus(v1.SrBlock, spos.SsNotFinished) + bpm := &testscommon.BlockProcessorStub{} + err := errors.New("error") + bpm.CreateBlockCalled = func(header data.HeaderHandler, remainingTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return header, nil, err + } + container.SetBlockProcessor(bpm) + r = sr.DoBlockJob() + assert.False(t, r) + + bpm = mock.InitBlockProcessorMock(container.Marshalizer()) + container.SetBlockProcessor(bpm) + bm := &mock.BroadcastMessengerMock{ + BroadcastConsensusMessageCalled: func(message *consensus.Message) error { + return nil + }, + } + container.SetBroadcastMessenger(bm) + container.SetRoundHandler(&mock.RoundHandlerMock{ + RoundIndex: 1, + }) + r = sr.DoBlockJob() + assert.True(t, r) + assert.Equal(t, uint64(1), sr.Header.GetNonce()) +} + +func TestSubroundBlock_ReceivedBlockBodyAndHeaderDataAlreadySet(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + + hdr := &block.Header{Nonce: 1} + blkBody := &block.Body{} + + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), v1.MtBlockBodyAndHeader) + + sr.Data = []byte("some data") + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + assert.False(t, r) +} + +func TestSubroundBlock_ReceivedBlockBodyAndHeaderNodeNotLeaderInCurrentRound(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + + hdr := &block.Header{Nonce: 1} + blkBody := &block.Body{} + + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[1]), v1.MtBlockBodyAndHeader) + + sr.Data = nil + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + assert.False(t, r) +} + +func TestSubroundBlock_ReceivedBlockBodyAndHeaderCannotProcessJobDone(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() 
+ sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + + hdr := &block.Header{Nonce: 1} + blkBody := &block.Body{} + + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), v1.MtBlockBodyAndHeader) + + sr.Data = nil + _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrBlock, true) + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + + assert.False(t, r) +} + +func TestSubroundBlock_ReceivedBlockBodyAndHeaderErrorDecoding(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + blProc := mock.InitBlockProcessorMock(container.Marshalizer()) + blProc.DecodeBlockHeaderCalled = func(dta []byte) data.HeaderHandler { + // error decoding so return nil + return nil + } + container.SetBlockProcessor(blProc) + + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + + hdr := &block.Header{Nonce: 1} + blkBody := &block.Body{} + + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), v1.MtBlockBodyAndHeader) + + sr.Data = nil + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + + assert.False(t, r) +} + +func TestSubroundBlock_ReceivedBlockBodyAndHeaderBodyAlreadyReceived(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + + hdr := &block.Header{Nonce: 1} + blkBody := &block.Body{} + + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), v1.MtBlockBodyAndHeader) + + sr.Data = nil + sr.Body = &block.Body{} + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + + assert.False(t, r) +} + +func TestSubroundBlock_ReceivedBlockBodyAndHeaderHeaderAlreadyReceived(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + + hdr := &block.Header{Nonce: 1} + blkBody := &block.Body{} + + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), v1.MtBlockBodyAndHeader) + + sr.Data = nil + sr.Header = &block.Header{Nonce: 1} + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + assert.False(t, r) +} + +func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + + t.Run("block is valid", func(t *testing.T) { + hdr := createDefaultHeader() + blkBody := &block.Body{} + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), v1.MtBlockBodyAndHeader) + sr.Data = nil + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + assert.True(t, r) + }) + t.Run("block is not valid", func(t *testing.T) { + hdr := &block.Header{ + Nonce: 1, + } + blkBody := &block.Body{} + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), v1.MtBlockBodyAndHeader) + sr.Data = nil + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + assert.False(t, r) + }) +} + +func createConsensusMessage(header *block.Header, body *block.Body, leader []byte, topic consensus.MessageType) *consensus.Message { + marshaller := &mock.MarshalizerMock{} + hasher := &hashingMocks.HasherMock{} + + hdrStr, _ := marshaller.Marshal(header) + hdrHash := hasher.Compute(string(hdrStr)) + blkBodyStr, _ := marshaller.Marshal(body) + + return consensus.NewConsensusMessage( + hdrHash, + nil, + blkBodyStr, + hdrStr, + leader, + []byte("sig"), + int(topic), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) +} + +func TestSubroundBlock_ReceivedBlock(t 
*testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + blockProcessorMock := mock.InitBlockProcessorMock(container.Marshalizer()) + blkBody := &block.Body{} + blkBodyStr, _ := mock.MarshalizerMock{}.Marshal(blkBody) + cnsMsg := consensus.NewConsensusMessage( + nil, + nil, + blkBodyStr, + nil, + []byte(sr.ConsensusGroup()[0]), + []byte("sig"), + int(v1.MtBlockBody), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + sr.Body = &block.Body{} + r := sr.ReceivedBlockBody(cnsMsg) + assert.False(t, r) + + sr.Body = nil + cnsMsg.PubKey = []byte(sr.ConsensusGroup()[1]) + r = sr.ReceivedBlockBody(cnsMsg) + assert.False(t, r) + + cnsMsg.PubKey = []byte(sr.ConsensusGroup()[0]) + sr.SetStatus(v1.SrBlock, spos.SsFinished) + r = sr.ReceivedBlockBody(cnsMsg) + assert.False(t, r) + + sr.SetStatus(v1.SrBlock, spos.SsNotFinished) + r = sr.ReceivedBlockBody(cnsMsg) + assert.False(t, r) + + hdr := createDefaultHeader() + hdr.Nonce = 2 + hdrStr, _ := container.Marshalizer().Marshal(hdr) + hdrHash := (&hashingMocks.HasherMock{}).Compute(string(hdrStr)) + cnsMsg = consensus.NewConsensusMessage( + hdrHash, + nil, + nil, + hdrStr, + []byte(sr.ConsensusGroup()[0]), + []byte("sig"), + int(v1.MtBlockHeader), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + r = sr.ReceivedBlockHeader(cnsMsg) + assert.False(t, r) + + sr.Data = nil + sr.Header = hdr + r = sr.ReceivedBlockHeader(cnsMsg) + assert.False(t, r) + + sr.Header = nil + cnsMsg.PubKey = []byte(sr.ConsensusGroup()[1]) + r = sr.ReceivedBlockHeader(cnsMsg) + assert.False(t, r) + + cnsMsg.PubKey = []byte(sr.ConsensusGroup()[0]) + sr.SetStatus(v1.SrBlock, spos.SsFinished) + r = sr.ReceivedBlockHeader(cnsMsg) + assert.False(t, r) + + sr.SetStatus(v1.SrBlock, spos.SsNotFinished) + container.SetBlockProcessor(blockProcessorMock) + sr.Data = nil + sr.Header = nil + hdr = createDefaultHeader() + hdr.Nonce = 1 + hdrStr, _ = mock.MarshalizerMock{}.Marshal(hdr) + hdrHash = (&hashingMocks.HasherMock{}).Compute(string(hdrStr)) + cnsMsg.BlockHeaderHash = hdrHash + cnsMsg.Header = hdrStr + r = sr.ReceivedBlockHeader(cnsMsg) + assert.True(t, r) +} + +func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenBodyAndHeaderAreNotSet(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + cnsMsg := consensus.NewConsensusMessage( + nil, + nil, + nil, + nil, + []byte(sr.ConsensusGroup()[0]), + []byte("sig"), + int(v1.MtBlockBodyAndHeader), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + assert.False(t, sr.ProcessReceivedBlock(cnsMsg)) +} + +func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFails(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + blProcMock := mock.InitBlockProcessorMock(container.Marshalizer()) + err := errors.New("error process block") + blProcMock.ProcessBlockCalled = func(data.HeaderHandler, data.BodyHandler, func() time.Duration) error { + return err + } + container.SetBlockProcessor(blProcMock) + hdr := &block.Header{} + blkBody := &block.Body{} + blkBodyStr, _ := mock.MarshalizerMock{}.Marshal(blkBody) + cnsMsg := consensus.NewConsensusMessage( + nil, + nil, + blkBodyStr, + nil, + []byte(sr.ConsensusGroup()[0]), + []byte("sig"), + int(v1.MtBlockBody), + 0, + chainID, + nil, + nil, + nil, + 
currentPid, + nil, + ) + sr.Header = hdr + sr.Body = blkBody + assert.False(t, sr.ProcessReceivedBlock(cnsMsg)) +} + +func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockReturnsInNextRound(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + hdr := &block.Header{} + blkBody := &block.Body{} + blkBodyStr, _ := mock.MarshalizerMock{}.Marshal(blkBody) + cnsMsg := consensus.NewConsensusMessage( + nil, + nil, + blkBodyStr, + nil, + []byte(sr.ConsensusGroup()[0]), + []byte("sig"), + int(v1.MtBlockBody), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + sr.Header = hdr + sr.Body = blkBody + blockProcessorMock := mock.InitBlockProcessorMock(container.Marshalizer()) + blockProcessorMock.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { + return errors.New("error") + } + container.SetBlockProcessor(blockProcessorMock) + container.SetRoundHandler(&mock.RoundHandlerMock{RoundIndex: 1}) + assert.False(t, sr.ProcessReceivedBlock(cnsMsg)) +} + +func TestSubroundBlock_ProcessReceivedBlockShouldReturnTrue(t *testing.T) { + t.Parallel() + + consensusContainers := createConsensusContainers() + for _, container := range consensusContainers { + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + hdr, _ := container.BlockProcessor().CreateNewHeader(1, 1) + hdr, blkBody, _ := container.BlockProcessor().CreateBlock(hdr, func() bool { return true }) + + blkBodyStr, _ := mock.MarshalizerMock{}.Marshal(blkBody) + cnsMsg := consensus.NewConsensusMessage( + nil, + nil, + blkBodyStr, + nil, + []byte(sr.ConsensusGroup()[0]), + []byte("sig"), + int(v1.MtBlockBody), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + sr.Header = hdr + sr.Body = blkBody + assert.True(t, sr.ProcessReceivedBlock(cnsMsg)) + } +} + +func TestSubroundBlock_RemainingTimeShouldReturnNegativeValue(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + roundHandlerMock := initRoundHandlerMock() + container.SetRoundHandler(roundHandlerMock) + + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + remainingTimeInThisRound := func() time.Duration { + roundStartTime := sr.RoundHandler().TimeStamp() + currentTime := sr.SyncTimer().CurrentTime() + elapsedTime := currentTime.Sub(roundStartTime) + remainingTime := sr.RoundHandler().TimeDuration()*85/100 - elapsedTime + + return remainingTime + } + container.SetSyncTimer(&mock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + return time.Unix(0, 0).Add(roundTimeDuration * 84 / 100) + }}) + ret := remainingTimeInThisRound() + assert.True(t, ret > 0) + + container.SetSyncTimer(&mock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + return time.Unix(0, 0).Add(roundTimeDuration * 85 / 100) + }}) + ret = remainingTimeInThisRound() + assert.True(t, ret == 0) + + container.SetSyncTimer(&mock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + return time.Unix(0, 0).Add(roundTimeDuration * 86 / 100) + }}) + ret = remainingTimeInThisRound() + assert.True(t, ret < 0) +} + +func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr.RoundCanceled = true + assert.False(t, sr.DoBlockConsensusCheck()) +} + +func 
TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenSubroundIsFinished(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr.SetStatus(v1.SrBlock, spos.SsFinished) + assert.True(t, sr.DoBlockConsensusCheck()) +} + +func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenBlockIsReceivedReturnTrue(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + for i := 0; i < sr.Threshold(v1.SrBlock); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrBlock, true) + } + assert.True(t, sr.DoBlockConsensusCheck()) +} + +func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenBlockIsReceivedReturnFalse(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + assert.False(t, sr.DoBlockConsensusCheck()) +} + +func TestSubroundBlock_IsBlockReceived(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + for i := 0; i < len(sr.ConsensusGroup()); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrBlock, false) + _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, false) + } + ok := sr.IsBlockReceived(1) + assert.False(t, ok) + + _ = sr.SetJobDone("A", v1.SrBlock, true) + isJobDone, _ := sr.JobDone("A", v1.SrBlock) + assert.True(t, isJobDone) + + ok = sr.IsBlockReceived(1) + assert.True(t, ok) + + ok = sr.IsBlockReceived(2) + assert.False(t, ok) +} + +func TestSubroundBlock_HaveTimeInCurrentSubroundShouldReturnTrue(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + haveTimeInCurrentSubound := func() bool { + roundStartTime := sr.RoundHandler().TimeStamp() + currentTime := sr.SyncTimer().CurrentTime() + elapsedTime := currentTime.Sub(roundStartTime) + remainingTime := sr.EndTime() - int64(elapsedTime) + + return time.Duration(remainingTime) > 0 + } + roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock.TimeDurationCalled = func() time.Duration { + return 4000 * time.Millisecond + } + roundHandlerMock.TimeStampCalled = func() time.Time { + return time.Unix(0, 0) + } + syncTimerMock := &mock.SyncTimerMock{} + timeElapsed := sr.EndTime() - 1 + syncTimerMock.CurrentTimeCalled = func() time.Time { + return time.Unix(0, timeElapsed) + } + container.SetRoundHandler(roundHandlerMock) + container.SetSyncTimer(syncTimerMock) + + assert.True(t, haveTimeInCurrentSubound()) +} + +func TestSubroundBlock_HaveTimeInCurrentSuboundShouldReturnFalse(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + haveTimeInCurrentSubound := func() bool { + roundStartTime := sr.RoundHandler().TimeStamp() + currentTime := sr.SyncTimer().CurrentTime() + elapsedTime := currentTime.Sub(roundStartTime) + remainingTime := sr.EndTime() - int64(elapsedTime) + + return time.Duration(remainingTime) > 0 + } + roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock.TimeDurationCalled = func() time.Duration { + return 4000 * time.Millisecond + } + roundHandlerMock.TimeStampCalled = func() time.Time { + return time.Unix(0, 0) + } + syncTimerMock := &mock.SyncTimerMock{} + timeElapsed := sr.EndTime() + 1 
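+	// the mocked current time is set just past the subround end time, so no time should remain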
+ syncTimerMock.CurrentTimeCalled = func() time.Time { + return time.Unix(0, timeElapsed) + } + container.SetRoundHandler(roundHandlerMock) + container.SetSyncTimer(syncTimerMock) + + assert.False(t, haveTimeInCurrentSubound()) +} + +func TestSubroundBlock_CreateHeaderNilCurrentHeader(t *testing.T) { + blockChain := &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return nil + }, + GetGenesisHeaderCalled: func() data.HeaderHandler { + return &block.Header{ + Nonce: uint64(0), + Signature: []byte("genesis signature"), + RandSeed: []byte{0}, + } + }, + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis header hash") + }, + } + + consensusContainers := createConsensusContainers() + for _, container := range consensusContainers { + sr := *initSubroundBlock(blockChain, container, &statusHandler.AppStatusHandlerStub{}) + _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(nil, nil) + header, _ := sr.CreateHeader() + header, body, _ := sr.CreateBlock(header) + marshalizedBody, _ := sr.Marshalizer().Marshal(body) + marshalizedHeader, _ := sr.Marshalizer().Marshal(header) + _ = sr.SendBlockBody(body, marshalizedBody) + _ = sr.SendBlockHeader(header, marshalizedHeader) + + expectedHeader, _ := container.BlockProcessor().CreateNewHeader(uint64(sr.RoundHandler().Index()), uint64(1)) + err := expectedHeader.SetTimeStamp(uint64(sr.RoundHandler().TimeStamp().Unix())) + require.Nil(t, err) + err = expectedHeader.SetRootHash([]byte{}) + require.Nil(t, err) + err = expectedHeader.SetPrevHash(sr.BlockChain().GetGenesisHeaderHash()) + require.Nil(t, err) + err = expectedHeader.SetPrevRandSeed(sr.BlockChain().GetGenesisHeader().GetRandSeed()) + require.Nil(t, err) + err = expectedHeader.SetRandSeed(make([]byte, 0)) + require.Nil(t, err) + err = expectedHeader.SetMiniBlockHeaderHandlers(header.GetMiniBlockHeaderHandlers()) + require.Nil(t, err) + err = expectedHeader.SetChainID(chainID) + require.Nil(t, err) + require.Equal(t, expectedHeader, header) + } +} + +func TestSubroundBlock_CreateHeaderNotNilCurrentHeader(t *testing.T) { + consensusContainers := createConsensusContainers() + for _, container := range consensusContainers { + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(&block.Header{ + Nonce: 1, + }, []byte("root hash")) + + header, _ := sr.CreateHeader() + header, body, _ := sr.CreateBlock(header) + marshalizedBody, _ := sr.Marshalizer().Marshal(body) + marshalizedHeader, _ := sr.Marshalizer().Marshal(header) + _ = sr.SendBlockBody(body, marshalizedBody) + _ = sr.SendBlockHeader(header, marshalizedHeader) + + expectedHeader, _ := container.BlockProcessor().CreateNewHeader( + uint64(sr.RoundHandler().Index()), + sr.BlockChain().GetCurrentBlockHeader().GetNonce()+1) + err := expectedHeader.SetTimeStamp(uint64(sr.RoundHandler().TimeStamp().Unix())) + require.Nil(t, err) + err = expectedHeader.SetRootHash([]byte{}) + require.Nil(t, err) + err = expectedHeader.SetPrevHash(sr.BlockChain().GetCurrentBlockHeaderHash()) + require.Nil(t, err) + err = expectedHeader.SetRandSeed(make([]byte, 0)) + require.Nil(t, err) + err = expectedHeader.SetMiniBlockHeaderHandlers(header.GetMiniBlockHeaderHandlers()) + require.Nil(t, err) + err = expectedHeader.SetChainID(chainID) + require.Nil(t, err) + require.Equal(t, expectedHeader, header) + } +} + +func TestSubroundBlock_CreateHeaderMultipleMiniBlocks(t *testing.T) { + mbHeaders := []block.MiniBlockHeader{ + {Hash: 
[]byte("mb1"), SenderShardID: 1, ReceiverShardID: 1}, + {Hash: []byte("mb2"), SenderShardID: 1, ReceiverShardID: 2}, + {Hash: []byte("mb3"), SenderShardID: 2, ReceiverShardID: 3}, + } + blockChainMock := testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &block.Header{ + Nonce: 1, + } + }, + } + container := mock.InitConsensusCore() + bp := mock.InitBlockProcessorMock(container.Marshalizer()) + bp.CreateBlockCalled = func(header data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + shardHeader, _ := header.(*block.Header) + shardHeader.MiniBlockHeaders = mbHeaders + shardHeader.RootHash = []byte{} + + return shardHeader, &block.Body{}, nil + } + sr := *initSubroundBlockWithBlockProcessor(bp, container) + container.SetBlockchain(&blockChainMock) + + header, _ := sr.CreateHeader() + header, body, _ := sr.CreateBlock(header) + marshalizedBody, _ := sr.Marshalizer().Marshal(body) + marshalizedHeader, _ := sr.Marshalizer().Marshal(header) + _ = sr.SendBlockBody(body, marshalizedBody) + _ = sr.SendBlockHeader(header, marshalizedHeader) + + expectedHeader := &block.Header{ + Round: uint64(sr.RoundHandler().Index()), + TimeStamp: uint64(sr.RoundHandler().TimeStamp().Unix()), + RootHash: []byte{}, + Nonce: sr.BlockChain().GetCurrentBlockHeader().GetNonce() + 1, + PrevHash: sr.BlockChain().GetCurrentBlockHeaderHash(), + RandSeed: make([]byte, 0), + MiniBlockHeaders: mbHeaders, + ChainID: chainID, + } + + assert.Equal(t, expectedHeader, header) +} + +func TestSubroundBlock_CreateHeaderNilMiniBlocks(t *testing.T) { + expectedErr := errors.New("nil mini blocks") + container := mock.InitConsensusCore() + bp := mock.InitBlockProcessorMock(container.Marshalizer()) + bp.CreateBlockCalled = func(header data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return nil, nil, expectedErr + } + sr := *initSubroundBlockWithBlockProcessor(bp, container) + _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(&block.Header{ + Nonce: 1, + }, []byte("root hash")) + header, _ := sr.CreateHeader() + _, _, err := sr.CreateBlock(header) + assert.Equal(t, expectedErr, err) +} + +func TestSubroundBlock_CallFuncRemainingTimeWithStructShouldWork(t *testing.T) { + roundStartTime := time.Now() + maxTime := 100 * time.Millisecond + newRoundStartTime := roundStartTime + remainingTimeInCurrentRound := func() time.Duration { + return RemainingTimeWithStruct(newRoundStartTime, maxTime) + } + assert.True(t, remainingTimeInCurrentRound() > 0) + + time.Sleep(200 * time.Millisecond) + assert.True(t, remainingTimeInCurrentRound() < 0) +} + +func TestSubroundBlock_CallFuncRemainingTimeWithStructShouldNotWork(t *testing.T) { + roundStartTime := time.Now() + maxTime := 100 * time.Millisecond + remainingTimeInCurrentRound := func() time.Duration { + return RemainingTimeWithStruct(roundStartTime, maxTime) + } + assert.True(t, remainingTimeInCurrentRound() > 0) + + time.Sleep(200 * time.Millisecond) + assert.True(t, remainingTimeInCurrentRound() < 0) + + roundStartTime = roundStartTime.Add(500 * time.Millisecond) + assert.False(t, remainingTimeInCurrentRound() < 0) +} + +func RemainingTimeWithStruct(startTime time.Time, maxTime time.Duration) time.Duration { + currentTime := time.Now() + elapsedTime := currentTime.Sub(startTime) + remainingTime := maxTime - elapsedTime + return remainingTime +} + +func TestSubroundBlock_ReceivedBlockComputeProcessDuration(t *testing.T) { + t.Parallel() + + srStartTime := int64(5 * 
roundTimeDuration / 100) + srEndTime := int64(25 * roundTimeDuration / 100) + srDuration := srEndTime - srStartTime + delay := srDuration * 430 / 1000 + + container := mock.InitConsensusCore() + receivedValue := uint64(0) + container.SetBlockProcessor(&testscommon.BlockProcessorStub{ + ProcessBlockCalled: func(_ data.HeaderHandler, _ data.BodyHandler, _ func() time.Duration) error { + time.Sleep(time.Duration(delay)) + return nil + }, + }) + sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) { + receivedValue = value + }}) + hdr := &block.Header{} + blkBody := &block.Body{} + blkBodyStr, _ := mock.MarshalizerMock{}.Marshal(blkBody) + + cnsMsg := consensus.NewConsensusMessage( + nil, + nil, + blkBodyStr, + nil, + []byte(sr.ConsensusGroup()[0]), + []byte("sig"), + int(v1.MtBlockBody), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + sr.Header = hdr + sr.Body = blkBody + + minimumExpectedValue := uint64(delay * 100 / srDuration) + _ = sr.ProcessReceivedBlock(cnsMsg) + + assert.True(t, + receivedValue >= minimumExpectedValue, + fmt.Sprintf("minimum expected was %d, got %d", minimumExpectedValue, receivedValue), + ) +} + +func TestSubroundBlock_ReceivedBlockComputeProcessDurationWithZeroDurationShouldNotPanic(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not have paniced", r) + } + }() + + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + srBlock := *defaultSubroundBlockWithoutErrorFromSubround(sr) + + srBlock.ComputeSubroundProcessingMetric(time.Now(), "dummy") +} diff --git a/consensus/spos/bls/v1/subroundEndRound.go b/consensus/spos/bls/v1/subroundEndRound.go new file mode 100644 index 00000000000..bc275f19272 --- /dev/null +++ b/consensus/spos/bls/v1/subroundEndRound.go @@ -0,0 +1,942 @@ +package v1 + +import ( + "bytes" + "context" + "fmt" + "sync" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/display" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/process/headerCheck" +) + +type subroundEndRound struct { + *spos.Subround + processingThresholdPercentage int + displayStatistics func() + appStatusHandler core.AppStatusHandler + mutProcessingEndRound sync.Mutex + sentSignatureTracker spos.SentSignaturesTracker +} + +// NewSubroundEndRound creates a subroundEndRound object +func NewSubroundEndRound( + baseSubround *spos.Subround, + extend func(subroundId int), + processingThresholdPercentage int, + displayStatistics func(), + appStatusHandler core.AppStatusHandler, + sentSignatureTracker spos.SentSignaturesTracker, +) (*subroundEndRound, error) { + err := checkNewSubroundEndRoundParams( + baseSubround, + ) + if err != nil { + return nil, err + } + if extend == nil { + return nil, fmt.Errorf("%w for extend function", spos.ErrNilFunctionHandler) + } + if check.IfNil(appStatusHandler) { + return nil, spos.ErrNilAppStatusHandler + } + if check.IfNil(sentSignatureTracker) { + return nil, ErrNilSentSignatureTracker + } + + srEndRound := 
subroundEndRound{ + Subround: baseSubround, + processingThresholdPercentage: processingThresholdPercentage, + displayStatistics: displayStatistics, + appStatusHandler: appStatusHandler, + mutProcessingEndRound: sync.Mutex{}, + sentSignatureTracker: sentSignatureTracker, + } + srEndRound.Job = srEndRound.doEndRoundJob + srEndRound.Check = srEndRound.doEndRoundConsensusCheck + srEndRound.Extend = extend + + return &srEndRound, nil +} + +func checkNewSubroundEndRoundParams( + baseSubround *spos.Subround, +) error { + if baseSubround == nil { + return spos.ErrNilSubround + } + if baseSubround.ConsensusState == nil { + return spos.ErrNilConsensusState + } + + err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) + + return err +} + +// receivedBlockHeaderFinalInfo method is called when a block header final info is received +func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsDta *consensus.Message) bool { + node := string(cnsDta.PubKey) + + if !sr.IsConsensusDataSet() { + return false + } + + if !sr.IsNodeLeaderInCurrentRound(node) { // is NOT this node leader in current round? + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyDecreaseFactor, + ) + + return false + } + + if sr.IsSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() { + return false + } + + if !sr.IsConsensusDataEqual(cnsDta.BlockHeaderHash) { + return false + } + + if !sr.CanProcessReceivedMessage(cnsDta, sr.RoundHandler().Index(), sr.Current()) { + return false + } + + if !sr.isBlockHeaderFinalInfoValid(cnsDta) { + return false + } + + log.Debug("step 3: block header final info has been received", + "PubKeysBitmap", cnsDta.PubKeysBitmap, + "AggregateSignature", cnsDta.AggregateSignature, + "LeaderSignature", cnsDta.LeaderSignature) + + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyIncreaseFactor, + ) + + return sr.doEndRoundJobByParticipant(cnsDta) +} + +func (sr *subroundEndRound) isBlockHeaderFinalInfoValid(cnsDta *consensus.Message) bool { + if check.IfNil(sr.Header) { + return false + } + + header := sr.Header.ShallowClone() + err := header.SetPubKeysBitmap(cnsDta.PubKeysBitmap) + if err != nil { + log.Debug("isBlockHeaderFinalInfoValid.SetPubKeysBitmap", "error", err.Error()) + return false + } + + err = header.SetSignature(cnsDta.AggregateSignature) + if err != nil { + log.Debug("isBlockHeaderFinalInfoValid.SetSignature", "error", err.Error()) + return false + } + + err = header.SetLeaderSignature(cnsDta.LeaderSignature) + if err != nil { + log.Debug("isBlockHeaderFinalInfoValid.SetLeaderSignature", "error", err.Error()) + return false + } + + err = sr.HeaderSigVerifier().VerifyLeaderSignature(header) + if err != nil { + log.Debug("isBlockHeaderFinalInfoValid.VerifyLeaderSignature", "error", err.Error()) + return false + } + + err = sr.HeaderSigVerifier().VerifySignature(header) + if err != nil { + log.Debug("isBlockHeaderFinalInfoValid.VerifySignature", "error", err.Error()) + return false + } + + return true +} + +// receivedInvalidSignersInfo method is called when a message with invalid signers has been received +func (sr *subroundEndRound) receivedInvalidSignersInfo(_ context.Context, cnsDta *consensus.Message) bool { + messageSender := string(cnsDta.PubKey) + + if !sr.IsConsensusDataSet() { + return false + } + + if !sr.IsNodeLeaderInCurrentRound(messageSender) { // is NOT this node leader in current round? 
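+	// invalid signers info is only accepted from the current round's leader; any other sender gets its peer honesty score decreased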
+ sr.PeerHonestyHandler().ChangeScore( + messageSender, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyDecreaseFactor, + ) + + return false + } + + if sr.IsSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() { + return false + } + + if !sr.IsConsensusDataEqual(cnsDta.BlockHeaderHash) { + return false + } + + if !sr.CanProcessReceivedMessage(cnsDta, sr.RoundHandler().Index(), sr.Current()) { + return false + } + + if len(cnsDta.InvalidSigners) == 0 { + return false + } + + err := sr.verifyInvalidSigners(cnsDta.InvalidSigners) + if err != nil { + log.Trace("receivedInvalidSignersInfo.verifyInvalidSigners", "error", err.Error()) + return false + } + + log.Debug("step 3: invalid signers info has been evaluated") + + sr.PeerHonestyHandler().ChangeScore( + messageSender, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyIncreaseFactor, + ) + + return true +} + +func (sr *subroundEndRound) verifyInvalidSigners(invalidSigners []byte) error { + messages, err := sr.MessageSigningHandler().Deserialize(invalidSigners) + if err != nil { + return err + } + + for _, msg := range messages { + err = sr.verifyInvalidSigner(msg) + if err != nil { + return err + } + } + + return nil +} + +func (sr *subroundEndRound) verifyInvalidSigner(msg p2p.MessageP2P) error { + err := sr.MessageSigningHandler().Verify(msg) + if err != nil { + return err + } + + cnsMsg := &consensus.Message{} + err = sr.Marshalizer().Unmarshal(cnsMsg, msg.Data()) + if err != nil { + return err + } + + err = sr.SigningHandler().VerifySingleSignature(cnsMsg.PubKey, cnsMsg.BlockHeaderHash, cnsMsg.SignatureShare) + if err != nil { + log.Debug("verifyInvalidSigner: confirmed that node provided invalid signature", + "pubKey", cnsMsg.PubKey, + "blockHeaderHash", cnsMsg.BlockHeaderHash, + "error", err.Error(), + ) + sr.applyBlacklistOnNode(msg.Peer()) + } + + return nil +} + +func (sr *subroundEndRound) applyBlacklistOnNode(peer core.PeerID) { + sr.PeerBlacklistHandler().BlacklistPeer(peer, common.InvalidSigningBlacklistDuration) +} + +func (sr *subroundEndRound) receivedHeader(headerHandler data.HeaderHandler) { + if sr.ConsensusGroup() == nil || sr.IsSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() { + return + } + + sr.AddReceivedHeader(headerHandler) + + sr.doEndRoundJobByParticipant(nil) +} + +// doEndRoundJob method does the job of the subround EndRound +func (sr *subroundEndRound) doEndRoundJob(_ context.Context) bool { + if !sr.IsSelfLeaderInCurrentRound() && !sr.IsMultiKeyLeaderInCurrentRound() { + if sr.IsNodeInConsensusGroup(sr.SelfPubKey()) || sr.IsMultiKeyInConsensusGroup() { + err := sr.prepareBroadcastBlockDataForValidator() + if err != nil { + log.Warn("validator in consensus group preparing for delayed broadcast", + "error", err.Error()) + } + } + + return sr.doEndRoundJobByParticipant(nil) + } + + return sr.doEndRoundJobByLeader() +} + +func (sr *subroundEndRound) doEndRoundJobByLeader() bool { + bitmap := sr.GenerateBitmap(SrSignature) + err := sr.checkSignaturesValidity(bitmap) + if err != nil { + log.Debug("doEndRoundJobByLeader.checkSignaturesValidity", "error", err.Error()) + return false + } + + if check.IfNil(sr.Header) { + log.Error("doEndRoundJobByLeader.CheckNilHeader", "error", spos.ErrNilHeader) + return false + } + + // Aggregate sig and add it to the block + bitmap, sig, err := sr.aggregateSigsAndHandleInvalidSigners(bitmap) + if err != nil { + log.Debug("doEndRoundJobByLeader.aggregateSigsAndHandleInvalidSigners", "error", 
err.Error()) + return false + } + + err = sr.Header.SetPubKeysBitmap(bitmap) + if err != nil { + log.Debug("doEndRoundJobByLeader.SetPubKeysBitmap", "error", err.Error()) + return false + } + + err = sr.Header.SetSignature(sig) + if err != nil { + log.Debug("doEndRoundJobByLeader.SetSignature", "error", err.Error()) + return false + } + + // Header is complete so the leader can sign it + leaderSignature, err := sr.signBlockHeader() + if err != nil { + log.Error(err.Error()) + return false + } + + err = sr.Header.SetLeaderSignature(leaderSignature) + if err != nil { + log.Debug("doEndRoundJobByLeader.SetLeaderSignature", "error", err.Error()) + return false + } + + ok := sr.ScheduledProcessor().IsProcessedOKWithTimeout() + // placeholder for subroundEndRound.doEndRoundJobByLeader script + if !ok { + return false + } + + roundHandler := sr.RoundHandler() + if roundHandler.RemainingTime(roundHandler.TimeStamp(), roundHandler.TimeDuration()) < 0 { + log.Debug("doEndRoundJob: time is out -> cancel broadcasting final info and header", + "round time stamp", roundHandler.TimeStamp(), + "current time", time.Now()) + return false + } + + // broadcast header and final info section + + sr.createAndBroadcastHeaderFinalInfo() + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("doEndRoundJobByLeader.GetLeader", "error", errGetLeader) + return false + } + + // broadcast header + err = sr.BroadcastMessenger().BroadcastHeader(sr.Header, []byte(leader)) + if err != nil { + log.Debug("doEndRoundJobByLeader.BroadcastHeader", "error", err.Error()) + } + + startTime := time.Now() + err = sr.BlockProcessor().CommitBlock(sr.Header, sr.Body) + elapsedTime := time.Since(startTime) + if elapsedTime >= common.CommitMaxTime { + log.Warn("doEndRoundJobByLeader.CommitBlock", "elapsed time", elapsedTime) + } else { + log.Debug("elapsed time to commit block", + "time [s]", elapsedTime, + ) + } + if err != nil { + log.Debug("doEndRoundJobByLeader.CommitBlock", "error", err) + return false + } + + sr.SetStatus(sr.Current(), spos.SsFinished) + + sr.displayStatistics() + + log.Debug("step 3: Body and Header have been committed and header has been broadcast") + + err = sr.broadcastBlockDataLeader() + if err != nil { + log.Debug("doEndRoundJobByLeader.broadcastBlockDataLeader", "error", err.Error()) + } + + msg := fmt.Sprintf("Added proposed block with nonce %d in blockchain", sr.Header.GetNonce()) + log.Debug(display.Headline(msg, sr.SyncTimer().FormattedCurrentTime(), "+")) + + sr.updateMetricsForLeader() + + return true +} + +func (sr *subroundEndRound) aggregateSigsAndHandleInvalidSigners(bitmap []byte) ([]byte, []byte, error) { + sig, err := sr.SigningHandler().AggregateSigs(bitmap, sr.Header.GetEpoch()) + if err != nil { + log.Debug("doEndRoundJobByLeader.AggregateSigs", "error", err.Error()) + + return sr.handleInvalidSignersOnAggSigFail() + } + + err = sr.SigningHandler().SetAggregatedSig(sig) + if err != nil { + log.Debug("doEndRoundJobByLeader.SetAggregatedSig", "error", err.Error()) + return nil, nil, err + } + + err = sr.SigningHandler().Verify(sr.GetData(), bitmap, sr.Header.GetEpoch()) + if err != nil { + log.Debug("doEndRoundJobByLeader.Verify", "error", err.Error()) + + return sr.handleInvalidSignersOnAggSigFail() + } + + return bitmap, sig, nil +} + +func (sr *subroundEndRound) verifyNodesOnAggSigFail() ([]string, error) { + invalidPubKeys := make([]string, 0) + pubKeys := sr.ConsensusGroup() + + if check.IfNil(sr.Header) { + return nil, spos.ErrNilHeader + } + + for i, pk := range 
pubKeys { + isJobDone, err := sr.JobDone(pk, SrSignature) + if err != nil || !isJobDone { + continue + } + + sigShare, err := sr.SigningHandler().SignatureShare(uint16(i)) + if err != nil { + return nil, err + } + + isSuccessful := true + err = sr.SigningHandler().VerifySignatureShare(uint16(i), sigShare, sr.GetData(), sr.Header.GetEpoch()) + if err != nil { + isSuccessful = false + + err = sr.SetJobDone(pk, SrSignature, false) + if err != nil { + return nil, err + } + + // revert the increase factor that was applied optimistically and apply the decrease factor, since the signature share proved to be invalid + decreaseFactor := -spos.ValidatorPeerHonestyIncreaseFactor + spos.ValidatorPeerHonestyDecreaseFactor + sr.PeerHonestyHandler().ChangeScore( + pk, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + decreaseFactor, + ) + + invalidPubKeys = append(invalidPubKeys, pk) + } + + log.Trace("verifyNodesOnAggSigFail: verifying signature share", "public key", pk, "is successful", isSuccessful) + } + + return invalidPubKeys, nil +} + +func (sr *subroundEndRound) getFullMessagesForInvalidSigners(invalidPubKeys []string) ([]byte, error) { + p2pMessages := make([]p2p.MessageP2P, 0) + + for _, pk := range invalidPubKeys { + p2pMsg, ok := sr.GetMessageWithSignature(pk) + if !ok { + log.Trace("message not found in state for invalid signer", "pubkey", pk) + continue + } + + p2pMessages = append(p2pMessages, p2pMsg) + } + + invalidSigners, err := sr.MessageSigningHandler().Serialize(p2pMessages) + if err != nil { + return nil, err + } + + return invalidSigners, nil +} + +func (sr *subroundEndRound) handleInvalidSignersOnAggSigFail() ([]byte, []byte, error) { + invalidPubKeys, err := sr.verifyNodesOnAggSigFail() + if err != nil { + log.Debug("doEndRoundJobByLeader.verifyNodesOnAggSigFail", "error", err.Error()) + return nil, nil, err + } + + invalidSigners, err := sr.getFullMessagesForInvalidSigners(invalidPubKeys) + if err != nil { + log.Debug("doEndRoundJobByLeader.getFullMessagesForInvalidSigners", "error", err.Error()) + return nil, nil, err + } + + if len(invalidSigners) > 0 { + sr.createAndBroadcastInvalidSigners(invalidSigners) + } + + bitmap, sig, err := sr.computeAggSigOnValidNodes() + if err != nil { + log.Debug("doEndRoundJobByLeader.computeAggSigOnValidNodes", "error", err.Error()) + return nil, nil, err + } + + return bitmap, sig, nil +} + +func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, []byte, error) { + threshold := sr.Threshold(sr.Current()) + numValidSigShares := sr.ComputeSize(SrSignature) + + if check.IfNil(sr.Header) { + return nil, nil, spos.ErrNilHeader + } + + if numValidSigShares < threshold { + return nil, nil, fmt.Errorf("%w: number of valid sig shares lower than threshold, numSigShares: %d, threshold: %d", + spos.ErrInvalidNumSigShares, numValidSigShares, threshold) + } + + bitmap := sr.GenerateBitmap(SrSignature) + err := sr.checkSignaturesValidity(bitmap) + if err != nil { + return nil, nil, err + } + + sig, err := sr.SigningHandler().AggregateSigs(bitmap, sr.Header.GetEpoch()) + if err != nil { + return nil, nil, err + } + + err = sr.SigningHandler().SetAggregatedSig(sig) + if err != nil { + return nil, nil, err + } + + return bitmap, sig, nil +} + +func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfo() { + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("createAndBroadcastHeaderFinalInfo.GetLeader", "error", errGetLeader) + return + } + + cnsMsg := consensus.NewConsensusMessage( + sr.GetData(), + nil, + nil, + nil, + []byte(leader), + nil, + 
int(MtBlockHeaderFinalInfo), + sr.RoundHandler().Index(), + sr.ChainID(), + sr.Header.GetPubKeysBitmap(), + sr.Header.GetSignature(), + sr.Header.GetLeaderSignature(), + sr.GetAssociatedPid([]byte(leader)), + nil, + ) + + err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) + if err != nil { + log.Debug("doEndRoundJob.BroadcastConsensusMessage", "error", err.Error()) + return + } + + log.Debug("step 3: block header final info has been sent", + "PubKeysBitmap", sr.Header.GetPubKeysBitmap(), + "AggregateSignature", sr.Header.GetSignature(), + "LeaderSignature", sr.Header.GetLeaderSignature()) +} + +func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []byte) { + isSelfLeader := sr.IsSelfLeaderInCurrentRound() && sr.ShouldConsiderSelfKeyInConsensus() + if !(isSelfLeader || sr.IsMultiKeyLeaderInCurrentRound()) { + return + } + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("createAndBroadcastInvalidSigners.GetLeader", "error", errGetLeader) + return + } + + cnsMsg := consensus.NewConsensusMessage( + sr.GetData(), + nil, + nil, + nil, + []byte(leader), + nil, + int(MtInvalidSigners), + sr.RoundHandler().Index(), + sr.ChainID(), + nil, + nil, + nil, + sr.GetAssociatedPid([]byte(leader)), + invalidSigners, + ) + + err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) + if err != nil { + log.Debug("doEndRoundJob.BroadcastConsensusMessage", "error", err.Error()) + return + } + + log.Debug("step 3: invalid signers info has been sent") +} + +func (sr *subroundEndRound) doEndRoundJobByParticipant(cnsDta *consensus.Message) bool { + sr.mutProcessingEndRound.Lock() + defer sr.mutProcessingEndRound.Unlock() + + if sr.RoundCanceled { + return false + } + if !sr.IsConsensusDataSet() { + return false + } + if !sr.IsSubroundFinished(sr.Previous()) { + return false + } + if sr.IsSubroundFinished(sr.Current()) { + return false + } + + haveHeader, header := sr.haveConsensusHeaderWithFullInfo(cnsDta) + if !haveHeader { + return false + } + + defer func() { + sr.SetProcessingBlock(false) + }() + + sr.SetProcessingBlock(true) + + shouldNotCommitBlock := sr.ExtendedCalled || int64(header.GetRound()) < sr.RoundHandler().Index() + if shouldNotCommitBlock { + log.Debug("canceled round, extended has been called or round index has been changed", + "round", sr.RoundHandler().Index(), + "subround", sr.Name(), + "header round", header.GetRound(), + "extended called", sr.ExtendedCalled, + ) + return false + } + + if sr.isOutOfTime() { + return false + } + + ok := sr.ScheduledProcessor().IsProcessedOKWithTimeout() + if !ok { + return false + } + + startTime := time.Now() + err := sr.BlockProcessor().CommitBlock(header, sr.Body) + elapsedTime := time.Since(startTime) + if elapsedTime >= common.CommitMaxTime { + log.Warn("doEndRoundJobByParticipant.CommitBlock", "elapsed time", elapsedTime) + } else { + log.Debug("elapsed time to commit block", + "time [s]", elapsedTime, + ) + } + if err != nil { + log.Debug("doEndRoundJobByParticipant.CommitBlock", "error", err.Error()) + return false + } + + sr.SetStatus(sr.Current(), spos.SsFinished) + + if sr.IsNodeInConsensusGroup(sr.SelfPubKey()) || sr.IsMultiKeyInConsensusGroup() { + err = sr.setHeaderForValidator(header) + if err != nil { + log.Warn("doEndRoundJobByParticipant", "error", err.Error()) + } + } + + sr.displayStatistics() + + log.Debug("step 3: Body and Header have been committed") + + headerTypeMsg := "received" + if cnsDta != nil { + headerTypeMsg = "assembled" + } + + msg := 
fmt.Sprintf("Added %s block with nonce %d in blockchain", headerTypeMsg, header.GetNonce()) + log.Debug(display.Headline(msg, sr.SyncTimer().FormattedCurrentTime(), "-")) + return true +} + +func (sr *subroundEndRound) haveConsensusHeaderWithFullInfo(cnsDta *consensus.Message) (bool, data.HeaderHandler) { + if cnsDta == nil { + return sr.isConsensusHeaderReceived() + } + + if check.IfNil(sr.Header) { + return false, nil + } + + header := sr.Header.ShallowClone() + err := header.SetPubKeysBitmap(cnsDta.PubKeysBitmap) + if err != nil { + return false, nil + } + + err = header.SetSignature(cnsDta.AggregateSignature) + if err != nil { + return false, nil + } + + err = header.SetLeaderSignature(cnsDta.LeaderSignature) + if err != nil { + return false, nil + } + + return true, header +} + +func (sr *subroundEndRound) isConsensusHeaderReceived() (bool, data.HeaderHandler) { + if check.IfNil(sr.Header) { + return false, nil + } + + consensusHeaderHash, err := core.CalculateHash(sr.Marshalizer(), sr.Hasher(), sr.Header) + if err != nil { + log.Debug("isConsensusHeaderReceived: calculate consensus header hash", "error", err.Error()) + return false, nil + } + + receivedHeaders := sr.GetReceivedHeaders() + + var receivedHeaderHash []byte + for index := range receivedHeaders { + receivedHeader := receivedHeaders[index].ShallowClone() + err = receivedHeader.SetLeaderSignature(nil) + if err != nil { + log.Debug("isConsensusHeaderReceived - SetLeaderSignature", "error", err.Error()) + return false, nil + } + + err = receivedHeader.SetPubKeysBitmap(nil) + if err != nil { + log.Debug("isConsensusHeaderReceived - SetPubKeysBitmap", "error", err.Error()) + return false, nil + } + + err = receivedHeader.SetSignature(nil) + if err != nil { + log.Debug("isConsensusHeaderReceived - SetSignature", "error", err.Error()) + return false, nil + } + + receivedHeaderHash, err = core.CalculateHash(sr.Marshalizer(), sr.Hasher(), receivedHeader) + if err != nil { + log.Debug("isConsensusHeaderReceived: calculate received header hash", "error", err.Error()) + return false, nil + } + + if bytes.Equal(receivedHeaderHash, consensusHeaderHash) { + return true, receivedHeaders[index] + } + } + + return false, nil +} + +func (sr *subroundEndRound) signBlockHeader() ([]byte, error) { + headerClone := sr.Header.ShallowClone() + err := headerClone.SetLeaderSignature(nil) + if err != nil { + return nil, err + } + + marshalizedHdr, err := sr.Marshalizer().Marshal(headerClone) + if err != nil { + return nil, err + } + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + return nil, errGetLeader + } + + return sr.SigningHandler().CreateSignatureForPublicKey(marshalizedHdr, []byte(leader)) +} + +func (sr *subroundEndRound) updateMetricsForLeader() { + sr.appStatusHandler.Increment(common.MetricCountAcceptedBlocks) + sr.appStatusHandler.SetStringValue(common.MetricConsensusRoundState, + fmt.Sprintf("valid block produced in %f sec", time.Since(sr.RoundHandler().TimeStamp()).Seconds())) +} + +func (sr *subroundEndRound) broadcastBlockDataLeader() error { + miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.Header, sr.Body) + if err != nil { + return err + } + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("broadcastBlockDataLeader.GetLeader", "error", errGetLeader) + return errGetLeader + } + + return sr.BroadcastMessenger().BroadcastBlockDataLeader(sr.Header, miniBlocks, transactions, []byte(leader)) +} + +func (sr *subroundEndRound) 
setHeaderForValidator(header data.HeaderHandler) error { + idx, pk, miniBlocks, transactions, err := sr.getIndexPkAndDataToBroadcast() + if err != nil { + return err + } + + go sr.BroadcastMessenger().PrepareBroadcastHeaderValidator(header, miniBlocks, transactions, idx, pk) + + return nil +} + +func (sr *subroundEndRound) prepareBroadcastBlockDataForValidator() error { + idx, pk, miniBlocks, transactions, err := sr.getIndexPkAndDataToBroadcast() + if err != nil { + return err + } + + go sr.BroadcastMessenger().PrepareBroadcastBlockDataValidator(sr.Header, miniBlocks, transactions, idx, pk) + + return nil +} + +// doEndRoundConsensusCheck method checks if the consensus is achieved +func (sr *subroundEndRound) doEndRoundConsensusCheck() bool { + if sr.RoundCanceled { + return false + } + + if sr.IsSubroundFinished(sr.Current()) { + return true + } + + return false +} + +func (sr *subroundEndRound) checkSignaturesValidity(bitmap []byte) error { + consensusGroup := sr.ConsensusGroup() + signers := headerCheck.ComputeSignersPublicKeys(consensusGroup, bitmap) + for _, pubKey := range signers { + isSigJobDone, err := sr.JobDone(pubKey, SrSignature) + if err != nil { + return err + } + + if !isSigJobDone { + return spos.ErrNilSignature + } + } + + return nil +} + +func (sr *subroundEndRound) isOutOfTime() bool { + startTime := sr.RoundTimeStamp + maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 + if sr.RoundHandler().RemainingTime(startTime, maxTime) < 0 { + log.Debug("canceled round, time is out", + "round", sr.SyncTimer().FormattedCurrentTime(), sr.RoundHandler().Index(), + "subround", sr.Name()) + + sr.RoundCanceled = true + return true + } + + return false +} + +func (sr *subroundEndRound) getIndexPkAndDataToBroadcast() (int, []byte, map[uint32][]byte, map[string][][]byte, error) { + minIdx := sr.getMinConsensusGroupIndexOfManagedKeys() + + idx, err := sr.SelfConsensusGroupIndex() + if err == nil { + if idx < minIdx { + minIdx = idx + } + } + + if minIdx == sr.ConsensusGroupSize() { + return -1, nil, nil, nil, err + } + + miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.Header, sr.Body) + if err != nil { + return -1, nil, nil, nil, err + } + + consensusGroup := sr.ConsensusGroup() + pk := []byte(consensusGroup[minIdx]) + + return minIdx, pk, miniBlocks, transactions, nil +} + +func (sr *subroundEndRound) getMinConsensusGroupIndexOfManagedKeys() int { + minIdx := sr.ConsensusGroupSize() + + for idx, validator := range sr.ConsensusGroup() { + if !sr.IsKeyManagedByCurrentNode([]byte(validator)) { + continue + } + + if idx < minIdx { + minIdx = idx + } + } + + return minIdx +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sr *subroundEndRound) IsInterfaceNil() bool { + return sr == nil +} diff --git a/consensus/spos/bls/v1/subroundEndRound_test.go b/consensus/spos/bls/v1/subroundEndRound_test.go new file mode 100644 index 00000000000..c202cc15a7e --- /dev/null +++ b/consensus/spos/bls/v1/subroundEndRound_test.go @@ -0,0 +1,1769 @@ +package v1_test + +import ( + "bytes" + "errors" + "sync" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + 
v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" +
"github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/p2p/factory" + "github.com/multiversx/mx-chain-go/testscommon" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +func initSubroundEndRoundWithContainer( + container *mock.ConsensusCoreMock, + appStatusHandler core.AppStatusHandler, +) v1.SubroundEndRound { + ch := make(chan bool, 1) + consensusState := initConsensusState() + sr, _ := spos.NewSubround( + v1.SrSignature, + v1.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) + + srEndRound, _ := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + appStatusHandler, + &testscommon.SentSignatureTrackerStub{}, + ) + + return srEndRound +} + +func initSubroundEndRound(appStatusHandler core.AppStatusHandler) v1.SubroundEndRound { + container := mock.InitConsensusCore() + return initSubroundEndRoundWithContainer(container, appStatusHandler) +} + +func TestNewSubroundEndRound(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + sr, _ := spos.NewSubround( + v1.SrSignature, + v1.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + t.Run("nil subround should error", func(t *testing.T) { + t.Parallel() + + srEndRound, err := v1.NewSubroundEndRound( + nil, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srEndRound) + assert.Equal(t, spos.ErrNilSubround, err) + }) + t.Run("nil extend function handler should error", func(t *testing.T) { + t.Parallel() + + srEndRound, err := v1.NewSubroundEndRound( + sr, + nil, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srEndRound) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + }) + t.Run("nil app status handler should error", func(t *testing.T) { + t.Parallel() + + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + nil, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srEndRound) + assert.Equal(t, spos.ErrNilAppStatusHandler, err) + }) + t.Run("nil sent signatures tracker should error", func(t *testing.T) { + t.Parallel() + + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + nil, + ) + + assert.Nil(t, srEndRound) + assert.Equal(t, v1.ErrNilSentSignatureTracker, err) + }) +} + +func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 
1) + + sr, _ := spos.NewSubround( + v1.SrSignature, + v1.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetBlockchain(nil) + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srEndRound)) + assert.Equal(t, spos.ErrNilBlockChain, err) +} + +func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrSignature, + v1.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetBlockProcessor(nil) + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srEndRound)) + assert.Equal(t, spos.ErrNilBlockProcessor, err) +} + +func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrSignature, + v1.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + sr.ConsensusState = nil + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srEndRound)) + assert.Equal(t, spos.ErrNilConsensusState, err) +} + +func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrSignature, + v1.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetMultiSignerContainer(nil) + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srEndRound)) + assert.Equal(t, spos.ErrNilMultiSignerContainer, err) +} + +func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrSignature, + v1.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + 
ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetRoundHandler(nil) + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srEndRound)) + assert.Equal(t, spos.ErrNilRoundHandler, err) +} + +func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrSignature, + v1.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetSyncTimer(nil) + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srEndRound)) + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrSignature, + v1.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.False(t, check.IfNil(srEndRound)) + assert.Nil(t, err) +} + +func TestSubroundEndRound_DoEndRoundJobErrAggregatingSigShouldFail(t *testing.T) { + t.Parallel() + container := mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + signingHandler := &consensusMocks.SigningHandlerStub{ + AggregateSigsCalled: func(bitmap []byte, epoch uint32) ([]byte, error) { + return nil, crypto.ErrNilHasher + }, + } + container.SetSigningHandler(signingHandler) + + sr.Header = &block.Header{} + + sr.SetSelfPubKey("A") + + assert.True(t, sr.IsSelfLeaderInCurrentRound()) + r := sr.DoEndRoundJob() + assert.False(t, r) +} + +func TestSubroundEndRound_DoEndRoundJobErrCommitBlockShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + + blProcMock := mock.InitBlockProcessorMock(container.Marshalizer()) + blProcMock.CommitBlockCalled = func( + header data.HeaderHandler, + body data.BodyHandler, + ) error { + return blockchain.ErrHeaderUnitNil + } + + container.SetBlockProcessor(blProcMock) + sr.Header = &block.Header{} + + r := sr.DoEndRoundJob() + assert.False(t, r) +} + +func TestSubroundEndRound_DoEndRoundJobErrTimeIsOutShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + + remainingTime := time.Millisecond + 
roundHandlerMock := &mock.RoundHandlerMock{ + RemainingTimeCalled: func(startTime time.Time, maxTime time.Duration) time.Duration { + return remainingTime + }, + } + + container.SetRoundHandler(roundHandlerMock) + sr.Header = &block.Header{} + + r := sr.DoEndRoundJob() + assert.True(t, r) + + remainingTime = -time.Millisecond + + r = sr.DoEndRoundJob() + assert.False(t, r) +} + +func TestSubroundEndRound_DoEndRoundJobErrBroadcastBlockOK(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + bm := &mock.BroadcastMessengerMock{ + BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { + return errors.New("error") + }, + } + container.SetBroadcastMessenger(bm) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + + sr.Header = &block.Header{} + + r := sr.DoEndRoundJob() + assert.True(t, r) +} + +func TestSubroundEndRound_DoEndRoundJobErrMarshalizedDataToBroadcastOK(t *testing.T) { + t.Parallel() + + err := errors.New("") + container := mock.InitConsensusCore() + + bpm := mock.InitBlockProcessorMock(container.Marshalizer()) + bpm.MarshalizedDataToBroadcastCalled = func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { + err = errors.New("error marshalized data to broadcast") + return make(map[uint32][]byte), make(map[string][][]byte), err + } + container.SetBlockProcessor(bpm) + + bm := &mock.BroadcastMessengerMock{ + BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { + return nil + }, + BroadcastMiniBlocksCalled: func(bytes map[uint32][]byte, pkBytes []byte) error { + return nil + }, + BroadcastTransactionsCalled: func(bytes map[string][][]byte, pkBytes []byte) error { + return nil + }, + } + container.SetBroadcastMessenger(bm) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + + sr.Header = &block.Header{} + + r := sr.DoEndRoundJob() + assert.True(t, r) + assert.Equal(t, errors.New("error marshalized data to broadcast"), err) +} + +func TestSubroundEndRound_DoEndRoundJobErrBroadcastMiniBlocksOK(t *testing.T) { + t.Parallel() + + err := errors.New("") + container := mock.InitConsensusCore() + + bpm := mock.InitBlockProcessorMock(container.Marshalizer()) + bpm.MarshalizedDataToBroadcastCalled = func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { + return make(map[uint32][]byte), make(map[string][][]byte), nil + } + container.SetBlockProcessor(bpm) + + bm := &mock.BroadcastMessengerMock{ + BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { + return nil + }, + BroadcastMiniBlocksCalled: func(bytes map[uint32][]byte, pkBytes []byte) error { + err = errors.New("error broadcast miniblocks") + return err + }, + BroadcastTransactionsCalled: func(bytes map[string][][]byte, pkBytes []byte) error { + return nil + }, + } + container.SetBroadcastMessenger(bm) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + + sr.Header = &block.Header{} + + r := sr.DoEndRoundJob() + assert.True(t, r) + // no error as broadcast is delayed + assert.Equal(t, errors.New("error broadcast miniblocks"), err) +} + +func TestSubroundEndRound_DoEndRoundJobErrBroadcastTransactionsOK(t *testing.T) { + t.Parallel() + + err := errors.New("") + container := mock.InitConsensusCore() + + bpm := 
mock.InitBlockProcessorMock(container.Marshalizer()) + bpm.MarshalizedDataToBroadcastCalled = func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { + return make(map[uint32][]byte), make(map[string][][]byte), nil + } + container.SetBlockProcessor(bpm) + + bm := &mock.BroadcastMessengerMock{ + BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { + return nil + }, + BroadcastMiniBlocksCalled: func(bytes map[uint32][]byte, pkBytes []byte) error { + return nil + }, + BroadcastTransactionsCalled: func(bytes map[string][][]byte, pkBytes []byte) error { + err = errors.New("error broadcast transactions") + return err + }, + } + container.SetBroadcastMessenger(bm) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + + sr.Header = &block.Header{} + + r := sr.DoEndRoundJob() + assert.True(t, r) + // no error as broadcast is delayed + assert.Equal(t, errors.New("error broadcast transactions"), err) +} + +func TestSubroundEndRound_DoEndRoundJobAllOK(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + bm := &mock.BroadcastMessengerMock{ + BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { + return errors.New("error") + }, + } + container.SetBroadcastMessenger(bm) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + + sr.Header = &block.Header{} + + r := sr.DoEndRoundJob() + assert.True(t, r) +} + +func TestSubroundEndRound_CheckIfSignatureIsFilled(t *testing.T) { + t.Parallel() + + expectedSignature := []byte("signature") + container := mock.InitConsensusCore() + signingHandler := &consensusMocks.SigningHandlerStub{ + CreateSignatureForPublicKeyCalled: func(publicKeyBytes []byte, msg []byte) ([]byte, error) { + var receivedHdr block.Header + _ = container.Marshalizer().Unmarshal(&receivedHdr, msg) + return expectedSignature, nil + }, + } + container.SetSigningHandler(signingHandler) + bm := &mock.BroadcastMessengerMock{ + BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { + return errors.New("error") + }, + } + container.SetBroadcastMessenger(bm) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + + sr.Header = &block.Header{Nonce: 5} + + r := sr.DoEndRoundJob() + assert.True(t, r) + assert.Equal(t, expectedSignature, sr.Header.GetLeaderSignature()) +} + +func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { + t.Parallel() + + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.RoundCanceled = true + + ok := sr.DoEndRoundConsensusCheck() + assert.False(t, ok) +} + +func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnTrueWhenRoundIsFinished(t *testing.T) { + t.Parallel() + + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.SetStatus(v1.SrEndRound, spos.SsFinished) + + ok := sr.DoEndRoundConsensusCheck() + assert.True(t, ok) +} + +func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnFalseWhenRoundIsNotFinished(t *testing.T) { + t.Parallel() + + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + ok := sr.DoEndRoundConsensusCheck() + assert.False(t, ok) +} + +func TestSubroundEndRound_CheckSignaturesValidityShouldErrNilSignature(t *testing.T) { + t.Parallel() + + sr := 
*initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + err := sr.CheckSignaturesValidity([]byte{2}) + assert.Equal(t, spos.ErrNilSignature, err) +} + +func TestSubroundEndRound_CheckSignaturesValidityShouldReturnNil(t *testing.T) { + t.Parallel() + + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + + err := sr.CheckSignaturesValidity([]byte{1}) + assert.Equal(t, nil, err) +} + +func TestSubroundEndRound_DoEndRoundJobByParticipant_RoundCanceledShouldReturnFalse(t *testing.T) { + t.Parallel() + + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.RoundCanceled = true + + cnsData := consensus.Message{} + res := sr.DoEndRoundJobByParticipant(&cnsData) + assert.False(t, res) +} + +func TestSubroundEndRound_DoEndRoundJobByParticipant_ConsensusDataNotSetShouldReturnFalse(t *testing.T) { + t.Parallel() + + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.Data = nil + + cnsData := consensus.Message{} + res := sr.DoEndRoundJobByParticipant(&cnsData) + assert.False(t, res) +} + +func TestSubroundEndRound_DoEndRoundJobByParticipant_PreviousSubroundNotFinishedShouldReturnFalse(t *testing.T) { + t.Parallel() + + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.SetStatus(2, spos.SsNotFinished) + cnsData := consensus.Message{} + res := sr.DoEndRoundJobByParticipant(&cnsData) + assert.False(t, res) +} + +func TestSubroundEndRound_DoEndRoundJobByParticipant_CurrentSubroundFinishedShouldReturnFalse(t *testing.T) { + t.Parallel() + + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + // set previous as finished + sr.SetStatus(2, spos.SsFinished) + + // set current as finished + sr.SetStatus(3, spos.SsFinished) + + cnsData := consensus.Message{} + res := sr.DoEndRoundJobByParticipant(&cnsData) + assert.False(t, res) +} + +func TestSubroundEndRound_DoEndRoundJobByParticipant_ConsensusHeaderNotReceivedShouldReturnFalse(t *testing.T) { + t.Parallel() + + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + // set previous as finished + sr.SetStatus(2, spos.SsFinished) + + // set current as not finished + sr.SetStatus(3, spos.SsNotFinished) + + cnsData := consensus.Message{} + res := sr.DoEndRoundJobByParticipant(&cnsData) + assert.False(t, res) +} + +func TestSubroundEndRound_DoEndRoundJobByParticipant_ShouldReturnTrue(t *testing.T) { + t.Parallel() + + hdr := &block.Header{Nonce: 37} + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.Header = hdr + sr.AddReceivedHeader(hdr) + + // set previous as finished + sr.SetStatus(2, spos.SsFinished) + + // set current as not finished + sr.SetStatus(3, spos.SsNotFinished) + + cnsData := consensus.Message{} + res := sr.DoEndRoundJobByParticipant(&cnsData) + assert.True(t, res) +} + +func TestSubroundEndRound_IsConsensusHeaderReceived_NoReceivedHeadersShouldReturnFalse(t *testing.T) { + t.Parallel() + + hdr := &block.Header{Nonce: 37} + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.Header = hdr + + res, retHdr := sr.IsConsensusHeaderReceived() + assert.False(t, res) + assert.Nil(t, retHdr) +} + +func TestSubroundEndRound_IsConsensusHeaderReceived_HeaderNotReceivedShouldReturnFalse(t *testing.T) { + t.Parallel() + + hdr := &block.Header{Nonce: 37} + hdrToSearchFor := &block.Header{Nonce: 38} + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.AddReceivedHeader(hdr) + sr.Header = hdrToSearchFor + + res, 
retHdr := sr.IsConsensusHeaderReceived() + assert.False(t, res) + assert.Nil(t, retHdr) +} + +func TestSubroundEndRound_IsConsensusHeaderReceivedShouldReturnTrue(t *testing.T) { + t.Parallel() + + hdr := &block.Header{Nonce: 37} + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.Header = hdr + sr.AddReceivedHeader(hdr) + + res, retHdr := sr.IsConsensusHeaderReceived() + assert.True(t, res) + assert.Equal(t, hdr, retHdr) +} + +func TestSubroundEndRound_HaveConsensusHeaderWithFullInfoNilHdrShouldNotWork(t *testing.T) { + t.Parallel() + + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + cnsData := consensus.Message{} + + haveHdr, hdr := sr.HaveConsensusHeaderWithFullInfo(&cnsData) + assert.False(t, haveHdr) + assert.Nil(t, hdr) +} + +func TestSubroundEndRound_HaveConsensusHeaderWithFullInfoShouldWork(t *testing.T) { + t.Parallel() + + originalPubKeyBitMap := []byte{0, 1, 2} + newPubKeyBitMap := []byte{3, 4, 5} + originalLeaderSig := []byte{6, 7, 8} + newLeaderSig := []byte{9, 10, 11} + originalSig := []byte{12, 13, 14} + newSig := []byte{15, 16, 17} + hdr := block.Header{ + PubKeysBitmap: originalPubKeyBitMap, + Signature: originalSig, + LeaderSignature: originalLeaderSig, + } + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.Header = &hdr + + cnsData := consensus.Message{ + PubKeysBitmap: newPubKeyBitMap, + LeaderSignature: newLeaderSig, + AggregateSignature: newSig, + } + haveHdr, newHdr := sr.HaveConsensusHeaderWithFullInfo(&cnsData) + assert.True(t, haveHdr) + require.NotNil(t, newHdr) + assert.Equal(t, newPubKeyBitMap, newHdr.GetPubKeysBitmap()) + assert.Equal(t, newLeaderSig, newHdr.GetLeaderSignature()) + assert.Equal(t, newSig, newHdr.GetSignature()) +} + +func TestSubroundEndRound_CreateAndBroadcastHeaderFinalInfoBroadcastShouldBeCalled(t *testing.T) { + t.Parallel() + + chanRcv := make(chan bool, 1) + leaderSigInHdr := []byte("leader sig") + container := mock.InitConsensusCore() + messenger := &mock.BroadcastMessengerMock{ + BroadcastConsensusMessageCalled: func(message *consensus.Message) error { + chanRcv <- true + assert.Equal(t, message.LeaderSignature, leaderSigInHdr) + return nil + }, + } + container.SetBroadcastMessenger(messenger) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.Header = &block.Header{LeaderSignature: leaderSigInHdr} + + sr.CreateAndBroadcastHeaderFinalInfo() + + select { + case <-chanRcv: + case <-time.After(100 * time.Millisecond): + assert.Fail(t, "broadcast not called") + } +} + +func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldWork(t *testing.T) { + t.Parallel() + + hdr := &block.Header{Nonce: 37} + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.Header = hdr + sr.AddReceivedHeader(hdr) + + sr.SetStatus(2, spos.SsFinished) + sr.SetStatus(3, spos.SsNotFinished) + + cnsData := consensus.Message{ + // apply the data which is mocked in consensus state so the checks will pass + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + } + + res := sr.ReceivedBlockHeaderFinalInfo(&cnsData) + assert.True(t, res) +} + +func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldReturnFalseWhenFinalInfoIsNotValid(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + headerSigVerifier := &mock.HeaderSigVerifierStub{ + VerifyLeaderSignatureCalled: func(header data.HeaderHandler) error { + return errors.New("error") + }, + VerifySignatureCalled: func(header data.HeaderHandler) error { + return 
errors.New("error") + }, + } + + container.SetHeaderSigVerifier(headerSigVerifier) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + } + sr.Header = &block.Header{} + res := sr.ReceivedBlockHeaderFinalInfo(&cnsData) + assert.False(t, res) +} + +func TestSubroundEndRound_IsOutOfTimeShouldReturnFalse(t *testing.T) { + t.Parallel() + + sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + res := sr.IsOutOfTime() + assert.False(t, res) +} + +func TestSubroundEndRound_IsOutOfTimeShouldReturnTrue(t *testing.T) { + t.Parallel() + + // update roundHandler's mock, so it will calculate for real the duration + container := mock.InitConsensusCore() + roundHandler := mock.RoundHandlerMock{RemainingTimeCalled: func(startTime time.Time, maxTime time.Duration) time.Duration { + currentTime := time.Now() + elapsedTime := currentTime.Sub(startTime) + remainingTime := maxTime - elapsedTime + + return remainingTime + }} + container.SetRoundHandler(&roundHandler) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + sr.RoundTimeStamp = time.Now().AddDate(0, 0, -1) + + res := sr.IsOutOfTime() + assert.True(t, res) +} + +func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerifyLeaderSignatureFails(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + headerSigVerifier := &mock.HeaderSigVerifierStub{ + VerifyLeaderSignatureCalled: func(header data.HeaderHandler) error { + return errors.New("error") + }, + VerifySignatureCalled: func(header data.HeaderHandler) error { + return nil + }, + } + + container.SetHeaderSigVerifier(headerSigVerifier) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + cnsDta := &consensus.Message{} + sr.Header = &block.Header{} + isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) + assert.False(t, isValid) +} + +func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerifySignatureFails(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + headerSigVerifier := &mock.HeaderSigVerifierStub{ + VerifyLeaderSignatureCalled: func(header data.HeaderHandler) error { + return nil + }, + VerifySignatureCalled: func(header data.HeaderHandler) error { + return errors.New("error") + }, + } + + container.SetHeaderSigVerifier(headerSigVerifier) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + cnsDta := &consensus.Message{} + sr.Header = &block.Header{} + isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) + assert.False(t, isValid) +} + +func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnTrue(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + headerSigVerifier := &mock.HeaderSigVerifierStub{ + VerifyLeaderSignatureCalled: func(header data.HeaderHandler) error { + return nil + }, + VerifySignatureCalled: func(header data.HeaderHandler) error { + return nil + }, + } + + container.SetHeaderSigVerifier(headerSigVerifier) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + cnsDta := &consensus.Message{} + sr.Header = &block.Header{} + isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) + assert.True(t, isValid) +} + +func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { + t.Parallel() + + t.Run("fail to get signature share", func(t *testing.T) { + t.Parallel() + 
+ container := mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + expectedErr := errors.New("expected error") + signingHandler := &consensusMocks.SigningHandlerStub{ + SignatureShareCalled: func(index uint16) ([]byte, error) { + return nil, expectedErr + }, + } + + container.SetSigningHandler(signingHandler) + + sr.Header = &block.Header{} + _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + + _, err := sr.VerifyNodesOnAggSigFail() + require.Equal(t, expectedErr, err) + }) + + t.Run("fail to verify signature share, job done will be set to false", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + expectedErr := errors.New("expected error") + signingHandler := &consensusMocks.SigningHandlerStub{ + SignatureShareCalled: func(index uint16) ([]byte, error) { + return nil, nil + }, + VerifySignatureShareCalled: func(index uint16, sig, msg []byte, epoch uint32) error { + return expectedErr + }, + } + + sr.Header = &block.Header{} + _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + container.SetSigningHandler(signingHandler) + + _, err := sr.VerifyNodesOnAggSigFail() + require.Nil(t, err) + + isJobDone, err := sr.JobDone(sr.ConsensusGroup()[0], v1.SrSignature) + require.Nil(t, err) + require.False(t, isJobDone) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + signingHandler := &consensusMocks.SigningHandlerStub{ + SignatureShareCalled: func(index uint16) ([]byte, error) { + return nil, nil + }, + VerifySignatureShareCalled: func(index uint16, sig, msg []byte, epoch uint32) error { + return nil + }, + VerifyCalled: func(msg, bitmap []byte, epoch uint32) error { + return nil + }, + } + container.SetSigningHandler(signingHandler) + + sr.Header = &block.Header{} + _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[1], v1.SrSignature, true) + + invalidSigners, err := sr.VerifyNodesOnAggSigFail() + require.Nil(t, err) + require.NotNil(t, invalidSigners) + }) +} + +func TestComputeAggSigOnValidNodes(t *testing.T) { + t.Parallel() + + t.Run("invalid number of valid sig shares", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.Header = &block.Header{} + sr.SetThreshold(v1.SrEndRound, 2) + + _, _, err := sr.ComputeAggSigOnValidNodes() + require.True(t, errors.Is(err, spos.ErrInvalidNumSigShares)) + }) + + t.Run("fail to create aggregated sig", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + expectedErr := errors.New("expected error") + signingHandler := &consensusMocks.SigningHandlerStub{ + AggregateSigsCalled: func(bitmap []byte, epoch uint32) ([]byte, error) { + return nil, expectedErr + }, + } + container.SetSigningHandler(signingHandler) + + sr.Header = &block.Header{} + _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + + _, _, err := sr.ComputeAggSigOnValidNodes() + require.Equal(t, expectedErr, err) + }) + + t.Run("fail to set aggregated sig", func(t *testing.T) { + t.Parallel() + + container := 
mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + expectedErr := errors.New("exptected error") + signingHandler := &consensusMocks.SigningHandlerStub{ + SetAggregatedSigCalled: func(_ []byte) error { + return expectedErr + }, + } + container.SetSigningHandler(signingHandler) + sr.Header = &block.Header{} + _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + + _, _, err := sr.ComputeAggSigOnValidNodes() + require.Equal(t, expectedErr, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.Header = &block.Header{} + _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + + bitmap, sig, err := sr.ComputeAggSigOnValidNodes() + require.NotNil(t, bitmap) + require.NotNil(t, sig) + require.Nil(t, err) + }) +} + +func TestSubroundEndRound_DoEndRoundJobByLeaderVerificationFail(t *testing.T) { + t.Parallel() + + t.Run("not enough valid signature shares", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + verifySigShareNumCalls := 0 + verifyFirstCall := true + signingHandler := &consensusMocks.SigningHandlerStub{ + SignatureShareCalled: func(index uint16) ([]byte, error) { + return nil, nil + }, + VerifySignatureShareCalled: func(index uint16, sig, msg []byte, epoch uint32) error { + if verifySigShareNumCalls == 0 { + verifySigShareNumCalls++ + return errors.New("expected error") + } + + verifySigShareNumCalls++ + return nil + }, + VerifyCalled: func(msg, bitmap []byte, epoch uint32) error { + if verifyFirstCall { + verifyFirstCall = false + return errors.New("expected error") + } + + return nil + }, + } + + container.SetSigningHandler(signingHandler) + + sr.SetThreshold(v1.SrEndRound, 2) + + _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[1], v1.SrSignature, true) + + sr.Header = &block.Header{} + + r := sr.DoEndRoundJobByLeader() + require.False(t, r) + + assert.False(t, verifyFirstCall) + assert.Equal(t, 2, verifySigShareNumCalls) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + verifySigShareNumCalls := 0 + verifyFirstCall := true + signingHandler := &consensusMocks.SigningHandlerStub{ + SignatureShareCalled: func(index uint16) ([]byte, error) { + return nil, nil + }, + VerifySignatureShareCalled: func(index uint16, sig, msg []byte, epoch uint32) error { + if verifySigShareNumCalls == 0 { + verifySigShareNumCalls++ + return errors.New("expected error") + } + + verifySigShareNumCalls++ + return nil + }, + VerifyCalled: func(msg, bitmap []byte, epoch uint32) error { + if verifyFirstCall { + verifyFirstCall = false + return errors.New("expected error") + } + + return nil + }, + } + + container.SetSigningHandler(signingHandler) + + sr.SetThreshold(v1.SrEndRound, 2) + + _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[1], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[2], v1.SrSignature, true) + + sr.Header = &block.Header{} + + r := sr.DoEndRoundJobByLeader() + require.True(t, r) + + assert.False(t, verifyFirstCall) + assert.Equal(t, 3, 
verifySigShareNumCalls) + }) +} + +func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { + t.Parallel() + + t.Run("consensus data is not set", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.ConsensusState.Data = nil + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("received message node is not leader in current round", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("other node"), + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("received message from self leader should return false", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("received message from self multikey leader should return false", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return string(pkBytes) == "A" + }, + } + ch := make(chan bool, 1) + consensusState := initConsensusStateWithKeysHandler(keysHandler) + sr, _ := spos.NewSubround( + v1.SrSignature, + v1.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + srEndRound, _ := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + srEndRound.SetSelfPubKey("A") + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + } + + res := srEndRound.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("received hash does not match the hash from current consensus state", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("Y"), + PubKey: []byte("A"), + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("process received message verification failed, different round index", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + RoundIndex: 1, + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("empty invalid signers", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + sr := *initSubroundEndRoundWithContainer(container, 
&statusHandler.AppStatusHandlerStub{}) + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + InvalidSigners: []byte{}, + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("invalid signers data", func(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + messageSigningHandler := &mock.MessageSigningHandlerStub{ + DeserializeCalled: func(messagesBytes []byte) ([]p2p.MessageP2P, error) { + return nil, expectedErr + }, + } + + container := mock.InitConsensusCore() + container.SetMessageSigningHandler(messageSigningHandler) + + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + InvalidSigners: []byte("invalid data"), + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + InvalidSigners: []byte("invalidSignersData"), + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.True(t, res) + }) +} + +func TestVerifyInvalidSigners(t *testing.T) { + t.Parallel() + + t.Run("failed to deserialize invalidSigners field, should error", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + expectedErr := errors.New("expected err") + messageSigningHandler := &mock.MessageSigningHandlerStub{ + DeserializeCalled: func(messagesBytes []byte) ([]p2p.MessageP2P, error) { + return nil, expectedErr + }, + } + + container.SetMessageSigningHandler(messageSigningHandler) + + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + err := sr.VerifyInvalidSigners([]byte{}) + require.Equal(t, expectedErr, err) + }) + + t.Run("failed to verify low level p2p message, should error", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + invalidSigners := []p2p.MessageP2P{&factory.Message{ + FromField: []byte("from"), + }} + invalidSignersBytes, _ := container.Marshalizer().Marshal(invalidSigners) + + expectedErr := errors.New("expected err") + messageSigningHandler := &mock.MessageSigningHandlerStub{ + DeserializeCalled: func(messagesBytes []byte) ([]p2p.MessageP2P, error) { + require.Equal(t, invalidSignersBytes, messagesBytes) + return invalidSigners, nil + }, + VerifyCalled: func(message p2p.MessageP2P) error { + return expectedErr + }, + } + + container.SetMessageSigningHandler(messageSigningHandler) + + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + err := sr.VerifyInvalidSigners(invalidSignersBytes) + require.Equal(t, expectedErr, err) + }) + + t.Run("failed to verify signature share", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + pubKey := []byte("A") // it's in consensus + + consensusMsg := &consensus.Message{ + PubKey: pubKey, + } + consensusMsgBytes, _ := container.Marshalizer().Marshal(consensusMsg) + + invalidSigners := []p2p.MessageP2P{&factory.Message{ + FromField: []byte("from"), + DataField: consensusMsgBytes, + }} + invalidSignersBytes, _ := container.Marshalizer().Marshal(invalidSigners) + + messageSigningHandler := &mock.MessageSigningHandlerStub{ + DeserializeCalled: func(messagesBytes 
[]byte) ([]p2p.MessageP2P, error) { + require.Equal(t, invalidSignersBytes, messagesBytes) + return invalidSigners, nil + }, + } + + wasCalled := false + signingHandler := &consensusMocks.SigningHandlerStub{ + VerifySingleSignatureCalled: func(publicKeyBytes []byte, message []byte, signature []byte) error { + wasCalled = true + return errors.New("expected err") + }, + } + + container.SetSigningHandler(signingHandler) + container.SetMessageSigningHandler(messageSigningHandler) + + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + err := sr.VerifyInvalidSigners(invalidSignersBytes) + require.Nil(t, err) + require.True(t, wasCalled) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + pubKey := []byte("A") // it's in consensus + + consensusMsg := &consensus.Message{ + PubKey: pubKey, + } + consensusMsgBytes, _ := container.Marshalizer().Marshal(consensusMsg) + + invalidSigners := []p2p.MessageP2P{&factory.Message{ + FromField: []byte("from"), + DataField: consensusMsgBytes, + }} + invalidSignersBytes, _ := container.Marshalizer().Marshal(invalidSigners) + + messageSigningHandler := &mock.MessageSignerMock{} + container.SetMessageSigningHandler(messageSigningHandler) + + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + err := sr.VerifyInvalidSigners(invalidSignersBytes) + require.Nil(t, err) + }) +} + +func TestSubroundEndRound_CreateAndBroadcastInvalidSigners(t *testing.T) { + t.Parallel() + + t.Run("redundancy node should not send while main is active", func(t *testing.T) { + t.Parallel() + + expectedInvalidSigners := []byte("invalid signers") + + container := mock.InitConsensusCore() + nodeRedundancy := &mock.NodeRedundancyHandlerStub{ + IsRedundancyNodeCalled: func() bool { + return true + }, + IsMainMachineActiveCalled: func() bool { + return true + }, + } + container.SetNodeRedundancyHandler(nodeRedundancy) + messenger := &mock.BroadcastMessengerMock{ + BroadcastConsensusMessageCalled: func(message *consensus.Message) error { + assert.Fail(t, "should have not been called") + return nil + }, + } + container.SetBroadcastMessenger(messenger) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + wg := &sync.WaitGroup{} + wg.Add(1) + + expectedInvalidSigners := []byte("invalid signers") + + wasCalled := false + container := mock.InitConsensusCore() + messenger := &mock.BroadcastMessengerMock{ + BroadcastConsensusMessageCalled: func(message *consensus.Message) error { + assert.Equal(t, expectedInvalidSigners, message.InvalidSigners) + wasCalled = true + wg.Done() + return nil + }, + } + container.SetBroadcastMessenger(messenger) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + + sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) + + wg.Wait() + + require.True(t, wasCalled) + }) +} + +func TestGetFullMessagesForInvalidSigners(t *testing.T) { + t.Parallel() + + t.Run("empty p2p messages slice if not in state", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + messageSigningHandler := &mock.MessageSigningHandlerStub{ + SerializeCalled: func(messages []p2p.MessageP2P) ([]byte, error) { + require.Equal(t, 0, len(messages)) + + return []byte{}, nil + }, + } + + 
container.SetMessageSigningHandler(messageSigningHandler) + + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + invalidSigners := []string{"B", "C"} + + invalidSignersBytes, err := sr.GetFullMessagesForInvalidSigners(invalidSigners) + require.Nil(t, err) + require.Equal(t, []byte{}, invalidSignersBytes) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + expectedInvalidSigners := []byte("expectedInvalidSigners") + + messageSigningHandler := &mock.MessageSigningHandlerStub{ + SerializeCalled: func(messages []p2p.MessageP2P) ([]byte, error) { + require.Equal(t, 2, len(messages)) + + return expectedInvalidSigners, nil + }, + } + + container.SetMessageSigningHandler(messageSigningHandler) + + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.AddMessageWithSignature("B", &p2pmocks.P2PMessageMock{}) + sr.AddMessageWithSignature("C", &p2pmocks.P2PMessageMock{}) + + invalidSigners := []string{"B", "C"} + + invalidSignersBytes, err := sr.GetFullMessagesForInvalidSigners(invalidSigners) + require.Nil(t, err) + require.Equal(t, expectedInvalidSigners, invalidSignersBytes) + }) +} + +func TestSubroundEndRound_getMinConsensusGroupIndexOfManagedKeys(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{} + ch := make(chan bool, 1) + consensusState := initConsensusStateWithKeysHandler(keysHandler) + sr, _ := spos.NewSubround( + v1.SrSignature, + v1.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + srEndRound, _ := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + t.Run("no managed keys from consensus group", func(t *testing.T) { + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return false + } + + assert.Equal(t, 9, srEndRound.GetMinConsensusGroupIndexOfManagedKeys()) + }) + t.Run("first managed key in consensus group should return 0", func(t *testing.T) { + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return bytes.Equal([]byte("A"), pkBytes) + } + + assert.Equal(t, 0, srEndRound.GetMinConsensusGroupIndexOfManagedKeys()) + }) + t.Run("third managed key in consensus group should return 2", func(t *testing.T) { + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return bytes.Equal([]byte("C"), pkBytes) + } + + assert.Equal(t, 2, srEndRound.GetMinConsensusGroupIndexOfManagedKeys()) + }) + t.Run("last managed key in consensus group should return 8", func(t *testing.T) { + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return bytes.Equal([]byte("I"), pkBytes) + } + + assert.Equal(t, 8, srEndRound.GetMinConsensusGroupIndexOfManagedKeys()) + }) +} diff --git a/consensus/spos/bls/v1/subroundSignature.go b/consensus/spos/bls/v1/subroundSignature.go new file mode 100644 index 00000000000..2880480713d --- /dev/null +++ b/consensus/spos/bls/v1/subroundSignature.go @@ -0,0 +1,409 @@ +package v1 + +import ( + "context" + "encoding/hex" + "fmt" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + + 
"github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" +) + +type subroundSignature struct { + *spos.Subround + appStatusHandler core.AppStatusHandler + sentSignatureTracker spos.SentSignaturesTracker +} + +// NewSubroundSignature creates a subroundSignature object +func NewSubroundSignature( + baseSubround *spos.Subround, + extend func(subroundId int), + appStatusHandler core.AppStatusHandler, + sentSignatureTracker spos.SentSignaturesTracker, +) (*subroundSignature, error) { + err := checkNewSubroundSignatureParams( + baseSubround, + ) + if err != nil { + return nil, err + } + if extend == nil { + return nil, fmt.Errorf("%w for extend function", spos.ErrNilFunctionHandler) + } + if check.IfNil(appStatusHandler) { + return nil, spos.ErrNilAppStatusHandler + } + if check.IfNil(sentSignatureTracker) { + return nil, ErrNilSentSignatureTracker + } + + srSignature := subroundSignature{ + Subround: baseSubround, + appStatusHandler: appStatusHandler, + sentSignatureTracker: sentSignatureTracker, + } + srSignature.Job = srSignature.doSignatureJob + srSignature.Check = srSignature.doSignatureConsensusCheck + srSignature.Extend = extend + + return &srSignature, nil +} + +func checkNewSubroundSignatureParams( + baseSubround *spos.Subround, +) error { + if baseSubround == nil { + return spos.ErrNilSubround + } + if baseSubround.ConsensusState == nil { + return spos.ErrNilConsensusState + } + + err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) + + return err +} + +// doSignatureJob method does the job of the subround Signature +func (sr *subroundSignature) doSignatureJob(_ context.Context) bool { + if !sr.CanDoSubroundJob(sr.Current()) { + return false + } + if check.IfNil(sr.Header) { + log.Error("doSignatureJob", "error", spos.ErrNilHeader) + return false + } + + isSelfLeader := sr.IsSelfLeaderInCurrentRound() && sr.ShouldConsiderSelfKeyInConsensus() + isSelfInConsensusGroup := sr.IsNodeInConsensusGroup(sr.SelfPubKey()) && sr.ShouldConsiderSelfKeyInConsensus() + + if isSelfLeader || isSelfInConsensusGroup { + selfIndex, err := sr.SelfConsensusGroupIndex() + if err != nil { + log.Debug("doSignatureJob.SelfConsensusGroupIndex: not in consensus group") + return false + } + + signatureShare, err := sr.SigningHandler().CreateSignatureShareForPublicKey( + sr.GetData(), + uint16(selfIndex), + sr.Header.GetEpoch(), + []byte(sr.SelfPubKey()), + ) + if err != nil { + log.Debug("doSignatureJob.CreateSignatureShareForPublicKey", "error", err.Error()) + return false + } + + if !isSelfLeader { + ok := sr.createAndSendSignatureMessage(signatureShare, []byte(sr.SelfPubKey())) + if !ok { + return false + } + } + + ok := sr.completeSignatureSubRound(sr.SelfPubKey(), isSelfLeader) + if !ok { + return false + } + } + + return sr.doSignatureJobForManagedKeys() +} + +func (sr *subroundSignature) createAndSendSignatureMessage(signatureShare []byte, pkBytes []byte) bool { + // TODO: Analyze it is possible to send message only to leader with O(1) instead of O(n) + cnsMsg := consensus.NewConsensusMessage( + sr.GetData(), + signatureShare, + nil, + nil, + pkBytes, + nil, + int(MtSignature), + sr.RoundHandler().Index(), + sr.ChainID(), + nil, + nil, + nil, + sr.GetAssociatedPid(pkBytes), + nil, + ) + + err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) + if err != nil { + log.Debug("createAndSendSignatureMessage.BroadcastConsensusMessage", + "error", err.Error(), "pk", pkBytes) + return false + } + + 
log.Debug("step 2: signature has been sent", "pk", pkBytes) + + return true +} + +func (sr *subroundSignature) completeSignatureSubRound(pk string, shouldWaitForAllSigsAsync bool) bool { + err := sr.SetJobDone(pk, sr.Current(), true) + if err != nil { + log.Debug("doSignatureJob.SetSelfJobDone", + "subround", sr.Name(), + "error", err.Error(), + "pk", []byte(pk), + ) + return false + } + + if shouldWaitForAllSigsAsync { + go sr.waitAllSignatures() + } + + return true +} + +// receivedSignature method is called when a signature is received through the signature channel. +// If the signature is valid, then the jobDone map corresponding to the node which sent it, +// is set on true for the subround Signature +func (sr *subroundSignature) receivedSignature(_ context.Context, cnsDta *consensus.Message) bool { + node := string(cnsDta.PubKey) + pkForLogs := core.GetTrimmedPk(hex.EncodeToString(cnsDta.PubKey)) + + if !sr.IsConsensusDataSet() { + return false + } + + if !sr.IsNodeInConsensusGroup(node) { + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.ValidatorPeerHonestyDecreaseFactor, + ) + + return false + } + + if !sr.IsSelfLeaderInCurrentRound() && !sr.IsMultiKeyLeaderInCurrentRound() { + return false + } + + if !sr.IsConsensusDataEqual(cnsDta.BlockHeaderHash) { + return false + } + + if !sr.CanProcessReceivedMessage(cnsDta, sr.RoundHandler().Index(), sr.Current()) { + return false + } + + index, err := sr.ConsensusGroupIndex(node) + if err != nil { + log.Debug("receivedSignature.ConsensusGroupIndex", + "node", pkForLogs, + "error", err.Error()) + return false + } + + err = sr.SigningHandler().StoreSignatureShare(uint16(index), cnsDta.SignatureShare) + if err != nil { + log.Debug("receivedSignature.StoreSignatureShare", + "node", pkForLogs, + "index", index, + "error", err.Error()) + return false + } + + err = sr.SetJobDone(node, sr.Current(), true) + if err != nil { + log.Debug("receivedSignature.SetJobDone", + "node", pkForLogs, + "subround", sr.Name(), + "error", err.Error()) + return false + } + + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.ValidatorPeerHonestyIncreaseFactor, + ) + + sr.appStatusHandler.SetStringValue(common.MetricConsensusRoundState, "signed") + return true +} + +// doSignatureConsensusCheck method checks if the consensus in the subround Signature is achieved +func (sr *subroundSignature) doSignatureConsensusCheck() bool { + if sr.RoundCanceled { + return false + } + + if sr.IsSubroundFinished(sr.Current()) { + sr.appStatusHandler.SetStringValue(common.MetricConsensusRoundState, "signed") + + return true + } + + isSelfLeader := sr.IsSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() + isSelfInConsensusGroup := sr.IsNodeInConsensusGroup(sr.SelfPubKey()) || sr.IsMultiKeyInConsensusGroup() + + threshold := sr.Threshold(sr.Current()) + if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.Header) { + threshold = sr.FallbackThreshold(sr.Current()) + log.Warn("subroundSignature.doSignatureConsensusCheck: fallback validation has been applied", + "minimum number of signatures required", threshold, + "actual number of signatures received", sr.getNumOfSignaturesCollected(), + ) + } + + areSignaturesCollected, numSigs := sr.areSignaturesCollected(threshold) + areAllSignaturesCollected := numSigs == sr.ConsensusGroupSize() + + isJobDoneByLeader := isSelfLeader && (areAllSignaturesCollected || (areSignaturesCollected && 
sr.WaitingAllSignaturesTimeOut)) + + selfJobDone := true + if sr.IsNodeInConsensusGroup(sr.SelfPubKey()) { + selfJobDone = sr.IsSelfJobDone(sr.Current()) + } + multiKeyJobDone := true + if sr.IsMultiKeyInConsensusGroup() { + multiKeyJobDone = sr.IsMultiKeyJobDone(sr.Current()) + } + isJobDoneByConsensusNode := !isSelfLeader && isSelfInConsensusGroup && selfJobDone && multiKeyJobDone + + isSubroundFinished := !isSelfInConsensusGroup || isJobDoneByConsensusNode || isJobDoneByLeader + + if isSubroundFinished { + if isSelfLeader { + log.Debug("step 2: signatures", + "received", numSigs, + "total", len(sr.ConsensusGroup())) + } + + log.Debug("step 2: subround has been finished", + "subround", sr.Name()) + sr.SetStatus(sr.Current(), spos.SsFinished) + + sr.appStatusHandler.SetStringValue(common.MetricConsensusRoundState, "signed") + + return true + } + + return false +} + +// areSignaturesCollected method checks if the signatures received from the nodes, belonging to the current +// jobDone group, are more than the necessary given threshold +func (sr *subroundSignature) areSignaturesCollected(threshold int) (bool, int) { + n := sr.getNumOfSignaturesCollected() + return n >= threshold, n +} + +func (sr *subroundSignature) getNumOfSignaturesCollected() int { + n := 0 + + for i := 0; i < len(sr.ConsensusGroup()); i++ { + node := sr.ConsensusGroup()[i] + + isSignJobDone, err := sr.JobDone(node, sr.Current()) + if err != nil { + log.Debug("getNumOfSignaturesCollected.JobDone", + "node", node, + "subround", sr.Name(), + "error", err.Error()) + continue + } + + if isSignJobDone { + n++ + } + } + + return n +} + +func (sr *subroundSignature) waitAllSignatures() { + remainingTime := sr.remainingTime() + time.Sleep(remainingTime) + + if sr.IsSubroundFinished(sr.Current()) { + return + } + + sr.WaitingAllSignaturesTimeOut = true + + select { + case sr.ConsensusChannel() <- true: + default: + } +} + +func (sr *subroundSignature) remainingTime() time.Duration { + startTime := sr.RoundHandler().TimeStamp() + maxTime := time.Duration(float64(sr.StartTime()) + float64(sr.EndTime()-sr.StartTime())*waitingAllSigsMaxTimeThreshold) + remainigTime := sr.RoundHandler().RemainingTime(startTime, maxTime) + + return remainigTime +} + +func (sr *subroundSignature) doSignatureJobForManagedKeys() bool { + isMultiKeyLeader := sr.IsMultiKeyLeaderInCurrentRound() + + numMultiKeysSignaturesSent := 0 + for idx, pk := range sr.ConsensusGroup() { + pkBytes := []byte(pk) + if sr.IsJobDone(pk, sr.Current()) { + continue + } + if !sr.IsKeyManagedByCurrentNode(pkBytes) { + continue + } + + selfIndex, err := sr.ConsensusGroupIndex(pk) + if err != nil { + log.Warn("doSignatureJobForManagedKeys: index not found", "pk", pkBytes) + continue + } + + signatureShare, err := sr.SigningHandler().CreateSignatureShareForPublicKey( + sr.GetData(), + uint16(selfIndex), + sr.Header.GetEpoch(), + pkBytes, + ) + if err != nil { + log.Debug("doSignatureJobForManagedKeys.CreateSignatureShareForPublicKey", "error", err.Error()) + return false + } + + if !isMultiKeyLeader { + ok := sr.createAndSendSignatureMessage(signatureShare, pkBytes) + if !ok { + return false + } + + numMultiKeysSignaturesSent++ + } + sr.sentSignatureTracker.SignatureSent(pkBytes) + + isLeader := idx == spos.IndexOfLeaderInConsensusGroup + ok := sr.completeSignatureSubRound(pk, isLeader) + if !ok { + return false + } + } + + if numMultiKeysSignaturesSent > 0 { + log.Debug("step 2: multi keys signatures have been sent", "num", numMultiKeysSignaturesSent) + } + + return true +} + +// 
IsInterfaceNil returns true if there is no value under the interface +func (sr *subroundSignature) IsInterfaceNil() bool { + return sr == nil +} diff --git a/consensus/spos/bls/v1/subroundSignature_test.go b/consensus/spos/bls/v1/subroundSignature_test.go new file mode 100644 index 00000000000..1dac174eb96 --- /dev/null +++ b/consensus/spos/bls/v1/subroundSignature_test.go @@ -0,0 +1,776 @@ +package v1_test + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/testscommon" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +func initSubroundSignatureWithContainer(container *mock.ConsensusCoreMock) v1.SubroundSignature { + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrBlock, + v1.SrSignature, + v1.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + srSignature, _ := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + return srSignature +} + +func initSubroundSignature() v1.SubroundSignature { + container := mock.InitConsensusCore() + return initSubroundSignatureWithContainer(container) +} + +func TestNewSubroundSignature(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrBlock, + v1.SrSignature, + v1.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + t.Run("nil subround should error", func(t *testing.T) { + t.Parallel() + + srSignature, err := v1.NewSubroundSignature( + nil, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srSignature) + assert.Equal(t, spos.ErrNilSubround, err) + }) + t.Run("nil extend function handler should error", func(t *testing.T) { + t.Parallel() + + srSignature, err := v1.NewSubroundSignature( + sr, + nil, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srSignature) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + }) + t.Run("nil app status handler should error", func(t *testing.T) { + t.Parallel() + + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + nil, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srSignature) + assert.Equal(t, spos.ErrNilAppStatusHandler, err) + }) + t.Run("nil sent signatures tracker should error", func(t *testing.T) { + t.Parallel() + + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + nil, + ) + + assert.Nil(t, srSignature) + assert.Equal(t, v1.ErrNilSentSignatureTracker, err) + }) +} + +func 
TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrBlock, + v1.SrSignature, + v1.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + sr.ConsensusState = nil + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srSignature)) + assert.Equal(t, spos.ErrNilConsensusState, err) +} + +func TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrBlock, + v1.SrSignature, + v1.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetHasher(nil) + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srSignature)) + assert.Equal(t, spos.ErrNilHasher, err) +} + +func TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrBlock, + v1.SrSignature, + v1.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetMultiSignerContainer(nil) + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srSignature)) + assert.Equal(t, spos.ErrNilMultiSignerContainer, err) +} + +func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrBlock, + v1.SrSignature, + v1.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetRoundHandler(nil) + + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srSignature)) + assert.Equal(t, spos.ErrNilRoundHandler, err) +} + +func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrBlock, + v1.SrSignature, + v1.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + 
"(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetSyncTimer(nil) + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srSignature)) + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrBlock, + v1.SrSignature, + v1.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.False(t, check.IfNil(srSignature)) + assert.Nil(t, err) +} + +func TestSubroundSignature_DoSignatureJob(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundSignatureWithContainer(container) + + sr.Header = &block.Header{} + sr.Data = nil + r := sr.DoSignatureJob() + assert.False(t, r) + + sr.Data = []byte("X") + + err := errors.New("create signature share error") + signingHandler := &consensusMocks.SigningHandlerStub{ + CreateSignatureShareForPublicKeyCalled: func(msg []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + return nil, err + }, + } + container.SetSigningHandler(signingHandler) + + r = sr.DoSignatureJob() + assert.False(t, r) + + signingHandler = &consensusMocks.SigningHandlerStub{ + CreateSignatureShareForPublicKeyCalled: func(msg []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + return []byte("SIG"), nil + }, + } + container.SetSigningHandler(signingHandler) + + r = sr.DoSignatureJob() + assert.True(t, r) + + _ = sr.SetJobDone(sr.SelfPubKey(), v1.SrSignature, false) + sr.RoundCanceled = false + sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + r = sr.DoSignatureJob() + assert.True(t, r) + assert.False(t, sr.RoundCanceled) +} + +func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusStateWithKeysHandler( + &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return true + }, + }, + ) + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + v1.SrBlock, + v1.SrSignature, + v1.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + signatureSentForPks := make(map[string]struct{}) + srSignature, _ := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{ + SignatureSentCalled: func(pkBytes []byte) { + signatureSentForPks[string(pkBytes)] = struct{}{} + }, + }, + ) + + srSignature.Header = &block.Header{} + srSignature.Data = nil + r := srSignature.DoSignatureJob() + assert.False(t, r) + + sr.Data = []byte("X") + + err := errors.New("create signature share error") + signingHandler := &consensusMocks.SigningHandlerStub{ + 
CreateSignatureShareForPublicKeyCalled: func(msg []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + return nil, err + }, + } + container.SetSigningHandler(signingHandler) + + r = srSignature.DoSignatureJob() + assert.False(t, r) + + signingHandler = &consensusMocks.SigningHandlerStub{ + CreateSignatureShareForPublicKeyCalled: func(msg []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + return []byte("SIG"), nil + }, + } + container.SetSigningHandler(signingHandler) + + r = srSignature.DoSignatureJob() + assert.True(t, r) + + _ = sr.SetJobDone(sr.SelfPubKey(), v1.SrSignature, false) + sr.RoundCanceled = false + sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + r = srSignature.DoSignatureJob() + assert.True(t, r) + assert.False(t, sr.RoundCanceled) + expectedMap := map[string]struct{}{ + "A": {}, + "B": {}, + "C": {}, + "D": {}, + "E": {}, + "F": {}, + "G": {}, + "H": {}, + "I": {}, + } + assert.Equal(t, expectedMap, signatureSentForPks) +} + +func TestSubroundSignature_ReceivedSignature(t *testing.T) { + t.Parallel() + + sr := *initSubroundSignature() + signature := []byte("signature") + cnsMsg := consensus.NewConsensusMessage( + sr.Data, + signature, + nil, + nil, + []byte(sr.ConsensusGroup()[1]), + []byte("sig"), + int(v1.MtSignature), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + + sr.Header = &block.Header{} + sr.Data = nil + r := sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + + sr.Data = []byte("Y") + r = sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + + sr.Data = []byte("X") + r = sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + + sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + + cnsMsg.PubKey = []byte("X") + r = sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + + cnsMsg.PubKey = []byte(sr.ConsensusGroup()[1]) + maxCount := len(sr.ConsensusGroup()) * 2 / 3 + count := 0 + for i := 0; i < len(sr.ConsensusGroup()); i++ { + if sr.ConsensusGroup()[i] != string(cnsMsg.PubKey) { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + count++ + if count == maxCount { + break + } + } + } + r = sr.ReceivedSignature(cnsMsg) + assert.True(t, r) +} + +func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { + t.Parallel() + + errStore := errors.New("signature share store failed") + storeSigShareCalled := false + signingHandler := &consensusMocks.SigningHandlerStub{ + VerifySignatureShareCalled: func(index uint16, sig, msg []byte, epoch uint32) error { + return nil + }, + StoreSignatureShareCalled: func(index uint16, sig []byte) error { + storeSigShareCalled = true + return errStore + }, + } + + container := mock.InitConsensusCore() + container.SetSigningHandler(signingHandler) + sr := *initSubroundSignatureWithContainer(container) + sr.Header = &block.Header{} + + signature := []byte("signature") + cnsMsg := consensus.NewConsensusMessage( + sr.Data, + signature, + nil, + nil, + []byte(sr.ConsensusGroup()[1]), + []byte("sig"), + int(v1.MtSignature), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + + sr.Data = nil + r := sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + + sr.Data = []byte("Y") + r = sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + + sr.Data = []byte("X") + r = sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + + sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + + cnsMsg.PubKey = []byte("X") + r = sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + + cnsMsg.PubKey = []byte(sr.ConsensusGroup()[1]) + maxCount := len(sr.ConsensusGroup()) * 2 / 3 + count := 0 + for i 
:= 0; i < len(sr.ConsensusGroup()); i++ { + if sr.ConsensusGroup()[i] != string(cnsMsg.PubKey) { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + count++ + if count == maxCount { + break + } + } + } + r = sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + assert.True(t, storeSigShareCalled) +} + +func TestSubroundSignature_SignaturesCollected(t *testing.T) { + t.Parallel() + + sr := *initSubroundSignature() + + for i := 0; i < len(sr.ConsensusGroup()); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrBlock, false) + _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, false) + } + + ok, n := sr.AreSignaturesCollected(2) + assert.False(t, ok) + assert.Equal(t, 0, n) + + ok, _ = sr.AreSignaturesCollected(2) + assert.False(t, ok) + + _ = sr.SetJobDone("B", v1.SrSignature, true) + isJobDone, _ := sr.JobDone("B", v1.SrSignature) + assert.True(t, isJobDone) + + ok, _ = sr.AreSignaturesCollected(2) + assert.False(t, ok) + + _ = sr.SetJobDone("C", v1.SrSignature, true) + ok, _ = sr.AreSignaturesCollected(2) + assert.True(t, ok) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { + t.Parallel() + + sr := *initSubroundSignature() + sr.RoundCanceled = true + assert.False(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSubroundIsFinished(t *testing.T) { + t.Parallel() + + sr := *initSubroundSignature() + sr.SetStatus(v1.SrSignature, spos.SsFinished) + assert.True(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSignaturesCollectedReturnTrue(t *testing.T) { + t.Parallel() + + sr := *initSubroundSignature() + + for i := 0; i < sr.Threshold(v1.SrSignature); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + } + + assert.True(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenSignaturesCollectedReturnFalse(t *testing.T) { + t.Parallel() + + sr := *initSubroundSignature() + assert.False(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenNotAllSignaturesCollectedAndTimeIsNotOut(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundSignatureWithContainer(container) + sr.WaitingAllSignaturesTimeOut = false + + sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + + for i := 0; i < sr.Threshold(v1.SrSignature); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + } + + assert.False(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenAllSignaturesCollected(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundSignatureWithContainer(container) + sr.WaitingAllSignaturesTimeOut = false + + sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + + for i := 0; i < sr.ConsensusGroupSize(); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + } + + assert.True(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenEnoughButNotAllSignaturesCollectedAndTimeIsOut(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + sr := *initSubroundSignatureWithContainer(container) + sr.WaitingAllSignaturesTimeOut = true + + sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + + for i := 0; i < sr.Threshold(v1.SrSignature); i++ { + _ = 
sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + } + + assert.True(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenFallbackThresholdCouldNotBeApplied(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + container.SetFallbackHeaderValidator(&testscommon.FallBackHeaderValidatorStub{ + ShouldApplyFallbackValidationCalled: func(headerHandler data.HeaderHandler) bool { + return false + }, + }) + sr := *initSubroundSignatureWithContainer(container) + sr.WaitingAllSignaturesTimeOut = false + + sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + + for i := 0; i < sr.FallbackThreshold(v1.SrSignature); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + } + + assert.False(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenFallbackThresholdCouldBeApplied(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + container.SetFallbackHeaderValidator(&testscommon.FallBackHeaderValidatorStub{ + ShouldApplyFallbackValidationCalled: func(headerHandler data.HeaderHandler) bool { + return true + }, + }) + sr := *initSubroundSignatureWithContainer(container) + sr.WaitingAllSignaturesTimeOut = true + + sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + + for i := 0; i < sr.FallbackThreshold(v1.SrSignature); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + } + + assert.True(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_ReceivedSignatureReturnFalseWhenConsensusDataIsNotEqual(t *testing.T) { + t.Parallel() + + sr := *initSubroundSignature() + + cnsMsg := consensus.NewConsensusMessage( + append(sr.Data, []byte("X")...), + []byte("signature"), + nil, + nil, + []byte(sr.ConsensusGroup()[0]), + []byte("sig"), + int(v1.MtSignature), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + + assert.False(t, sr.ReceivedSignature(cnsMsg)) +} diff --git a/consensus/spos/bls/v1/subroundStartRound.go b/consensus/spos/bls/v1/subroundStartRound.go new file mode 100644 index 00000000000..b514b586241 --- /dev/null +++ b/consensus/spos/bls/v1/subroundStartRound.go @@ -0,0 +1,374 @@ +package v1 + +import ( + "context" + "encoding/hex" + "fmt" + "sync" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + outportcore "github.com/multiversx/mx-chain-core-go/data/outport" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/outport" + "github.com/multiversx/mx-chain-go/outport/disabled" +) + +// subroundStartRound defines the data needed by the subround StartRound +type subroundStartRound struct { + outportMutex sync.RWMutex + *spos.Subround + processingThresholdPercentage int + executeStoredMessages func() + resetConsensusMessages func() + + outportHandler outport.OutportHandler + sentSignatureTracker spos.SentSignaturesTracker +} + +// NewSubroundStartRound creates a subroundStartRound object +func NewSubroundStartRound( + baseSubround *spos.Subround, + extend func(subroundId int), + processingThresholdPercentage int, + executeStoredMessages func(), + resetConsensusMessages func(), + sentSignatureTracker spos.SentSignaturesTracker, +) (*subroundStartRound, error) { + err := checkNewSubroundStartRoundParams( + baseSubround, + ) + if err != nil { + return nil, err + } + if extend == nil { + return nil, 
fmt.Errorf("%w for extend function", spos.ErrNilFunctionHandler) + } + if executeStoredMessages == nil { + return nil, fmt.Errorf("%w for executeStoredMessages function", spos.ErrNilFunctionHandler) + } + if resetConsensusMessages == nil { + return nil, fmt.Errorf("%w for resetConsensusMessages function", spos.ErrNilFunctionHandler) + } + if check.IfNil(sentSignatureTracker) { + return nil, ErrNilSentSignatureTracker + } + + srStartRound := subroundStartRound{ + Subround: baseSubround, + processingThresholdPercentage: processingThresholdPercentage, + executeStoredMessages: executeStoredMessages, + resetConsensusMessages: resetConsensusMessages, + outportHandler: disabled.NewDisabledOutport(), + sentSignatureTracker: sentSignatureTracker, + outportMutex: sync.RWMutex{}, + } + srStartRound.Job = srStartRound.doStartRoundJob + srStartRound.Check = srStartRound.doStartRoundConsensusCheck + srStartRound.Extend = extend + baseSubround.EpochStartRegistrationHandler().RegisterHandler(&srStartRound) + + return &srStartRound, nil +} + +func checkNewSubroundStartRoundParams( + baseSubround *spos.Subround, +) error { + if baseSubround == nil { + return spos.ErrNilSubround + } + if baseSubround.ConsensusState == nil { + return spos.ErrNilConsensusState + } + + err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) + + return err +} + +// SetOutportHandler method sets outport handler +func (sr *subroundStartRound) SetOutportHandler(outportHandler outport.OutportHandler) error { + if check.IfNil(outportHandler) { + return outport.ErrNilDriver + } + + sr.outportMutex.Lock() + sr.outportHandler = outportHandler + sr.outportMutex.Unlock() + + return nil +} + +// doStartRoundJob method does the job of the subround StartRound +func (sr *subroundStartRound) doStartRoundJob(_ context.Context) bool { + sr.ResetConsensusState() + sr.RoundIndex = sr.RoundHandler().Index() + sr.RoundTimeStamp = sr.RoundHandler().TimeStamp() + topic := spos.GetConsensusTopicID(sr.ShardCoordinator()) + sr.GetAntiFloodHandler().ResetForTopic(topic) + sr.resetConsensusMessages() + return true +} + +// doStartRoundConsensusCheck method checks if the consensus is achieved in the subround StartRound +func (sr *subroundStartRound) doStartRoundConsensusCheck() bool { + if sr.RoundCanceled { + return false + } + + if sr.IsSubroundFinished(sr.Current()) { + return true + } + + if sr.initCurrentRound() { + return true + } + + return false +} + +func (sr *subroundStartRound) initCurrentRound() bool { + nodeState := sr.BootStrapper().GetNodeState() + if nodeState != common.NsSynchronized { // if node is not synchronized yet, it has to continue the bootstrapping mechanism + return false + } + + sr.AppStatusHandler().SetStringValue(common.MetricConsensusRoundState, "") + + err := sr.generateNextConsensusGroup(sr.RoundHandler().Index()) + if err != nil { + log.Debug("initCurrentRound.generateNextConsensusGroup", + "round index", sr.RoundHandler().Index(), + "error", err.Error()) + + sr.RoundCanceled = true + + return false + } + + if sr.NodeRedundancyHandler().IsRedundancyNode() { + sr.NodeRedundancyHandler().AdjustInactivityIfNeeded( + sr.SelfPubKey(), + sr.ConsensusGroup(), + sr.RoundHandler().Index(), + ) + // we should not return here, the multikey redundancy system relies on it + // the NodeRedundancyHandler "thinks" it is in redundancy mode even if we use the multikey redundancy system + } + + leader, err := sr.GetLeader() + if err != nil { + log.Debug("initCurrentRound.GetLeader", "error", err.Error()) + + sr.RoundCanceled = 
true + + return false + } + + msg := "" + if sr.IsKeyManagedByCurrentNode([]byte(leader)) { + msg = " (my turn in multi-key)" + } + if leader == sr.SelfPubKey() && sr.ShouldConsiderSelfKeyInConsensus() { + msg = " (my turn)" + } + if len(msg) != 0 { + sr.AppStatusHandler().Increment(common.MetricCountLeader) + sr.AppStatusHandler().SetStringValue(common.MetricConsensusRoundState, "proposed") + sr.AppStatusHandler().SetStringValue(common.MetricConsensusState, "proposer") + } + + log.Debug("step 0: preparing the round", + "leader", core.GetTrimmedPk(hex.EncodeToString([]byte(leader))), + "messsage", msg) + sr.sentSignatureTracker.StartRound() + + pubKeys := sr.ConsensusGroup() + numMultiKeysInConsensusGroup := sr.computeNumManagedKeysInConsensusGroup(pubKeys) + + sr.indexRoundIfNeeded(pubKeys) + + isSingleKeyLeader := leader == sr.SelfPubKey() && sr.ShouldConsiderSelfKeyInConsensus() + isLeader := isSingleKeyLeader || sr.IsKeyManagedByCurrentNode([]byte(leader)) + isSelfInConsensus := sr.IsNodeInConsensusGroup(sr.SelfPubKey()) || numMultiKeysInConsensusGroup > 0 + if !isSelfInConsensus { + log.Debug("not in consensus group") + sr.AppStatusHandler().SetStringValue(common.MetricConsensusState, "not in consensus group") + } else { + if !isLeader { + sr.AppStatusHandler().Increment(common.MetricCountConsensus) + sr.AppStatusHandler().SetStringValue(common.MetricConsensusState, "participant") + } + } + + err = sr.SigningHandler().Reset(pubKeys) + if err != nil { + log.Debug("initCurrentRound.Reset", "error", err.Error()) + + sr.RoundCanceled = true + + return false + } + + startTime := sr.RoundTimeStamp + maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 + if sr.RoundHandler().RemainingTime(startTime, maxTime) < 0 { + log.Debug("canceled round, time is out", + "round", sr.SyncTimer().FormattedCurrentTime(), sr.RoundHandler().Index(), + "subround", sr.Name()) + + sr.RoundCanceled = true + + return false + } + + sr.SetStatus(sr.Current(), spos.SsFinished) + + // execute stored messages which were received in this new round but before this initialisation + go sr.executeStoredMessages() + + return true +} + +func (sr *subroundStartRound) computeNumManagedKeysInConsensusGroup(pubKeys []string) int { + numMultiKeysInConsensusGroup := 0 + for _, pk := range pubKeys { + pkBytes := []byte(pk) + if sr.IsKeyManagedByCurrentNode(pkBytes) { + numMultiKeysInConsensusGroup++ + log.Trace("in consensus group with multi key", + "pk", core.GetTrimmedPk(hex.EncodeToString(pkBytes))) + } + sr.IncrementRoundsWithoutReceivedMessages(pkBytes) + } + + if numMultiKeysInConsensusGroup > 0 { + log.Debug("in consensus group with multi keys identities", "num", numMultiKeysInConsensusGroup) + } + + return numMultiKeysInConsensusGroup +} + +func (sr *subroundStartRound) indexRoundIfNeeded(pubKeys []string) { + sr.outportMutex.RLock() + defer sr.outportMutex.RUnlock() + + if !sr.outportHandler.HasDrivers() { + return + } + + currentHeader := sr.Blockchain().GetCurrentBlockHeader() + if check.IfNil(currentHeader) { + currentHeader = sr.Blockchain().GetGenesisHeader() + } + + epoch := currentHeader.GetEpoch() + shardId := sr.ShardCoordinator().SelfId() + nodesCoordinatorShardID, err := sr.NodesCoordinator().ShardIdForEpoch(epoch) + if err != nil { + log.Debug("initCurrentRound.ShardIdForEpoch", + "epoch", epoch, + "error", err.Error()) + return + } + + if shardId != nodesCoordinatorShardID { + log.Debug("initCurrentRound.ShardIdForEpoch", + "epoch", epoch, + 
"shardCoordinator.ShardID", shardId, + "nodesCoordinator.ShardID", nodesCoordinatorShardID) + return + } + + signersIndexes, err := sr.NodesCoordinator().GetValidatorsIndexes(pubKeys, epoch) + if err != nil { + log.Error(err.Error()) + return + } + + round := sr.RoundHandler().Index() + + roundInfo := &outportcore.RoundInfo{ + Round: uint64(round), + SignersIndexes: signersIndexes, + BlockWasProposed: false, + ShardId: shardId, + Epoch: epoch, + Timestamp: uint64(sr.RoundTimeStamp.Unix()), + } + roundsInfo := &outportcore.RoundsInfo{ + ShardID: shardId, + RoundsInfo: []*outportcore.RoundInfo{roundInfo}, + } + sr.outportHandler.SaveRoundsInfo(roundsInfo) +} + +func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error { + currentHeader := sr.Blockchain().GetCurrentBlockHeader() + if check.IfNil(currentHeader) { + currentHeader = sr.Blockchain().GetGenesisHeader() + if check.IfNil(currentHeader) { + return spos.ErrNilHeader + } + } + + randomSeed := currentHeader.GetRandSeed() + + log.Debug("random source for the next consensus group", + "rand", randomSeed) + + shardId := sr.ShardCoordinator().SelfId() + + nextConsensusGroup, err := sr.GetNextConsensusGroup( + randomSeed, + uint64(sr.RoundIndex), + shardId, + sr.NodesCoordinator(), + currentHeader.GetEpoch(), + ) + if err != nil { + return err + } + + log.Trace("consensus group is formed by next validators:", + "round", roundIndex) + + for i := 0; i < len(nextConsensusGroup); i++ { + log.Trace(core.GetTrimmedPk(hex.EncodeToString([]byte(nextConsensusGroup[i])))) + } + + sr.SetConsensusGroup(nextConsensusGroup) + + return nil +} + +// EpochStartPrepare wis called when an epoch start event is observed, but not yet confirmed/committed. +// Some components may need to do initialisation on this event +func (sr *subroundStartRound) EpochStartPrepare(metaHdr data.HeaderHandler, _ data.BodyHandler) { + log.Trace(fmt.Sprintf("epoch %d start prepare in consensus", metaHdr.GetEpoch())) +} + +// EpochStartAction is called upon a start of epoch event. 
+func (sr *subroundStartRound) EpochStartAction(hdr data.HeaderHandler) { + log.Trace(fmt.Sprintf("epoch %d start action in consensus", hdr.GetEpoch())) + + sr.changeEpoch(hdr.GetEpoch()) +} + +func (sr *subroundStartRound) changeEpoch(currentEpoch uint32) { + epochNodes, err := sr.NodesCoordinator().GetConsensusWhitelistedNodes(currentEpoch) + if err != nil { + panic(fmt.Sprintf("consensus changing epoch failed with error %s", err.Error())) + } + + sr.SetEligibleList(epochNodes) +} + +// NotifyOrder returns the notification order for a start of epoch event +func (sr *subroundStartRound) NotifyOrder() uint32 { + return common.ConsensusOrder +} diff --git a/consensus/spos/bls/v1/subroundStartRound_test.go b/consensus/spos/bls/v1/subroundStartRound_test.go new file mode 100644 index 00000000000..96ab0bbd440 --- /dev/null +++ b/consensus/spos/bls/v1/subroundStartRound_test.go @@ -0,0 +1,835 @@ +package v1_test + +import ( + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/consensus/spos" + v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (v1.SubroundStartRound, error) { + startRound, err := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + + return startRound, err +} + +func defaultWithoutErrorSubroundStartRoundFromSubround(sr *spos.Subround) v1.SubroundStartRound { + startRound, _ := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + + return startRound +} + +func defaultSubround( + consensusState *spos.ConsensusState, + ch chan bool, + container spos.ConsensusCoreHandler, +) (*spos.Subround, error) { + + return spos.NewSubround( + -1, + v1.SrStartRound, + v1.SrBlock, + int64(0*roundTimeDuration/100), + int64(5*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) +} + +func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) v1.SubroundStartRound { + consensusState := initConsensusState() + ch := make(chan bool, 1) + sr, _ := defaultSubround(consensusState, ch, container) + srStartRound, _ := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + + return srStartRound +} + +func initSubroundStartRound() v1.SubroundStartRound { + container := mock.InitConsensusCore() + return initSubroundStartRoundWithContainer(container) +} + +func TestNewSubroundStartRound(t *testing.T) { + t.Parallel() + + ch := make(chan bool, 1) + consensusState := initConsensusState() + container := mock.InitConsensusCore() + sr, _ := spos.NewSubround( + -1, + v1.SrStartRound, + v1.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + 
chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + t.Run("nil subround should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := v1.NewSubroundStartRound( + nil, + extend, + v1.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilSubround, err) + }) + t.Run("nil extend function handler should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := v1.NewSubroundStartRound( + sr, + nil, + v1.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srStartRound) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + assert.Contains(t, err.Error(), "extend") + }) + t.Run("nil executeStoredMessages function handler should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + nil, + resetConsensusMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srStartRound) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + assert.Contains(t, err.Error(), "executeStoredMessages") + }) + t.Run("nil resetConsensusMessages function handler should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + executeStoredMessages, + nil, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srStartRound) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + assert.Contains(t, err.Error(), "resetConsensusMessages") + }) + t.Run("nil sent signatures tracker should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + nil, + ) + + assert.Nil(t, srStartRound) + assert.Equal(t, v1.ErrNilSentSignatureTracker, err) + }) +} + +func TestSubroundStartRound_NewSubroundStartRoundNilBlockChainShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + container.SetBlockchain(nil) + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilBlockChain, err) +} + +func TestSubroundStartRound_NewSubroundStartRoundNilBootstrapperShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + container.SetBootStrapper(nil) + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilBootstrapper, err) +} + +func TestSubroundStartRound_NewSubroundStartRoundNilConsensusStateShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + + sr.ConsensusState = nil + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilConsensusState, err) +} + +func TestSubroundStartRound_NewSubroundStartRoundNilMultiSignerContainerShouldFail(t *testing.T) { + t.Parallel() + + 
container := mock.InitConsensusCore() + + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + container.SetMultiSignerContainer(nil) + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilMultiSignerContainer, err) +} + +func TestSubroundStartRound_NewSubroundStartRoundNilRoundHandlerShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + container.SetRoundHandler(nil) + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilRoundHandler, err) +} + +func TestSubroundStartRound_NewSubroundStartRoundNilSyncTimerShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + container.SetSyncTimer(nil) + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestSubroundStartRound_NewSubroundStartRoundNilValidatorGroupSelectorShouldFail(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + container.SetValidatorGroupSelector(nil) + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilNodesCoordinator, err) +} + +func TestSubroundStartRound_NewSubroundStartRoundShouldWork(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.NotNil(t, srStartRound) + assert.Nil(t, err) +} + +func TestSubroundStartRound_DoStartRoundShouldReturnTrue(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + + consensusState := initConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + + srStartRound := *defaultWithoutErrorSubroundStartRoundFromSubround(sr) + + r := srStartRound.DoStartRoundJob() + assert.True(t, r) +} + +func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { + t.Parallel() + + sr := *initSubroundStartRound() + + sr.RoundCanceled = true + + ok := sr.DoStartRoundConsensusCheck() + assert.False(t, ok) +} + +func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenRoundIsFinished(t *testing.T) { + t.Parallel() + + sr := *initSubroundStartRound() + + sr.SetStatus(v1.SrStartRound, spos.SsFinished) + + ok := sr.DoStartRoundConsensusCheck() + assert.True(t, ok) +} + +func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenInitCurrentRoundReturnTrue(t *testing.T) { + t.Parallel() + + bootstrapperMock := &mock.BootstrapperStub{GetNodeStateCalled: func() common.NodeState { + return common.NsSynchronized + }} + + container := mock.InitConsensusCore() + container.SetBootStrapper(bootstrapperMock) + + sr := *initSubroundStartRoundWithContainer(container) + sentTrackerInterface := 
sr.GetSentSignatureTracker() + sentTracker := sentTrackerInterface.(*testscommon.SentSignatureTrackerStub) + startRoundCalled := false + sentTracker.StartRoundCalled = func() { + startRoundCalled = true + } + + ok := sr.DoStartRoundConsensusCheck() + assert.True(t, ok) + assert.True(t, startRoundCalled) +} + +func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenInitCurrentRoundReturnFalse(t *testing.T) { + t.Parallel() + + bootstrapperMock := &mock.BootstrapperStub{GetNodeStateCalled: func() common.NodeState { + return common.NsNotSynchronized + }} + + container := mock.InitConsensusCore() + container.SetBootStrapper(bootstrapperMock) + container.SetRoundHandler(initRoundHandlerMock()) + + sr := *initSubroundStartRoundWithContainer(container) + + ok := sr.DoStartRoundConsensusCheck() + assert.False(t, ok) +} + +func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetNodeStateNotReturnSynchronized(t *testing.T) { + t.Parallel() + + bootstrapperMock := &mock.BootstrapperStub{} + + bootstrapperMock.GetNodeStateCalled = func() common.NodeState { + return common.NsNotSynchronized + } + container := mock.InitConsensusCore() + container.SetBootStrapper(bootstrapperMock) + + srStartRound := *initSubroundStartRoundWithContainer(container) + + r := srStartRound.InitCurrentRound() + assert.False(t, r) +} + +func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextConsensusGroupErr(t *testing.T) { + t.Parallel() + + validatorGroupSelector := &shardingMocks.NodesCoordinatorMock{} + err := errors.New("error") + validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte, round uint64, shardId uint32, epoch uint32) ([]nodesCoordinator.Validator, error) { + return nil, err + } + container := mock.InitConsensusCore() + container.SetValidatorGroupSelector(validatorGroupSelector) + + srStartRound := *initSubroundStartRoundWithContainer(container) + + r := srStartRound.InitCurrentRound() + assert.False(t, r) +} + +func TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenMainMachineIsActive(t *testing.T) { + t.Parallel() + + nodeRedundancyMock := &mock.NodeRedundancyHandlerStub{ + IsRedundancyNodeCalled: func() bool { + return true + }, + } + container := mock.InitConsensusCore() + container.SetNodeRedundancyHandler(nodeRedundancyMock) + + srStartRound := *initSubroundStartRoundWithContainer(container) + + r := srStartRound.InitCurrentRound() + assert.True(t, r) +} + +func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetLeaderErr(t *testing.T) { + t.Parallel() + + validatorGroupSelector := &shardingMocks.NodesCoordinatorMock{} + validatorGroupSelector.ComputeValidatorsGroupCalled = func( + bytes []byte, + round uint64, + shardId uint32, + epoch uint32, + ) ([]nodesCoordinator.Validator, error) { + return make([]nodesCoordinator.Validator, 0), nil + } + + container := mock.InitConsensusCore() + container.SetValidatorGroupSelector(validatorGroupSelector) + + srStartRound := *initSubroundStartRoundWithContainer(container) + + r := srStartRound.InitCurrentRound() + assert.False(t, r) +} + +func TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenIsNotInTheConsensusGroup(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + consensusState := initConsensusState() + consensusState.SetSelfPubKey(consensusState.SelfPubKey() + "X") + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + + srStartRound := *defaultWithoutErrorSubroundStartRoundFromSubround(sr) + + r := 
srStartRound.InitCurrentRound() + assert.True(t, r) +} + +func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenTimeIsOut(t *testing.T) { + t.Parallel() + + roundHandlerMock := initRoundHandlerMock() + + roundHandlerMock.RemainingTimeCalled = func(time.Time, time.Duration) time.Duration { + return time.Duration(-1) + } + + container := mock.InitConsensusCore() + container.SetRoundHandler(roundHandlerMock) + + srStartRound := *initSubroundStartRoundWithContainer(container) + + r := srStartRound.InitCurrentRound() + assert.False(t, r) +} + +func TestSubroundStartRound_InitCurrentRoundShouldReturnTrue(t *testing.T) { + t.Parallel() + + bootstrapperMock := &mock.BootstrapperStub{} + + bootstrapperMock.GetNodeStateCalled = func() common.NodeState { + return common.NsSynchronized + } + + container := mock.InitConsensusCore() + container.SetBootStrapper(bootstrapperMock) + + srStartRound := *initSubroundStartRoundWithContainer(container) + + r := srStartRound.InitCurrentRound() + assert.True(t, r) +} + +func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { + t.Parallel() + + t.Run("not in consensus node", func(t *testing.T) { + t.Parallel() + + wasCalled := false + container := mock.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{} + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetStringValueHandler: func(key string, value string) { + if key == common.MetricConsensusState { + wasCalled = true + assert.Equal(t, value, "not in consensus group") + } + }, + } + ch := make(chan bool, 1) + consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState.SetSelfPubKey("not in consensus") + sr, _ := spos.NewSubround( + -1, + v1.SrStartRound, + v1.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) + + srStartRound, _ := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + executeStoredMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + srStartRound.Check() + assert.True(t, wasCalled) + }) + t.Run("main key participant", func(t *testing.T) { + t.Parallel() + + wasCalled := false + wasIncrementCalled := false + container := mock.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return string(pkBytes) == "B" + }, + } + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetStringValueHandler: func(key string, value string) { + if key == common.MetricConsensusState { + wasCalled = true + assert.Equal(t, "participant", value) + } + }, + IncrementHandler: func(key string) { + if key == common.MetricCountConsensus { + wasIncrementCalled = true + } + }, + } + ch := make(chan bool, 1) + consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState.SetSelfPubKey("B") + sr, _ := spos.NewSubround( + -1, + v1.SrStartRound, + v1.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) + + srStartRound, _ := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + executeStoredMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + srStartRound.Check() + assert.True(t, wasCalled) + assert.True(t, wasIncrementCalled) + }) + t.Run("multi 
key participant", func(t *testing.T) { + t.Parallel() + + wasCalled := false + wasIncrementCalled := false + container := mock.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{} + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetStringValueHandler: func(key string, value string) { + if key == common.MetricConsensusState { + wasCalled = true + assert.Equal(t, value, "participant") + } + }, + IncrementHandler: func(key string) { + if key == common.MetricCountConsensus { + wasIncrementCalled = true + } + }, + } + ch := make(chan bool, 1) + consensusState := initConsensusStateWithKeysHandler(keysHandler) + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return string(pkBytes) == consensusState.SelfPubKey() + } + sr, _ := spos.NewSubround( + -1, + v1.SrStartRound, + v1.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) + + srStartRound, _ := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + executeStoredMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + srStartRound.Check() + assert.True(t, wasCalled) + assert.True(t, wasIncrementCalled) + }) + t.Run("main key leader", func(t *testing.T) { + t.Parallel() + + wasMetricConsensusStateCalled := false + wasMetricCountLeaderCalled := false + cntMetricConsensusRoundStateCalled := 0 + container := mock.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{} + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetStringValueHandler: func(key string, value string) { + if key == common.MetricConsensusState { + wasMetricConsensusStateCalled = true + assert.Equal(t, value, "proposer") + } + if key == common.MetricConsensusRoundState { + cntMetricConsensusRoundStateCalled++ + switch cntMetricConsensusRoundStateCalled { + case 1: + assert.Equal(t, value, "") + case 2: + assert.Equal(t, value, "proposed") + default: + assert.Fail(t, "should have been called only twice") + } + } + }, + IncrementHandler: func(key string) { + if key == common.MetricCountLeader { + wasMetricCountLeaderCalled = true + } + }, + } + ch := make(chan bool, 1) + consensusState := initConsensusStateWithKeysHandler(keysHandler) + leader, _ := consensusState.GetLeader() + consensusState.SetSelfPubKey(leader) + sr, _ := spos.NewSubround( + -1, + v1.SrStartRound, + v1.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) + + srStartRound, _ := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + executeStoredMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + srStartRound.Check() + assert.True(t, wasMetricConsensusStateCalled) + assert.True(t, wasMetricCountLeaderCalled) + assert.Equal(t, 2, cntMetricConsensusRoundStateCalled) + }) + t.Run("managed key leader", func(t *testing.T) { + t.Parallel() + + wasMetricConsensusStateCalled := false + wasMetricCountLeaderCalled := false + cntMetricConsensusRoundStateCalled := 0 + container := mock.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{} + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetStringValueHandler: func(key string, value string) { + if key == common.MetricConsensusState { + wasMetricConsensusStateCalled = true + 
assert.Equal(t, value, "proposer") + } + if key == common.MetricConsensusRoundState { + cntMetricConsensusRoundStateCalled++ + switch cntMetricConsensusRoundStateCalled { + case 1: + assert.Equal(t, value, "") + case 2: + assert.Equal(t, value, "proposed") + default: + assert.Fail(t, "should have been called only twice") + } + } + }, + IncrementHandler: func(key string) { + if key == common.MetricCountLeader { + wasMetricCountLeaderCalled = true + } + }, + } + ch := make(chan bool, 1) + consensusState := initConsensusStateWithKeysHandler(keysHandler) + leader, _ := consensusState.GetLeader() + consensusState.SetSelfPubKey(leader) + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return string(pkBytes) == leader + } + sr, _ := spos.NewSubround( + -1, + v1.SrStartRound, + v1.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) + + srStartRound, _ := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + executeStoredMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + srStartRound.Check() + assert.True(t, wasMetricConsensusStateCalled) + assert.True(t, wasMetricCountLeaderCalled) + assert.Equal(t, 2, cntMetricConsensusRoundStateCalled) + }) +} + +func TestSubroundStartRound_GenerateNextConsensusGroupShouldReturnErr(t *testing.T) { + t.Parallel() + + validatorGroupSelector := &shardingMocks.NodesCoordinatorMock{} + + err := errors.New("error") + validatorGroupSelector.ComputeValidatorsGroupCalled = func( + bytes []byte, + round uint64, + shardId uint32, + epoch uint32, + ) ([]nodesCoordinator.Validator, error) { + return nil, err + } + container := mock.InitConsensusCore() + container.SetValidatorGroupSelector(validatorGroupSelector) + + srStartRound := *initSubroundStartRoundWithContainer(container) + + err2 := srStartRound.GenerateNextConsensusGroup(0) + + assert.Equal(t, err, err2) +} diff --git a/consensus/spos/bls/benchmark_test.go b/consensus/spos/bls/v2/benchmark_test.go similarity index 99% rename from consensus/spos/bls/benchmark_test.go rename to consensus/spos/bls/v2/benchmark_test.go index 4a0802760b8..7cc8235bc84 100644 --- a/consensus/spos/bls/benchmark_test.go +++ b/consensus/spos/bls/v2/benchmark_test.go @@ -1,4 +1,4 @@ -package bls_test +package v2_test import ( "context" diff --git a/consensus/spos/bls/benchmark_verify_signatures_test.go b/consensus/spos/bls/v2/benchmark_verify_signatures_test.go similarity index 99% rename from consensus/spos/bls/benchmark_verify_signatures_test.go rename to consensus/spos/bls/v2/benchmark_verify_signatures_test.go index 85b14c9a2c2..0190f50ea01 100644 --- a/consensus/spos/bls/benchmark_verify_signatures_test.go +++ b/consensus/spos/bls/v2/benchmark_verify_signatures_test.go @@ -1,4 +1,4 @@ -package bls_test +package v2_test import ( "context" diff --git a/consensus/spos/bls/blsSubroundsFactory.go b/consensus/spos/bls/v2/blsSubroundsFactory.go similarity index 99% rename from consensus/spos/bls/blsSubroundsFactory.go rename to consensus/spos/bls/v2/blsSubroundsFactory.go index 28531a6af49..dfb6a4050f3 100644 --- a/consensus/spos/bls/blsSubroundsFactory.go +++ b/consensus/spos/bls/v2/blsSubroundsFactory.go @@ -1,4 +1,4 @@ -package bls +package v2 import ( "time" diff --git a/consensus/spos/bls/blsSubroundsFactory_test.go b/consensus/spos/bls/v2/blsSubroundsFactory_test.go similarity index 99% rename from 
consensus/spos/bls/blsSubroundsFactory_test.go rename to consensus/spos/bls/v2/blsSubroundsFactory_test.go index ce976c27c58..1e227a96fd6 100644 --- a/consensus/spos/bls/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/v2/blsSubroundsFactory_test.go @@ -1,4 +1,4 @@ -package bls_test +package v2_test import ( "context" diff --git a/consensus/spos/bls/v2/blsWorker.go b/consensus/spos/bls/v2/blsWorker.go new file mode 100644 index 00000000000..79d1cbb24c0 --- /dev/null +++ b/consensus/spos/bls/v2/blsWorker.go @@ -0,0 +1,163 @@ +package v2 + +import ( + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" +) + +// peerMaxMessagesPerSec defines how many messages can be propagated by a pid in a round. The value was chosen by +// following the next premises: +// 1. a leader can propagate at most 3 messages per round: proposed header block + proposed body + final info; +// 2. a delayed signature of the proposer (from the previous round) can be received in the current round, which +// adds an extra 1 to the total value, reaching value 4; +// 3. because the leader might be selected in the next round and might have an empty data pool, it can send the new +// empty proposed block at the very beginning of the next round. One extra message here, yielding a total of 5. +// 4. if we consider the forks that can appear in the system, we need to add one more to the value. +// +// Validators only send one signature message in a round; treating the edge case of a delayed message, they will need at most +// 2 messages per round (which is ok, as it is below the set value of 6) +const peerMaxMessagesPerSec = uint32(6) + +// defaultMaxNumOfMessageTypeAccepted represents the maximum number of the same message type accepted in one round to be +// received from the same public key for the default message types +const defaultMaxNumOfMessageTypeAccepted = uint32(1) + +// maxNumOfMessageTypeSignatureAccepted represents the maximum number of the signature message type accepted in one round to be +// received from the same public key +const maxNumOfMessageTypeSignatureAccepted = uint32(2) + +// worker defines the data needed by spos to communicate between nodes which are in the validators group +type worker struct { +} + +// NewConsensusService creates a new worker object +func NewConsensusService() (*worker, error) { + wrk := worker{} + + return &wrk, nil +} + +// InitReceivedMessages initializes the MessagesType map for all messages for the current ConsensusService +func (wrk *worker) InitReceivedMessages() map[consensus.MessageType][]*consensus.Message { + receivedMessages := make(map[consensus.MessageType][]*consensus.Message) + receivedMessages[MtBlockBodyAndHeader] = make([]*consensus.Message, 0) + receivedMessages[MtBlockBody] = make([]*consensus.Message, 0) + receivedMessages[MtBlockHeader] = make([]*consensus.Message, 0) + receivedMessages[MtSignature] = make([]*consensus.Message, 0) + receivedMessages[MtBlockHeaderFinalInfo] = make([]*consensus.Message, 0) + receivedMessages[MtInvalidSigners] = make([]*consensus.Message, 0) + + return receivedMessages +} + +// GetMaxMessagesInARoundPerPeer returns the maximum number of messages a peer can send per round for BLS +func (wrk *worker) GetMaxMessagesInARoundPerPeer() uint32 { + return peerMaxMessagesPerSec +} + +// GetStringValue gets the name of the messageType +func (wrk *worker) GetStringValue(messageType consensus.MessageType) string { + return getStringValue(messageType) +} +
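Taken together, the constants above give the antiflood checks one per-peer budget and per-message-type budgets. A minimal illustrative sketch of what the getters in this file return (the wiring is hypothetical; only the constructor, the getters and the message-type IDs come from this package):

svc, _ := NewConsensusService()
_ = svc.GetMaxMessagesInARoundPerPeer()               // 6: consensus messages accepted from one peer in a round
_ = svc.GetMaxNumOfMessageTypeAccepted(MtSignature)   // 2: signature messages accepted from one public key
_ = svc.GetMaxNumOfMessageTypeAccepted(MtBlockHeader) // 1: default for every other message type

+// GetSubroundName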
gets the subround name for the subround id provided +func (wrk *worker) GetSubroundName(subroundId int) string { + return getSubroundName(subroundId) +} + +// IsMessageWithBlockBodyAndHeader returns if the current messageType is about block body and header +func (wrk *worker) IsMessageWithBlockBodyAndHeader(msgType consensus.MessageType) bool { + return msgType == MtBlockBodyAndHeader +} + +// IsMessageWithBlockBody returns if the current messageType is about block body +func (wrk *worker) IsMessageWithBlockBody(msgType consensus.MessageType) bool { + return msgType == MtBlockBody +} + +// IsMessageWithBlockHeader returns if the current messageType is about block header +func (wrk *worker) IsMessageWithBlockHeader(msgType consensus.MessageType) bool { + return msgType == MtBlockHeader +} + +// IsMessageWithSignature returns if the current messageType is about signature +func (wrk *worker) IsMessageWithSignature(msgType consensus.MessageType) bool { + return msgType == MtSignature +} + +// IsMessageWithFinalInfo returns if the current messageType is about header final info +func (wrk *worker) IsMessageWithFinalInfo(msgType consensus.MessageType) bool { + return msgType == MtBlockHeaderFinalInfo +} + +// IsMessageWithInvalidSigners returns if the current messageType is about invalid signers +func (wrk *worker) IsMessageWithInvalidSigners(msgType consensus.MessageType) bool { + return msgType == MtInvalidSigners +} + +// IsMessageTypeValid returns if the current messageType is valid +func (wrk *worker) IsMessageTypeValid(msgType consensus.MessageType) bool { + isMessageTypeValid := msgType == MtBlockBodyAndHeader || + msgType == MtBlockBody || + msgType == MtBlockHeader || + msgType == MtSignature || + msgType == MtBlockHeaderFinalInfo || + msgType == MtInvalidSigners + + return isMessageTypeValid +} + +// IsSubroundSignature returns if the current subround is about signature +func (wrk *worker) IsSubroundSignature(subroundId int) bool { + return subroundId == SrSignature +} + +// IsSubroundStartRound returns if the current subround is about start round +func (wrk *worker) IsSubroundStartRound(subroundId int) bool { + return subroundId == SrStartRound +} + +// GetMessageRange provides the MessageType range used in checks by the consensus +func (wrk *worker) GetMessageRange() []consensus.MessageType { + var v []consensus.MessageType + + for i := MtBlockBodyAndHeader; i <= MtInvalidSigners; i++ { + v = append(v, i) + } + + return v +} + +// CanProceed returns if the current messageType can proceed further if previous subrounds finished +func (wrk *worker) CanProceed(consensusState *spos.ConsensusState, msgType consensus.MessageType) bool { + switch msgType { + case MtBlockBodyAndHeader: + return consensusState.Status(SrStartRound) == spos.SsFinished + case MtBlockBody: + return consensusState.Status(SrStartRound) == spos.SsFinished + case MtBlockHeader: + return consensusState.Status(SrStartRound) == spos.SsFinished + case MtSignature: + return consensusState.Status(SrBlock) == spos.SsFinished + case MtBlockHeaderFinalInfo: + return consensusState.Status(SrSignature) == spos.SsFinished + case MtInvalidSigners: + return consensusState.Status(SrSignature) == spos.SsFinished + } + + return false +} + +// GetMaxNumOfMessageTypeAccepted returns the maximum number of accepted consensus message types per round, per public key +func (wrk *worker) GetMaxNumOfMessageTypeAccepted(msgType consensus.MessageType) uint32 { + if msgType == MtSignature { + return maxNumOfMessageTypeSignatureAccepted + } + + 
return defaultMaxNumOfMessageTypeAccepted +} + +// IsInterfaceNil returns true if there is no value under the interface +func (wrk *worker) IsInterfaceNil() bool { + return wrk == nil +} diff --git a/consensus/spos/bls/blsWorker_test.go b/consensus/spos/bls/v2/blsWorker_test.go similarity index 99% rename from consensus/spos/bls/blsWorker_test.go rename to consensus/spos/bls/v2/blsWorker_test.go index 75cc8f3b412..1f8377ef266 100644 --- a/consensus/spos/bls/blsWorker_test.go +++ b/consensus/spos/bls/v2/blsWorker_test.go @@ -1,4 +1,4 @@ -package bls_test +package v2_test import ( "testing" diff --git a/consensus/spos/bls/v2/constants.go b/consensus/spos/bls/v2/constants.go new file mode 100644 index 00000000000..a395f506ddd --- /dev/null +++ b/consensus/spos/bls/v2/constants.go @@ -0,0 +1,126 @@ +package v2 + +import ( + logger "github.com/multiversx/mx-chain-logger-go" + + "github.com/multiversx/mx-chain-go/consensus" +) + +var log = logger.GetOrCreate("consensus/spos/bls") + +const ( + // SrStartRound defines ID of Subround "Start round" + SrStartRound = iota + // SrBlock defines ID of Subround "block" + SrBlock + // SrSignature defines ID of Subround "signature" + SrSignature + // SrEndRound defines ID of Subround "End round" + SrEndRound +) + +const ( + // MtUnknown defines ID of a message that has unknown data inside + MtUnknown consensus.MessageType = iota + // MtBlockBodyAndHeader defines ID of a message that has a block body and a block header inside + MtBlockBodyAndHeader + // MtBlockBody defines ID of a message that has a block body inside + MtBlockBody + // MtBlockHeader defines ID of a message that has a block header inside + MtBlockHeader + // MtSignature defines ID of a message that has a Signature inside + MtSignature + // MtBlockHeaderFinalInfo defines ID of a message that has a block header final info inside + // (aggregate signature, bitmap and seal leader signature for the proposed and accepted header) + MtBlockHeaderFinalInfo + // MtInvalidSigners defines ID of a message that has invalid signers p2p messages inside + MtInvalidSigners +) + +// waitingAllSigsMaxTimeThreshold specifies the max allocated time for waiting all signatures from the total time of the subround signature +const waitingAllSigsMaxTimeThreshold = 0.5 + +// processingThresholdPercent specifies the max allocated time for processing the block as a percentage of the total time of the round +const processingThresholdPercent = 85 + +// srStartStartTime specifies the start time, from the total time of the round, of Subround Start +const srStartStartTime = 0.0 + +// srStartEndTime specifies the end time, from the total time of the round, of Subround Start +const srStartEndTime = 0.05 + +// srBlockStartTime specifies the start time, from the total time of the round, of Subround Block +const srBlockStartTime = 0.05 + +// srBlockEndTime specifies the end time, from the total time of the round, of Subround Block +const srBlockEndTime = 0.25 + +// srSignatureStartTime specifies the start time, from the total time of the round, of Subround Signature +const srSignatureStartTime = 0.25 + +// srSignatureEndTime specifies the end time, from the total time of the round, of Subround Signature +const srSignatureEndTime = 0.85 + +// srEndStartTime specifies the start time, from the total time of the round, of Subround End +const srEndStartTime = 0.85 + +// srEndEndTime specifies the end time, from the total time of the round, of Subround End +const srEndEndTime = 0.95 +
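The fractions above only become concrete once multiplied by the configured round duration. As a worked example, assuming a hypothetical 6-second round (the actual duration is not set in this file):

// SrStartRound: 0.00*6s .. 0.05*6s  ->    0ms ..  300ms
// SrBlock:      0.05*6s .. 0.25*6s  ->  300ms .. 1500ms
// SrSignature:  0.25*6s .. 0.85*6s  -> 1500ms .. 5100ms
// SrEndRound:   0.85*6s .. 0.95*6s  -> 5100ms .. 5700ms

+const ( + // BlockBodyAndHeaderStringValue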
represents the string to be used to identify a block body and a block header + BlockBodyAndHeaderStringValue = "(BLOCK_BODY_AND_HEADER)" + + // BlockBodyStringValue represents the string to be used to identify a block body + BlockBodyStringValue = "(BLOCK_BODY)" + + // BlockHeaderStringValue represents the string to be used to identify a block header + BlockHeaderStringValue = "(BLOCK_HEADER)" + + // BlockSignatureStringValue represents the string to be used to identify a block's signature + BlockSignatureStringValue = "(SIGNATURE)" + + // BlockHeaderFinalInfoStringValue represents the string to be used to identify a block's header final info + BlockHeaderFinalInfoStringValue = "(FINAL_INFO)" + + // BlockUnknownStringValue represents the string to be used to identify an unknown block + BlockUnknownStringValue = "(UNKNOWN)" + + // BlockDefaultStringValue represents the message to identify a message that is undefined + BlockDefaultStringValue = "Undefined message type" +) + +func getStringValue(msgType consensus.MessageType) string { + switch msgType { + case MtBlockBodyAndHeader: + return BlockBodyAndHeaderStringValue + case MtBlockBody: + return BlockBodyStringValue + case MtBlockHeader: + return BlockHeaderStringValue + case MtSignature: + return BlockSignatureStringValue + case MtBlockHeaderFinalInfo: + return BlockHeaderFinalInfoStringValue + case MtUnknown: + return BlockUnknownStringValue + default: + return BlockDefaultStringValue + } +} + +// getSubroundName returns the name of each Subround from a given Subround ID +func getSubroundName(subroundId int) string { + switch subroundId { + case SrStartRound: + return "(START_ROUND)" + case SrBlock: + return "(BLOCK)" + case SrSignature: + return "(SIGNATURE)" + case SrEndRound: + return "(END_ROUND)" + default: + return "Undefined subround" + } +} diff --git a/consensus/spos/bls/v2/errors.go b/consensus/spos/bls/v2/errors.go new file mode 100644 index 00000000000..97c8e1eb685 --- /dev/null +++ b/consensus/spos/bls/v2/errors.go @@ -0,0 +1,6 @@ +package v2 + +import "errors" + +// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker +var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") diff --git a/consensus/spos/bls/export_test.go b/consensus/spos/bls/v2/export_test.go similarity index 99% rename from consensus/spos/bls/export_test.go rename to consensus/spos/bls/v2/export_test.go index e36bce4c94e..33bef8d7328 100644 --- a/consensus/spos/bls/export_test.go +++ b/consensus/spos/bls/v2/export_test.go @@ -1,4 +1,4 @@ -package bls +package v2 import ( "context" diff --git a/consensus/spos/bls/subroundBlock.go b/consensus/spos/bls/v2/subroundBlock.go similarity index 99% rename from consensus/spos/bls/subroundBlock.go rename to consensus/spos/bls/v2/subroundBlock.go index cec1c657c41..7131415a0c7 100644 --- a/consensus/spos/bls/subroundBlock.go +++ b/consensus/spos/bls/v2/subroundBlock.go @@ -1,4 +1,4 @@ -package bls +package v2 import ( "context" @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" diff --git a/consensus/spos/bls/subroundBlock_test.go b/consensus/spos/bls/v2/subroundBlock_test.go similarity index 99% rename from consensus/spos/bls/subroundBlock_test.go rename to consensus/spos/bls/v2/subroundBlock_test.go index 
d24713cd413..4c1a9e8b129 100644 --- a/consensus/spos/bls/subroundBlock_test.go +++ b/consensus/spos/bls/v2/subroundBlock_test.go @@ -1,4 +1,4 @@ -package bls_test +package v2_test import ( "errors" diff --git a/consensus/spos/bls/subroundEndRound.go b/consensus/spos/bls/v2/subroundEndRound.go similarity index 99% rename from consensus/spos/bls/subroundEndRound.go rename to consensus/spos/bls/v2/subroundEndRound.go index 6bd52cd8adc..c142f1e4da1 100644 --- a/consensus/spos/bls/subroundEndRound.go +++ b/consensus/spos/bls/v2/subroundEndRound.go @@ -1,4 +1,4 @@ -package bls +package v2 import ( "bytes" diff --git a/consensus/spos/bls/subroundEndRound_test.go b/consensus/spos/bls/v2/subroundEndRound_test.go similarity index 99% rename from consensus/spos/bls/subroundEndRound_test.go rename to consensus/spos/bls/v2/subroundEndRound_test.go index b435b1e9f9b..a75f7e08d85 100644 --- a/consensus/spos/bls/subroundEndRound_test.go +++ b/consensus/spos/bls/v2/subroundEndRound_test.go @@ -1,4 +1,4 @@ -package bls_test +package v2_test import ( "bytes" diff --git a/consensus/spos/bls/subroundSignature.go b/consensus/spos/bls/v2/subroundSignature.go similarity index 99% rename from consensus/spos/bls/subroundSignature.go rename to consensus/spos/bls/v2/subroundSignature.go index f08ab7c8e27..dfcf3cfcc8c 100644 --- a/consensus/spos/bls/subroundSignature.go +++ b/consensus/spos/bls/v2/subroundSignature.go @@ -1,4 +1,4 @@ -package bls +package v2 import ( "context" diff --git a/consensus/spos/bls/subroundSignature_test.go b/consensus/spos/bls/v2/subroundSignature_test.go similarity index 99% rename from consensus/spos/bls/subroundSignature_test.go rename to consensus/spos/bls/v2/subroundSignature_test.go index bb76513bfc7..5b152eca937 100644 --- a/consensus/spos/bls/subroundSignature_test.go +++ b/consensus/spos/bls/v2/subroundSignature_test.go @@ -1,4 +1,4 @@ -package bls_test +package v2_test import ( "context" diff --git a/consensus/spos/bls/subroundStartRound.go b/consensus/spos/bls/v2/subroundStartRound.go similarity index 99% rename from consensus/spos/bls/subroundStartRound.go rename to consensus/spos/bls/v2/subroundStartRound.go index 6f8c6d03908..e0cc0b5d055 100644 --- a/consensus/spos/bls/subroundStartRound.go +++ b/consensus/spos/bls/v2/subroundStartRound.go @@ -1,4 +1,4 @@ -package bls +package v2 import ( "context" diff --git a/consensus/spos/bls/subroundStartRound_test.go b/consensus/spos/bls/v2/subroundStartRound_test.go similarity index 99% rename from consensus/spos/bls/subroundStartRound_test.go rename to consensus/spos/bls/v2/subroundStartRound_test.go index c87a678857d..b0bd4bc9a26 100644 --- a/consensus/spos/bls/subroundStartRound_test.go +++ b/consensus/spos/bls/v2/subroundStartRound_test.go @@ -1,4 +1,4 @@ -package bls_test +package v2_test import ( "fmt" From d20c0e290be0a7d8549d23f17532ac490e6a502b Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 20 Sep 2024 14:12:02 +0300 Subject: [PATCH 02/30] fixes for consensus v1 - after initial split --- consensus/chronology/chronology_test.go | 19 +-- consensus/round/round_test.go | 20 +-- .../spos/bls/v1/blsSubroundsFactory_test.go | 4 +- consensus/spos/bls/v1/subroundBlock_test.go | 134 ++++++++-------- consensus/spos/bls/v1/subroundEndRound.go | 2 +- .../spos/bls/v1/subroundEndRound_test.go | 143 +++++++++--------- consensus/spos/bls/v1/subroundSignature.go | 7 +- .../spos/bls/v1/subroundSignature_test.go | 36 ++--- consensus/spos/bls/v1/subroundStartRound.go | 9 +- .../spos/bls/v2/blsSubroundsFactory_test.go | 4 +- 
consensus/spos/bls/v2/subroundBlock_test.go | 20 +-- .../spos/bls/v2/subroundEndRound_test.go | 4 +- consensus/spos/consensusCoreValidator_test.go | 4 +- consensus/spos/consensusState.go | 6 +- consensus/spos/export_test.go | 11 +- consensus/spos/scheduledProcessor_test.go | 48 +++--- consensus/spos/subround.go | 9 +- consensus/spos/subround_test.go | 13 +- consensus/spos/worker_test.go | 8 +- testscommon/consensus/mockTestInitializer.go | 4 +- .../consensus}/rounderMock.go | 2 +- .../consensus}/syncTimerMock.go | 2 +- 22 files changed, 256 insertions(+), 253 deletions(-) rename {consensus/mock => testscommon/consensus}/rounderMock.go (98%) rename {consensus/mock => testscommon/consensus}/syncTimerMock.go (98%) diff --git a/consensus/chronology/chronology_test.go b/consensus/chronology/chronology_test.go index 3f57da37f9b..1de6289d1ca 100644 --- a/consensus/chronology/chronology_test.go +++ b/consensus/chronology/chronology_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/chronology" "github.com/multiversx/mx-chain-go/consensus/mock" + consensus2 "github.com/multiversx/mx-chain-go/testscommon/consensus" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) @@ -117,7 +118,7 @@ func TestChronology_StartRoundShouldReturnWhenRoundIndexIsNegative(t *testing.T) t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus2.RoundHandlerMock{} roundHandlerMock.IndexCalled = func() int64 { return -1 } @@ -151,7 +152,7 @@ func TestChronology_StartRoundShouldReturnWhenDoWorkReturnsFalse(t *testing.T) { t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus2.RoundHandlerMock{} roundHandlerMock.UpdateRound(roundHandlerMock.TimeStamp(), roundHandlerMock.TimeStamp().Add(roundHandlerMock.TimeDuration())) arg.RoundHandler = roundHandlerMock chr, _ := chronology.NewChronology(arg) @@ -168,7 +169,7 @@ func TestChronology_StartRoundShouldWork(t *testing.T) { t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus2.RoundHandlerMock{} roundHandlerMock.UpdateRound(roundHandlerMock.TimeStamp(), roundHandlerMock.TimeStamp().Add(roundHandlerMock.TimeDuration())) arg.RoundHandler = roundHandlerMock chr, _ := chronology.NewChronology(arg) @@ -221,7 +222,7 @@ func TestChronology_InitRoundShouldNotSetSubroundWhenRoundIndexIsNegative(t *tes t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus2.RoundHandlerMock{} arg.RoundHandler = roundHandlerMock arg.GenesisTime = arg.SyncTimer.CurrentTime() chr, _ := chronology.NewChronology(arg) @@ -242,7 +243,7 @@ func TestChronology_InitRoundShouldSetSubroundWhenRoundIndexIsPositive(t *testin t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus2.RoundHandlerMock{} roundHandlerMock.UpdateRound(roundHandlerMock.TimeStamp(), roundHandlerMock.TimeStamp().Add(roundHandlerMock.TimeDuration())) arg.RoundHandler = roundHandlerMock arg.GenesisTime = arg.SyncTimer.CurrentTime() @@ -259,7 +260,7 @@ func TestChronology_StartRoundShouldNotUpdateRoundWhenCurrentRoundIsNotFinished( t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus2.RoundHandlerMock{} arg.RoundHandler = 
roundHandlerMock arg.GenesisTime = arg.SyncTimer.CurrentTime() chr, _ := chronology.NewChronology(arg) @@ -273,7 +274,7 @@ func TestChronology_StartRoundShouldNotUpdateRoundWhenCurrentRoundIsNotFinished( func TestChronology_StartRoundShouldUpdateRoundWhenCurrentRoundIsFinished(t *testing.T) { t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus2.RoundHandlerMock{} arg.RoundHandler = roundHandlerMock arg.GenesisTime = arg.SyncTimer.CurrentTime() chr, _ := chronology.NewChronology(arg) @@ -317,8 +318,8 @@ func TestChronology_CheckIfStatusHandlerWorks(t *testing.T) { func getDefaultChronologyArg() chronology.ArgChronology { return chronology.ArgChronology{ GenesisTime: time.Now(), - RoundHandler: &mock.RoundHandlerMock{}, - SyncTimer: &mock.SyncTimerMock{}, + RoundHandler: &consensus2.RoundHandlerMock{}, + SyncTimer: &consensus2.SyncTimerMock{}, AppStatusHandler: statusHandlerMock.NewAppStatusHandlerMock(), Watchdog: &mock.WatchdogMock{}, } diff --git a/consensus/round/round_test.go b/consensus/round/round_test.go index ede509d7176..b306ebe8f22 100644 --- a/consensus/round/round_test.go +++ b/consensus/round/round_test.go @@ -5,8 +5,10 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/consensus/round" + "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/stretchr/testify/assert" ) @@ -28,7 +30,7 @@ func TestRound_NewRoundShouldWork(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensus.SyncTimerMock{} rnd, err := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) @@ -41,7 +43,7 @@ func TestRound_UpdateRoundShouldNotChangeAnything(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensus.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) oldIndex := rnd.Index() @@ -61,7 +63,7 @@ func TestRound_UpdateRoundShouldAdvanceOneRound(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensus.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) oldIndex := rnd.Index() @@ -76,7 +78,7 @@ func TestRound_IndexShouldReturnFirstIndex(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensus.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) rnd.UpdateRound(genesisTime, genesisTime.Add(roundTimeDuration/2)) @@ -90,7 +92,7 @@ func TestRound_TimeStampShouldReturnTimeStampOfTheNextRound(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensus.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) rnd.UpdateRound(genesisTime, genesisTime.Add(roundTimeDuration+roundTimeDuration/2)) @@ -104,7 +106,7 @@ func TestRound_TimeDurationShouldReturnTheDurationOfOneRound(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensus.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) timeDuration := rnd.TimeDuration() @@ -117,7 +119,7 @@ func TestRound_RemainingTimeInCurrentRoundShouldReturnPositiveValue(t *testing.T 
genesisTime := time.Unix(0, 0) - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensus.SyncTimerMock{} timeElapsed := int64(roundTimeDuration - 1) @@ -138,7 +140,7 @@ func TestRound_RemainingTimeInCurrentRoundShouldReturnNegativeValue(t *testing.T genesisTime := time.Unix(0, 0) - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensus.SyncTimerMock{} timeElapsed := int64(roundTimeDuration + 1) diff --git a/consensus/spos/bls/v1/blsSubroundsFactory_test.go b/consensus/spos/bls/v1/blsSubroundsFactory_test.go index 9a8acd85d67..66bc2887210 100644 --- a/consensus/spos/bls/v1/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/v1/blsSubroundsFactory_test.go @@ -42,8 +42,8 @@ func executeStoredMessages() { func resetConsensusMessages() { } -func initRoundHandlerMock() *mock.RoundHandlerMock { - return &mock.RoundHandlerMock{ +func initRoundHandlerMock() *consensusMock.RoundHandlerMock { + return &consensusMock.RoundHandlerMock{ RoundIndex: 0, TimeStampCalled: func() time.Time { return time.Unix(0, 0) diff --git a/consensus/spos/bls/v1/subroundBlock_test.go b/consensus/spos/bls/v1/subroundBlock_test.go index 8a3289b4d5d..074a6463e5e 100644 --- a/consensus/spos/bls/v1/subroundBlock_test.go +++ b/consensus/spos/bls/v1/subroundBlock_test.go @@ -14,15 +14,17 @@ import ( "github.com/stretchr/testify/require" "github.com/multiversx/mx-chain-go/consensus" - "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" + v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" "github.com/multiversx/mx-chain-go/testscommon" + consensusMock "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) func defaultSubroundForSRBlock(consensusState *spos.ConsensusState, ch chan bool, - container *mock.ConsensusCoreMock, appStatusHandler core.AppStatusHandler) (*spos.Subround, error) { + container *consensusMock.ConsensusCoreMock, appStatusHandler core.AppStatusHandler) (*spos.Subround, error) { return spos.NewSubround( v1.SrStartRound, v1.SrBlock, @@ -77,7 +79,7 @@ func defaultSubroundBlockWithoutErrorFromSubround(sr *spos.Subround) v1.Subround func initSubroundBlock( blockChain data.ChainHandler, - container *mock.ConsensusCoreMock, + container *consensusMock.ConsensusCoreMock, appStatusHandler core.AppStatusHandler, ) v1.SubroundBlock { if blockChain == nil { @@ -108,18 +110,18 @@ func initSubroundBlock( return srBlock } -func createConsensusContainers() []*mock.ConsensusCoreMock { - consensusContainers := make([]*mock.ConsensusCoreMock, 0) - container := mock.InitConsensusCore() +func createConsensusContainers() []*consensusMock.ConsensusCoreMock { + consensusContainers := make([]*consensusMock.ConsensusCoreMock, 0) + container := consensusMock.InitConsensusCore() consensusContainers = append(consensusContainers, container) - container = mock.InitConsensusCoreHeaderV2() + container = consensusMock.InitConsensusCoreHeaderV2() consensusContainers = append(consensusContainers, container) return consensusContainers } func initSubroundBlockWithBlockProcessor( bp *testscommon.BlockProcessorStub, - container *mock.ConsensusCoreMock, + container *consensusMock.ConsensusCoreMock, ) v1.SubroundBlock { blockChain := &testscommon.ChainHandlerStub{ GetGenesisHeaderCalled: func() data.HeaderHandler { @@ -158,7 +160,7 @@ func 
TestSubroundBlock_NewSubroundBlockNilSubroundShouldFail(t *testing.T) { func TestSubroundBlock_NewSubroundBlockNilBlockchainShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() consensusState := initConsensusState() @@ -174,7 +176,7 @@ func TestSubroundBlock_NewSubroundBlockNilBlockchainShouldFail(t *testing.T) { func TestSubroundBlock_NewSubroundBlockNilBlockProcessorShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() consensusState := initConsensusState() @@ -190,7 +192,7 @@ func TestSubroundBlock_NewSubroundBlockNilBlockProcessorShouldFail(t *testing.T) func TestSubroundBlock_NewSubroundBlockNilConsensusStateShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -204,7 +206,7 @@ func TestSubroundBlock_NewSubroundBlockNilConsensusStateShouldFail(t *testing.T) func TestSubroundBlock_NewSubroundBlockNilHasherShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() consensusState := initConsensusState() @@ -219,7 +221,7 @@ func TestSubroundBlock_NewSubroundBlockNilHasherShouldFail(t *testing.T) { func TestSubroundBlock_NewSubroundBlockNilMarshalizerShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() consensusState := initConsensusState() @@ -234,7 +236,7 @@ func TestSubroundBlock_NewSubroundBlockNilMarshalizerShouldFail(t *testing.T) { func TestSubroundBlock_NewSubroundBlockNilMultiSignerContainerShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() consensusState := initConsensusState() @@ -249,7 +251,7 @@ func TestSubroundBlock_NewSubroundBlockNilMultiSignerContainerShouldFail(t *test func TestSubroundBlock_NewSubroundBlockNilRoundHandlerShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() consensusState := initConsensusState() @@ -264,7 +266,7 @@ func TestSubroundBlock_NewSubroundBlockNilRoundHandlerShouldFail(t *testing.T) { func TestSubroundBlock_NewSubroundBlockNilShardCoordinatorShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() consensusState := initConsensusState() @@ -279,7 +281,7 @@ func TestSubroundBlock_NewSubroundBlockNilShardCoordinatorShouldFail(t *testing. 
func TestSubroundBlock_NewSubroundBlockNilSyncTimerShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() consensusState := initConsensusState() @@ -294,7 +296,7 @@ func TestSubroundBlock_NewSubroundBlockNilSyncTimerShouldFail(t *testing.T) { func TestSubroundBlock_NewSubroundBlockShouldWork(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -306,7 +308,7 @@ func TestSubroundBlock_NewSubroundBlockShouldWork(t *testing.T) { func TestSubroundBlock_DoBlockJob(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) r := sr.DoBlockJob() assert.False(t, r) @@ -331,15 +333,15 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { r = sr.DoBlockJob() assert.False(t, r) - bpm = mock.InitBlockProcessorMock(container.Marshalizer()) + bpm = consensusMock.InitBlockProcessorMock(container.Marshalizer()) container.SetBlockProcessor(bpm) - bm := &mock.BroadcastMessengerMock{ + bm := &consensusMock.BroadcastMessengerMock{ BroadcastConsensusMessageCalled: func(message *consensus.Message) error { return nil }, } container.SetBroadcastMessenger(bm) - container.SetRoundHandler(&mock.RoundHandlerMock{ + container.SetRoundHandler(&consensusMock.RoundHandlerMock{ RoundIndex: 1, }) r = sr.DoBlockJob() @@ -350,7 +352,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { func TestSubroundBlock_ReceivedBlockBodyAndHeaderDataAlreadySet(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} @@ -366,7 +368,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderDataAlreadySet(t *testing.T) { func TestSubroundBlock_ReceivedBlockBodyAndHeaderNodeNotLeaderInCurrentRound(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} @@ -382,7 +384,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderNodeNotLeaderInCurrentRound(t * func TestSubroundBlock_ReceivedBlockBodyAndHeaderCannotProcessJobDone(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} @@ -400,8 +402,8 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderCannotProcessJobDone(t *testing func TestSubroundBlock_ReceivedBlockBodyAndHeaderErrorDecoding(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() - blProc := mock.InitBlockProcessorMock(container.Marshalizer()) + container := consensusMock.InitConsensusCore() + blProc := consensusMock.InitBlockProcessorMock(container.Marshalizer()) blProc.DecodeBlockHeaderCalled = func(dta []byte) data.HeaderHandler { // error decoding so return nil return nil @@ -424,7 +426,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderErrorDecoding(t *testing.T) { func TestSubroundBlock_ReceivedBlockBodyAndHeaderBodyAlreadyReceived(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := 
*initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} @@ -442,7 +444,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderBodyAlreadyReceived(t *testing. func TestSubroundBlock_ReceivedBlockBodyAndHeaderHeaderAlreadyReceived(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} @@ -459,7 +461,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderHeaderAlreadyReceived(t *testin func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) t.Run("block is valid", func(t *testing.T) { @@ -483,7 +485,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { } func createConsensusMessage(header *block.Header, body *block.Body, leader []byte, topic consensus.MessageType) *consensus.Message { - marshaller := &mock.MarshalizerMock{} + marshaller := &marshallerMock.MarshalizerMock{} hasher := &hashingMocks.HasherMock{} hdrStr, _ := marshaller.Marshal(header) @@ -510,11 +512,11 @@ func createConsensusMessage(header *block.Header, body *block.Body, leader []byt func TestSubroundBlock_ReceivedBlock(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) - blockProcessorMock := mock.InitBlockProcessorMock(container.Marshalizer()) + blockProcessorMock := consensusMock.InitBlockProcessorMock(container.Marshalizer()) blkBody := &block.Body{} - blkBodyStr, _ := mock.MarshalizerMock{}.Marshal(blkBody) + blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) cnsMsg := consensus.NewConsensusMessage( nil, nil, @@ -593,7 +595,7 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { sr.Header = nil hdr = createDefaultHeader() hdr.Nonce = 1 - hdrStr, _ = mock.MarshalizerMock{}.Marshal(hdr) + hdrStr, _ = marshallerMock.MarshalizerMock{}.Marshal(hdr) hdrHash = (&hashingMocks.HasherMock{}).Compute(string(hdrStr)) cnsMsg.BlockHeaderHash = hdrHash cnsMsg.Header = hdrStr @@ -603,7 +605,7 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenBodyAndHeaderAreNotSet(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) cnsMsg := consensus.NewConsensusMessage( nil, @@ -626,9 +628,9 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenBodyAndHeaderAre func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFails(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) - blProcMock := mock.InitBlockProcessorMock(container.Marshalizer()) + blProcMock := consensusMock.InitBlockProcessorMock(container.Marshalizer()) err := errors.New("error process block") blProcMock.ProcessBlockCalled = func(data.HeaderHandler, data.BodyHandler, func() time.Duration) error { return err @@ -636,7 +638,7 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFail 
container.SetBlockProcessor(blProcMock) hdr := &block.Header{} blkBody := &block.Body{} - blkBodyStr, _ := mock.MarshalizerMock{}.Marshal(blkBody) + blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) cnsMsg := consensus.NewConsensusMessage( nil, nil, @@ -660,11 +662,11 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFail func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockReturnsInNextRound(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{} blkBody := &block.Body{} - blkBodyStr, _ := mock.MarshalizerMock{}.Marshal(blkBody) + blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) cnsMsg := consensus.NewConsensusMessage( nil, nil, @@ -683,12 +685,12 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockRetu ) sr.Header = hdr sr.Body = blkBody - blockProcessorMock := mock.InitBlockProcessorMock(container.Marshalizer()) + blockProcessorMock := consensusMock.InitBlockProcessorMock(container.Marshalizer()) blockProcessorMock.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errors.New("error") } container.SetBlockProcessor(blockProcessorMock) - container.SetRoundHandler(&mock.RoundHandlerMock{RoundIndex: 1}) + container.SetRoundHandler(&consensusMock.RoundHandlerMock{RoundIndex: 1}) assert.False(t, sr.ProcessReceivedBlock(cnsMsg)) } @@ -701,7 +703,7 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnTrue(t *testing.T) { hdr, _ := container.BlockProcessor().CreateNewHeader(1, 1) hdr, blkBody, _ := container.BlockProcessor().CreateBlock(hdr, func() bool { return true }) - blkBodyStr, _ := mock.MarshalizerMock{}.Marshal(blkBody) + blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) cnsMsg := consensus.NewConsensusMessage( nil, nil, @@ -726,7 +728,7 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnTrue(t *testing.T) { func TestSubroundBlock_RemainingTimeShouldReturnNegativeValue(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() roundHandlerMock := initRoundHandlerMock() container.SetRoundHandler(roundHandlerMock) @@ -739,19 +741,19 @@ func TestSubroundBlock_RemainingTimeShouldReturnNegativeValue(t *testing.T) { return remainingTime } - container.SetSyncTimer(&mock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + container.SetSyncTimer(&consensusMock.SyncTimerMock{CurrentTimeCalled: func() time.Time { return time.Unix(0, 0).Add(roundTimeDuration * 84 / 100) }}) ret := remainingTimeInThisRound() assert.True(t, ret > 0) - container.SetSyncTimer(&mock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + container.SetSyncTimer(&consensusMock.SyncTimerMock{CurrentTimeCalled: func() time.Time { return time.Unix(0, 0).Add(roundTimeDuration * 85 / 100) }}) ret = remainingTimeInThisRound() assert.True(t, ret == 0) - container.SetSyncTimer(&mock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + container.SetSyncTimer(&consensusMock.SyncTimerMock{CurrentTimeCalled: func() time.Time { return time.Unix(0, 0).Add(roundTimeDuration * 86 / 100) }}) ret = remainingTimeInThisRound() @@ -760,7 +762,7 @@ func TestSubroundBlock_RemainingTimeShouldReturnNegativeValue(t *testing.T) { func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t 
*testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) sr.RoundCanceled = true assert.False(t, sr.DoBlockConsensusCheck()) @@ -768,7 +770,7 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenRoundIsCanceled func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenSubroundIsFinished(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) sr.SetStatus(v1.SrBlock, spos.SsFinished) assert.True(t, sr.DoBlockConsensusCheck()) @@ -776,7 +778,7 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenSubroundIsFinish func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenBlockIsReceivedReturnTrue(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) for i := 0; i < sr.Threshold(v1.SrBlock); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrBlock, true) @@ -786,14 +788,14 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenBlockIsReceivedR func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenBlockIsReceivedReturnFalse(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) assert.False(t, sr.DoBlockConsensusCheck()) } func TestSubroundBlock_IsBlockReceived(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) for i := 0; i < len(sr.ConsensusGroup()); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrBlock, false) @@ -815,7 +817,7 @@ func TestSubroundBlock_IsBlockReceived(t *testing.T) { func TestSubroundBlock_HaveTimeInCurrentSubroundShouldReturnTrue(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) haveTimeInCurrentSubound := func() bool { roundStartTime := sr.RoundHandler().TimeStamp() @@ -825,14 +827,14 @@ func TestSubroundBlock_HaveTimeInCurrentSubroundShouldReturnTrue(t *testing.T) { return time.Duration(remainingTime) > 0 } - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMock.RoundHandlerMock{} roundHandlerMock.TimeDurationCalled = func() time.Duration { return 4000 * time.Millisecond } roundHandlerMock.TimeStampCalled = func() time.Time { return time.Unix(0, 0) } - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMock.SyncTimerMock{} timeElapsed := sr.EndTime() - 1 syncTimerMock.CurrentTimeCalled = func() time.Time { return time.Unix(0, timeElapsed) @@ -845,7 +847,7 @@ func TestSubroundBlock_HaveTimeInCurrentSubroundShouldReturnTrue(t *testing.T) { func TestSubroundBlock_HaveTimeInCurrentSuboundShouldReturnFalse(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) haveTimeInCurrentSubound := func() bool { roundStartTime := sr.RoundHandler().TimeStamp() @@ -855,14 +857,14 @@ func 
TestSubroundBlock_HaveTimeInCurrentSuboundShouldReturnFalse(t *testing.T) { return time.Duration(remainingTime) > 0 } - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMock.RoundHandlerMock{} roundHandlerMock.TimeDurationCalled = func() time.Duration { return 4000 * time.Millisecond } roundHandlerMock.TimeStampCalled = func() time.Time { return time.Unix(0, 0) } - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMock.SyncTimerMock{} timeElapsed := sr.EndTime() + 1 syncTimerMock.CurrentTimeCalled = func() time.Time { return time.Unix(0, timeElapsed) @@ -967,8 +969,8 @@ func TestSubroundBlock_CreateHeaderMultipleMiniBlocks(t *testing.T) { } }, } - container := mock.InitConsensusCore() - bp := mock.InitBlockProcessorMock(container.Marshalizer()) + container := consensusMock.InitConsensusCore() + bp := consensusMock.InitBlockProcessorMock(container.Marshalizer()) bp.CreateBlockCalled = func(header data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { shardHeader, _ := header.(*block.Header) shardHeader.MiniBlockHeaders = mbHeaders @@ -1002,8 +1004,8 @@ func TestSubroundBlock_CreateHeaderMultipleMiniBlocks(t *testing.T) { func TestSubroundBlock_CreateHeaderNilMiniBlocks(t *testing.T) { expectedErr := errors.New("nil mini blocks") - container := mock.InitConsensusCore() - bp := mock.InitBlockProcessorMock(container.Marshalizer()) + container := consensusMock.InitConsensusCore() + bp := consensusMock.InitBlockProcessorMock(container.Marshalizer()) bp.CreateBlockCalled = func(header data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { return nil, nil, expectedErr } @@ -1059,7 +1061,7 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDuration(t *testing.T) { srDuration := srEndTime - srStartTime delay := srDuration * 430 / 1000 - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() receivedValue := uint64(0) container.SetBlockProcessor(&testscommon.BlockProcessorStub{ ProcessBlockCalled: func(_ data.HeaderHandler, _ data.BodyHandler, _ func() time.Duration) error { @@ -1073,7 +1075,7 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDuration(t *testing.T) { }}) hdr := &block.Header{} blkBody := &block.Body{} - blkBodyStr, _ := mock.MarshalizerMock{}.Marshal(blkBody) + blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) cnsMsg := consensus.NewConsensusMessage( nil, @@ -1113,7 +1115,7 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDurationWithZeroDurationShould } }() - container := mock.InitConsensusCore() + container := consensusMock.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) diff --git a/consensus/spos/bls/v1/subroundEndRound.go b/consensus/spos/bls/v1/subroundEndRound.go index bc275f19272..c6ed827e0c5 100644 --- a/consensus/spos/bls/v1/subroundEndRound.go +++ b/consensus/spos/bls/v1/subroundEndRound.go @@ -924,7 +924,7 @@ func (sr *subroundEndRound) getMinConsensusGroupIndexOfManagedKeys() int { minIdx := sr.ConsensusGroupSize() for idx, validator := range sr.ConsensusGroup() { - if !sr.IsKeyManagedByCurrentNode([]byte(validator)) { + if !sr.IsKeyManagedBySelf([]byte(validator)) { continue } diff --git a/consensus/spos/bls/v1/subroundEndRound_test.go b/consensus/spos/bls/v1/subroundEndRound_test.go index c202cc15a7e..cd6e14a6f0f 100644 --- a/consensus/spos/bls/v1/subroundEndRound_test.go +++ b/consensus/spos/bls/v1/subroundEndRound_test.go @@ -18,6 +18,7 @@ import ( 
"github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" + v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/p2p/factory" @@ -28,7 +29,7 @@ import ( ) func initSubroundEndRoundWithContainer( - container *mock.ConsensusCoreMock, + container *consensusMocks.ConsensusCoreMock, appStatusHandler core.AppStatusHandler, ) v1.SubroundEndRound { ch := make(chan bool, 1) @@ -62,14 +63,14 @@ func initSubroundEndRoundWithContainer( } func initSubroundEndRound(appStatusHandler core.AppStatusHandler) v1.SubroundEndRound { - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() return initSubroundEndRoundWithContainer(container, appStatusHandler) } func TestNewSubroundEndRound(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -153,7 +154,7 @@ func TestNewSubroundEndRound(t *testing.T) { func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -189,7 +190,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing. func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -225,7 +226,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *test func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -262,7 +263,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *test func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -298,7 +299,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -334,7 +335,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testin func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -370,7 +371,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := 
initConsensusState() ch := make(chan bool, 1) @@ -405,7 +406,7 @@ func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { func TestSubroundEndRound_DoEndRoundJobErrAggregatingSigShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) signingHandler := &consensusMocks.SigningHandlerStub{ @@ -427,11 +428,11 @@ func TestSubroundEndRound_DoEndRoundJobErrAggregatingSigShouldFail(t *testing.T) func TestSubroundEndRound_DoEndRoundJobErrCommitBlockShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") - blProcMock := mock.InitBlockProcessorMock(container.Marshalizer()) + blProcMock := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) blProcMock.CommitBlockCalled = func( header data.HeaderHandler, body data.BodyHandler, @@ -449,12 +450,12 @@ func TestSubroundEndRound_DoEndRoundJobErrCommitBlockShouldFail(t *testing.T) { func TestSubroundEndRound_DoEndRoundJobErrTimeIsOutShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") remainingTime := time.Millisecond - roundHandlerMock := &mock.RoundHandlerMock{ + roundHandlerMock := &consensusMocks.RoundHandlerMock{ RemainingTimeCalled: func(startTime time.Time, maxTime time.Duration) time.Duration { return remainingTime }, @@ -475,8 +476,8 @@ func TestSubroundEndRound_DoEndRoundJobErrTimeIsOutShouldFail(t *testing.T) { func TestSubroundEndRound_DoEndRoundJobErrBroadcastBlockOK(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() - bm := &mock.BroadcastMessengerMock{ + container := consensusMocks.InitConsensusCore() + bm := &consensusMocks.BroadcastMessengerMock{ BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { return errors.New("error") }, @@ -495,16 +496,16 @@ func TestSubroundEndRound_DoEndRoundJobErrMarshalizedDataToBroadcastOK(t *testin t.Parallel() err := errors.New("") - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() - bpm := mock.InitBlockProcessorMock(container.Marshalizer()) + bpm := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) bpm.MarshalizedDataToBroadcastCalled = func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { err = errors.New("error marshalized data to broadcast") return make(map[uint32][]byte), make(map[string][][]byte), err } container.SetBlockProcessor(bpm) - bm := &mock.BroadcastMessengerMock{ + bm := &consensusMocks.BroadcastMessengerMock{ BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { return nil }, @@ -530,15 +531,15 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastMiniBlocksOK(t *testing.T) { t.Parallel() err := errors.New("") - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() - bpm := mock.InitBlockProcessorMock(container.Marshalizer()) + bpm := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) bpm.MarshalizedDataToBroadcastCalled = func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, 
map[string][][]byte, error) { return make(map[uint32][]byte), make(map[string][][]byte), nil } container.SetBlockProcessor(bpm) - bm := &mock.BroadcastMessengerMock{ + bm := &consensusMocks.BroadcastMessengerMock{ BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { return nil }, @@ -566,15 +567,15 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastTransactionsOK(t *testing.T) t.Parallel() err := errors.New("") - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() - bpm := mock.InitBlockProcessorMock(container.Marshalizer()) + bpm := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) bpm.MarshalizedDataToBroadcastCalled = func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { return make(map[uint32][]byte), make(map[string][][]byte), nil } container.SetBlockProcessor(bpm) - bm := &mock.BroadcastMessengerMock{ + bm := &consensusMocks.BroadcastMessengerMock{ BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { return nil }, @@ -601,8 +602,8 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastTransactionsOK(t *testing.T) func TestSubroundEndRound_DoEndRoundJobAllOK(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() - bm := &mock.BroadcastMessengerMock{ + container := consensusMocks.InitConsensusCore() + bm := &consensusMocks.BroadcastMessengerMock{ BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { return errors.New("error") }, @@ -621,7 +622,7 @@ func TestSubroundEndRound_CheckIfSignatureIsFilled(t *testing.T) { t.Parallel() expectedSignature := []byte("signature") - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() signingHandler := &consensusMocks.SigningHandlerStub{ CreateSignatureForPublicKeyCalled: func(publicKeyBytes []byte, msg []byte) ([]byte, error) { var receivedHdr block.Header @@ -630,7 +631,7 @@ func TestSubroundEndRound_CheckIfSignatureIsFilled(t *testing.T) { }, } container.SetSigningHandler(signingHandler) - bm := &mock.BroadcastMessengerMock{ + bm := &consensusMocks.BroadcastMessengerMock{ BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { return errors.New("error") }, @@ -864,8 +865,8 @@ func TestSubroundEndRound_CreateAndBroadcastHeaderFinalInfoBroadcastShouldBeCall chanRcv := make(chan bool, 1) leaderSigInHdr := []byte("leader sig") - container := mock.InitConsensusCore() - messenger := &mock.BroadcastMessengerMock{ + container := consensusMocks.InitConsensusCore() + messenger := &consensusMocks.BroadcastMessengerMock{ BroadcastConsensusMessageCalled: func(message *consensus.Message) error { chanRcv <- true assert.Equal(t, message.LeaderSignature, leaderSigInHdr) @@ -909,9 +910,9 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldWork(t *testing.T) { func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldReturnFalseWhenFinalInfoIsNotValid(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() - headerSigVerifier := &mock.HeaderSigVerifierStub{ + headerSigVerifier := &consensusMocks.HeaderSigVerifierMock{ VerifyLeaderSignatureCalled: func(header data.HeaderHandler) error { return errors.New("error") }, @@ -944,8 +945,8 @@ func TestSubroundEndRound_IsOutOfTimeShouldReturnTrue(t *testing.T) { t.Parallel() // update roundHandler's mock, so it will calculate for real the duration - container 
:= mock.InitConsensusCore() - roundHandler := mock.RoundHandlerMock{RemainingTimeCalled: func(startTime time.Time, maxTime time.Duration) time.Duration { + container := consensusMocks.InitConsensusCore() + roundHandler := consensusMocks.RoundHandlerMock{RemainingTimeCalled: func(startTime time.Time, maxTime time.Duration) time.Duration { currentTime := time.Now() elapsedTime := currentTime.Sub(startTime) remainingTime := maxTime - elapsedTime @@ -964,9 +965,9 @@ func TestSubroundEndRound_IsOutOfTimeShouldReturnTrue(t *testing.T) { func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerifyLeaderSignatureFails(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() - headerSigVerifier := &mock.HeaderSigVerifierStub{ + headerSigVerifier := &consensusMocks.HeaderSigVerifierMock{ VerifyLeaderSignatureCalled: func(header data.HeaderHandler) error { return errors.New("error") }, @@ -986,9 +987,9 @@ func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerify func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerifySignatureFails(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() - headerSigVerifier := &mock.HeaderSigVerifierStub{ + headerSigVerifier := &consensusMocks.HeaderSigVerifierMock{ VerifyLeaderSignatureCalled: func(header data.HeaderHandler) error { return nil }, @@ -1008,9 +1009,9 @@ func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerify func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnTrue(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() - headerSigVerifier := &mock.HeaderSigVerifierStub{ + headerSigVerifier := &consensusMocks.HeaderSigVerifierMock{ VerifyLeaderSignatureCalled: func(header data.HeaderHandler) error { return nil }, @@ -1033,7 +1034,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { t.Run("fail to get signature share", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) expectedErr := errors.New("exptected error") @@ -1055,7 +1056,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { t.Run("fail to verify signature share, job done will be set to false", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) expectedErr := errors.New("exptected error") @@ -1083,7 +1084,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) signingHandler := &consensusMocks.SigningHandlerStub{ SignatureShareCalled: func(index uint16) ([]byte, error) { @@ -1114,7 +1115,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { t.Run("invalid number of valid sig shares", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.Header = 
&block.Header{} sr.SetThreshold(v1.SrEndRound, 2) @@ -1126,7 +1127,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { t.Run("fail to created aggregated sig", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) expectedErr := errors.New("exptected error") @@ -1147,7 +1148,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { t.Run("fail to set aggregated sig", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) expectedErr := errors.New("exptected error") @@ -1167,7 +1168,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.Header = &block.Header{} _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) @@ -1185,7 +1186,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeaderVerificationFail(t *testing.T) { t.Run("not enough valid signature shares", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) verifySigShareNumCalls := 0 @@ -1232,7 +1233,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeaderVerificationFail(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) verifySigShareNumCalls := 0 @@ -1284,7 +1285,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { t.Run("consensus data is not set", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.ConsensusState.Data = nil @@ -1301,7 +1302,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { t.Run("received message node is not leader in current round", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) @@ -1317,7 +1318,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { t.Run("received message from self leader should return false", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") @@ -1334,7 +1335,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { t.Run("received message from self multikey leader should return false", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() keysHandler := &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return string(pkBytes) == "A" @@ -1381,7 +1382,7 @@ func 
TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { t.Run("received hash does not match the hash from current consensus state", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) @@ -1397,7 +1398,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { t.Run("process received message verification failed, different round index", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) @@ -1414,7 +1415,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { t.Run("empty invalid signers", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsData := consensus.Message{ @@ -1437,7 +1438,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { }, } - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() container.SetMessageSigningHandler(messageSigningHandler) sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) @@ -1454,7 +1455,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) @@ -1475,7 +1476,7 @@ func TestVerifyInvalidSigners(t *testing.T) { t.Run("failed to deserialize invalidSigners field, should error", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() expectedErr := errors.New("expected err") messageSigningHandler := &mock.MessageSigningHandlerStub{ @@ -1495,7 +1496,7 @@ func TestVerifyInvalidSigners(t *testing.T) { t.Run("failed to verify low level p2p message, should error", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() invalidSigners := []p2p.MessageP2P{&factory.Message{ FromField: []byte("from"), @@ -1524,7 +1525,7 @@ func TestVerifyInvalidSigners(t *testing.T) { t.Run("failed to verify signature share", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() pubKey := []byte("A") // it's in consensus @@ -1567,7 +1568,7 @@ func TestVerifyInvalidSigners(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() pubKey := []byte("A") // it's in consensus @@ -1600,7 +1601,7 @@ func TestSubroundEndRound_CreateAndBroadcastInvalidSigners(t *testing.T) { expectedInvalidSigners := []byte("invalid signers") - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() nodeRedundancy := &mock.NodeRedundancyHandlerStub{ IsRedundancyNodeCalled: func() bool { return true @@ -1610,7 +1611,7 @@ func TestSubroundEndRound_CreateAndBroadcastInvalidSigners(t *testing.T) { }, } container.SetNodeRedundancyHandler(nodeRedundancy) - messenger := &mock.BroadcastMessengerMock{ + messenger := 
&consensusMocks.BroadcastMessengerMock{ BroadcastConsensusMessageCalled: func(message *consensus.Message) error { assert.Fail(t, "should have not been called") return nil @@ -1630,8 +1631,8 @@ func TestSubroundEndRound_CreateAndBroadcastInvalidSigners(t *testing.T) { expectedInvalidSigners := []byte("invalid signers") wasCalled := false - container := mock.InitConsensusCore() - messenger := &mock.BroadcastMessengerMock{ + container := consensusMocks.InitConsensusCore() + messenger := &consensusMocks.BroadcastMessengerMock{ BroadcastConsensusMessageCalled: func(message *consensus.Message) error { assert.Equal(t, expectedInvalidSigners, message.InvalidSigners) wasCalled = true @@ -1657,7 +1658,7 @@ func TestGetFullMessagesForInvalidSigners(t *testing.T) { t.Run("empty p2p messages slice if not in state", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() messageSigningHandler := &mock.MessageSigningHandlerStub{ SerializeCalled: func(messages []p2p.MessageP2P) ([]byte, error) { @@ -1680,7 +1681,7 @@ func TestGetFullMessagesForInvalidSigners(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() expectedInvalidSigners := []byte("expectedInvalidSigners") @@ -1709,7 +1710,7 @@ func TestGetFullMessagesForInvalidSigners(t *testing.T) { func TestSubroundEndRound_getMinConsensusGroupIndexOfManagedKeys(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() keysHandler := &testscommon.KeysHandlerStub{} ch := make(chan bool, 1) consensusState := initConsensusStateWithKeysHandler(keysHandler) diff --git a/consensus/spos/bls/v1/subroundSignature.go b/consensus/spos/bls/v1/subroundSignature.go index 2880480713d..df1e6e8030b 100644 --- a/consensus/spos/bls/v1/subroundSignature.go +++ b/consensus/spos/bls/v1/subroundSignature.go @@ -353,12 +353,12 @@ func (sr *subroundSignature) doSignatureJobForManagedKeys() bool { isMultiKeyLeader := sr.IsMultiKeyLeaderInCurrentRound() numMultiKeysSignaturesSent := 0 - for idx, pk := range sr.ConsensusGroup() { + for _, pk := range sr.ConsensusGroup() { pkBytes := []byte(pk) if sr.IsJobDone(pk, sr.Current()) { continue } - if !sr.IsKeyManagedByCurrentNode(pkBytes) { + if !sr.IsKeyManagedBySelf(pkBytes) { continue } @@ -388,8 +388,9 @@ func (sr *subroundSignature) doSignatureJobForManagedKeys() bool { numMultiKeysSignaturesSent++ } sr.sentSignatureTracker.SignatureSent(pkBytes) + leader, err := sr.GetLeader() - isLeader := idx == spos.IndexOfLeaderInConsensusGroup + isLeader := pk == leader ok := sr.completeSignatureSubRound(pk, isLeader) if !ok { return false diff --git a/consensus/spos/bls/v1/subroundSignature_test.go b/consensus/spos/bls/v1/subroundSignature_test.go index 1dac174eb96..31532f562eb 100644 --- a/consensus/spos/bls/v1/subroundSignature_test.go +++ b/consensus/spos/bls/v1/subroundSignature_test.go @@ -10,14 +10,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/multiversx/mx-chain-go/consensus" - "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" + v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" "github.com/multiversx/mx-chain-go/testscommon" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) -func initSubroundSignatureWithContainer(container 
*mock.ConsensusCoreMock) v1.SubroundSignature { +func initSubroundSignatureWithContainer(container *consensusMocks.ConsensusCoreMock) v1.SubroundSignature { consensusState := initConsensusState() ch := make(chan bool, 1) @@ -48,14 +48,14 @@ func initSubroundSignatureWithContainer(container *mock.ConsensusCoreMock) v1.Su } func initSubroundSignature() v1.SubroundSignature { - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() return initSubroundSignatureWithContainer(container) } func TestNewSubroundSignature(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -132,7 +132,7 @@ func TestNewSubroundSignature(t *testing.T) { func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -167,7 +167,7 @@ func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *te func TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -201,7 +201,7 @@ func TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) func TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -235,7 +235,7 @@ func TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -270,7 +270,7 @@ func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *test func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -304,7 +304,7 @@ func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -338,7 +338,7 @@ func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { func TestSubroundSignature_DoSignatureJob(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundSignatureWithContainer(container) sr.Header = &block.Header{} @@ -380,7 +380,7 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) 
bool { @@ -538,7 +538,7 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { }, } - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() container.SetSigningHandler(signingHandler) sr := *initSubroundSignatureWithContainer(container) sr.Header = &block.Header{} @@ -663,7 +663,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenSignatu func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenNotAllSignaturesCollectedAndTimeIsNotOut(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = false @@ -679,7 +679,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenNotAllS func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenAllSignaturesCollected(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = false @@ -695,7 +695,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenAllSigna func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenEnoughButNotAllSignaturesCollectedAndTimeIsOut(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr := *initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = true @@ -711,7 +711,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenEnoughBu func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenFallbackThresholdCouldNotBeApplied(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() container.SetFallbackHeaderValidator(&testscommon.FallBackHeaderValidatorStub{ ShouldApplyFallbackValidationCalled: func(headerHandler data.HeaderHandler) bool { return false @@ -732,7 +732,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenFallbac func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenFallbackThresholdCouldBeApplied(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() container.SetFallbackHeaderValidator(&testscommon.FallBackHeaderValidatorStub{ ShouldApplyFallbackValidationCalled: func(headerHandler data.HeaderHandler) bool { return true diff --git a/consensus/spos/bls/v1/subroundStartRound.go b/consensus/spos/bls/v1/subroundStartRound.go index b514b586241..2de413aa9cb 100644 --- a/consensus/spos/bls/v1/subroundStartRound.go +++ b/consensus/spos/bls/v1/subroundStartRound.go @@ -170,7 +170,7 @@ func (sr *subroundStartRound) initCurrentRound() bool { } msg := "" - if sr.IsKeyManagedByCurrentNode([]byte(leader)) { + if sr.IsKeyManagedBySelf([]byte(leader)) { msg = " (my turn in multi-key)" } if leader == sr.SelfPubKey() && sr.ShouldConsiderSelfKeyInConsensus() { @@ -193,7 +193,7 @@ func (sr *subroundStartRound) initCurrentRound() bool { sr.indexRoundIfNeeded(pubKeys) isSingleKeyLeader := leader == sr.SelfPubKey() && sr.ShouldConsiderSelfKeyInConsensus() - isLeader := isSingleKeyLeader || sr.IsKeyManagedByCurrentNode([]byte(leader)) + isLeader := isSingleKeyLeader || sr.IsKeyManagedBySelf([]byte(leader)) isSelfInConsensus := sr.IsNodeInConsensusGroup(sr.SelfPubKey()) || 
numMultiKeysInConsensusGroup > 0 if !isSelfInConsensus { log.Debug("not in consensus group") @@ -238,7 +238,7 @@ func (sr *subroundStartRound) computeNumManagedKeysInConsensusGroup(pubKeys []st numMultiKeysInConsensusGroup := 0 for _, pk := range pubKeys { pkBytes := []byte(pk) - if sr.IsKeyManagedByCurrentNode(pkBytes) { + if sr.IsKeyManagedBySelf(pkBytes) { numMultiKeysInConsensusGroup++ log.Trace("in consensus group with multi key", "pk", core.GetTrimmedPk(hex.EncodeToString(pkBytes))) @@ -323,7 +323,7 @@ func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error shardId := sr.ShardCoordinator().SelfId() - nextConsensusGroup, err := sr.GetNextConsensusGroup( + leader, nextConsensusGroup, err := sr.GetNextConsensusGroup( randomSeed, uint64(sr.RoundIndex), shardId, @@ -342,6 +342,7 @@ func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error } sr.SetConsensusGroup(nextConsensusGroup) + sr.SetLeader(leader) return nil } diff --git a/consensus/spos/bls/v2/blsSubroundsFactory_test.go b/consensus/spos/bls/v2/blsSubroundsFactory_test.go index 1e227a96fd6..babe2a19ca1 100644 --- a/consensus/spos/bls/v2/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/v2/blsSubroundsFactory_test.go @@ -31,8 +31,8 @@ const roundTimeDuration = 100 * time.Millisecond func executeStoredMessages() { } -func initRoundHandlerMock() *mock.RoundHandlerMock { - return &mock.RoundHandlerMock{ +func initRoundHandlerMock() *testscommonConsensus.RoundHandlerMock { + return &testscommonConsensus.RoundHandlerMock{ RoundIndex: 0, TimeStampCalled: func() time.Time { return time.Unix(0, 0) diff --git a/consensus/spos/bls/v2/subroundBlock_test.go b/consensus/spos/bls/v2/subroundBlock_test.go index 4c1a9e8b129..9f96ed9af4e 100644 --- a/consensus/spos/bls/v2/subroundBlock_test.go +++ b/consensus/spos/bls/v2/subroundBlock_test.go @@ -532,7 +532,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { }, } container.SetBroadcastMessenger(bm) - container.SetRoundHandler(&mock.RoundHandlerMock{ + container.SetRoundHandler(&consensusMocks.RoundHandlerMock{ RoundIndex: 1, }) container.SetEquivalentProofsPool(&dataRetriever.ProofsPoolMock{ @@ -575,7 +575,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { }, } container.SetBroadcastMessenger(bm) - container.SetRoundHandler(&mock.RoundHandlerMock{ + container.SetRoundHandler(&consensusMocks.RoundHandlerMock{ RoundIndex: 1, }) r := sr.DoBlockJob() @@ -992,7 +992,7 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockRetu return expectedErr } container.SetBlockProcessor(blockProcessorMock) - container.SetRoundHandler(&mock.RoundHandlerMock{RoundIndex: 1}) + container.SetRoundHandler(&consensusMocks.RoundHandlerMock{RoundIndex: 1}) assert.False(t, sr.ProcessReceivedBlock(cnsMsg)) } @@ -1044,19 +1044,19 @@ func TestSubroundBlock_RemainingTimeShouldReturnNegativeValue(t *testing.T) { return remainingTime } - container.SetSyncTimer(&mock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + container.SetSyncTimer(&consensusMocks.SyncTimerMock{CurrentTimeCalled: func() time.Time { return time.Unix(0, 0).Add(roundTimeDuration * 84 / 100) }}) ret := remainingTimeInThisRound() assert.True(t, ret > 0) - container.SetSyncTimer(&mock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + container.SetSyncTimer(&consensusMocks.SyncTimerMock{CurrentTimeCalled: func() time.Time { return time.Unix(0, 0).Add(roundTimeDuration * 85 / 100) }}) ret = remainingTimeInThisRound() assert.True(t, ret == 0) - 
container.SetSyncTimer(&mock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + container.SetSyncTimer(&consensusMocks.SyncTimerMock{CurrentTimeCalled: func() time.Time { return time.Unix(0, 0).Add(roundTimeDuration * 86 / 100) }}) ret = remainingTimeInThisRound() @@ -1130,14 +1130,14 @@ func TestSubroundBlock_HaveTimeInCurrentSubroundShouldReturnTrue(t *testing.T) { return time.Duration(remainingTime) > 0 } - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} roundHandlerMock.TimeDurationCalled = func() time.Duration { return 4000 * time.Millisecond } roundHandlerMock.TimeStampCalled = func() time.Time { return time.Unix(0, 0) } - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} timeElapsed := sr.EndTime() - 1 syncTimerMock.CurrentTimeCalled = func() time.Time { return time.Unix(0, timeElapsed) @@ -1160,14 +1160,14 @@ func TestSubroundBlock_HaveTimeInCurrentSuboundShouldReturnFalse(t *testing.T) { return time.Duration(remainingTime) > 0 } - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} roundHandlerMock.TimeDurationCalled = func() time.Duration { return 4000 * time.Millisecond } roundHandlerMock.TimeStampCalled = func() time.Time { return time.Unix(0, 0) } - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} timeElapsed := sr.EndTime() + 1 syncTimerMock.CurrentTimeCalled = func() time.Time { return time.Unix(0, timeElapsed) diff --git a/consensus/spos/bls/v2/subroundEndRound_test.go b/consensus/spos/bls/v2/subroundEndRound_test.go index a75f7e08d85..6c8f448cd80 100644 --- a/consensus/spos/bls/v2/subroundEndRound_test.go +++ b/consensus/spos/bls/v2/subroundEndRound_test.go @@ -553,7 +553,7 @@ func TestSubroundEndRound_DoEndRoundJobErrTimeIsOutShouldFail(t *testing.T) { sr.SetSelfPubKey("A") remainingTime := time.Millisecond - roundHandlerMock := &mock.RoundHandlerMock{ + roundHandlerMock := &consensusMocks.RoundHandlerMock{ RemainingTimeCalled: func(startTime time.Time, maxTime time.Duration) time.Duration { return remainingTime }, @@ -1252,7 +1252,7 @@ func TestSubroundEndRound_IsOutOfTimeShouldReturnTrue(t *testing.T) { // update roundHandler's mock, so it will calculate for real the duration container := consensusMocks.InitConsensusCore() - roundHandler := mock.RoundHandlerMock{RemainingTimeCalled: func(startTime time.Time, maxTime time.Duration) time.Duration { + roundHandler := consensusMocks.RoundHandlerMock{RemainingTimeCalled: func(startTime time.Time, maxTime time.Duration) time.Duration { currentTime := time.Now() elapsedTime := currentTime.Sub(startTime) remainingTime := maxTime - elapsedTime diff --git a/consensus/spos/consensusCoreValidator_test.go b/consensus/spos/consensusCoreValidator_test.go index d35e83c4acb..5594b831311 100644 --- a/consensus/spos/consensusCoreValidator_test.go +++ b/consensus/spos/consensusCoreValidator_test.go @@ -25,9 +25,9 @@ func initConsensusDataContainer() *ConsensusCore { chronologyHandlerMock := consensusMocks.InitChronologyHandlerMock() multiSignerMock := cryptoMocks.NewMultiSigner() hasherMock := &hashingMocks.HasherMock{} - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} shardCoordinatorMock := mock.ShardCoordinatorMock{} - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} validatorGroupSelector := &shardingMocks.NodesCoordinatorMock{} antifloodHandler := 
&mock.P2PAntifloodHandlerStub{} peerHonestyHandler := &testscommon.PeerHonestyHandlerStub{} diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index fa806d9c840..ff336ad3fae 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -343,9 +343,9 @@ func (cns *ConsensusState) IsLeaderJobDone(currentSubroundId int) bool { return cns.IsJobDone(leader, currentSubroundId) } -// isMultiKeyJobDone method returns true if all the nodes controlled by this instance finished the current job for +// IsMultiKeyJobDone method returns true if all the nodes controlled by this instance finished the current job for // the current subround and false otherwise -func (cns *ConsensusState) isMultiKeyJobDone(currentSubroundId int) bool { +func (cns *ConsensusState) IsMultiKeyJobDone(currentSubroundId int) bool { for _, validator := range cns.consensusGroup { if !cns.keysHandler.IsKeyManagedByCurrentNode([]byte(validator)) { continue @@ -368,7 +368,7 @@ func (cns *ConsensusState) IsSelfJobDone(currentSubroundID int) bool { multiKeyJobDone := true if cns.IsMultiKeyInConsensusGroup() { - multiKeyJobDone = cns.isMultiKeyJobDone(currentSubroundID) + multiKeyJobDone = cns.IsMultiKeyJobDone(currentSubroundID) } return selfJobDone && multiKeyJobDone diff --git a/consensus/spos/export_test.go b/consensus/spos/export_test.go index 73634ae2af5..1ad0bbc67d5 100644 --- a/consensus/spos/export_test.go +++ b/consensus/spos/export_test.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/process" ) @@ -271,13 +272,3 @@ func (cmv *consensusMessageValidator) GetNumOfMessageTypeForPublicKey(pk []byte, func (cmv *consensusMessageValidator) ResetConsensusMessages() { cmv.resetConsensusMessages() } - -// IsSelfLeaderInCurrentRound - -func (sr *Subround) IsSelfLeaderInCurrentRound() bool { - return sr.isSelfLeaderInCurrentRound() -} - -// IsMultiKeyJobDone - -func (cns *ConsensusState) IsMultiKeyJobDone(currentSubroundId int) bool { - return cns.isMultiKeyJobDone(currentSubroundId) -} diff --git a/consensus/spos/scheduledProcessor_test.go b/consensus/spos/scheduledProcessor_test.go index 7316209921b..ed1f95287a2 100644 --- a/consensus/spos/scheduledProcessor_test.go +++ b/consensus/spos/scheduledProcessor_test.go @@ -8,9 +8,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/stretchr/testify/require" ) @@ -30,7 +32,7 @@ func TestNewScheduledProcessorWrapper_NilSyncTimerShouldErr(t *testing.T) { args := ScheduledProcessorWrapperArgs{ SyncTimer: nil, Processor: &testscommon.BlockProcessorStub{}, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, err := NewScheduledProcessorWrapper(args) @@ -42,9 +44,9 @@ func TestNewScheduledProcessorWrapper_NilBlockProcessorShouldErr(t *testing.T) { t.Parallel() args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{}, + SyncTimer: &consensus.SyncTimerMock{}, Processor: nil, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: 
&consensus.RoundHandlerMock{}, } sp, err := NewScheduledProcessorWrapper(args) @@ -56,7 +58,7 @@ func TestNewScheduledProcessorWrapper_NilRoundTimeDurationHandlerShouldErr(t *te t.Parallel() args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{}, + SyncTimer: &consensus.SyncTimerMock{}, Processor: &testscommon.BlockProcessorStub{}, RoundTimeDurationHandler: nil, } @@ -70,9 +72,9 @@ func TestNewScheduledProcessorWrapper_NilBlockProcessorOK(t *testing.T) { t.Parallel() args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{}, + SyncTimer: &consensus.SyncTimerMock{}, Processor: &testscommon.BlockProcessorStub{}, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, err := NewScheduledProcessorWrapper(args) @@ -85,14 +87,14 @@ func TestScheduledProcessorWrapper_IsProcessedOKEarlyExit(t *testing.T) { called := atomic.Flag{} args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{ + SyncTimer: &consensus.SyncTimerMock{ CurrentTimeCalled: func() time.Time { called.SetValue(true) return time.Now() }, }, Processor: &testscommon.BlockProcessorStub{}, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, err := NewScheduledProcessorWrapper(args) @@ -112,13 +114,13 @@ func TestScheduledProcessorWrapper_IsProcessedOKEarlyExit(t *testing.T) { func defaultScheduledProcessorWrapperArgs() ScheduledProcessorWrapperArgs { return ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{ + SyncTimer: &consensus.SyncTimerMock{ CurrentTimeCalled: func() time.Time { return time.Now() }, }, Processor: &testscommon.BlockProcessorStub{}, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } } @@ -227,9 +229,9 @@ func TestScheduledProcessorWrapper_StatusGetterAndSetter(t *testing.T) { t.Parallel() args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{}, + SyncTimer: &consensus.SyncTimerMock{}, Processor: &testscommon.BlockProcessorStub{}, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, _ := NewScheduledProcessorWrapper(args) @@ -250,14 +252,14 @@ func TestScheduledProcessorWrapper_StartScheduledProcessingHeaderV1ProcessingOK( processScheduledCalled := atomic.Flag{} args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{}, + SyncTimer: &consensus.SyncTimerMock{}, Processor: &testscommon.BlockProcessorStub{ ProcessScheduledBlockCalled: func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { processScheduledCalled.SetValue(true) return nil }, }, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, _ := NewScheduledProcessorWrapper(args) @@ -276,14 +278,14 @@ func TestScheduledProcessorWrapper_StartScheduledProcessingHeaderV2ProcessingWit processScheduledCalled := atomic.Flag{} args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{}, + SyncTimer: &consensus.SyncTimerMock{}, Processor: &testscommon.BlockProcessorStub{ ProcessScheduledBlockCalled: func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { processScheduledCalled.SetValue(true) return errors.New("processing error") }, }, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, _ := 
NewScheduledProcessorWrapper(args) @@ -304,14 +306,14 @@ func TestScheduledProcessorWrapper_StartScheduledProcessingHeaderV2ProcessingOK( processScheduledCalled := atomic.Flag{} args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{}, + SyncTimer: &consensus.SyncTimerMock{}, Processor: &testscommon.BlockProcessorStub{ ProcessScheduledBlockCalled: func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { processScheduledCalled.SetValue(true) return nil }, }, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, _ := NewScheduledProcessorWrapper(args) @@ -333,7 +335,7 @@ func TestScheduledProcessorWrapper_StartScheduledProcessingHeaderV2ForceStopped( processScheduledCalled := atomic.Flag{} args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{ + SyncTimer: &consensus.SyncTimerMock{ CurrentTimeCalled: func() time.Time { return time.Now() }, @@ -350,7 +352,7 @@ func TestScheduledProcessorWrapper_StartScheduledProcessingHeaderV2ForceStopped( } }, }, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } spw, err := NewScheduledProcessorWrapper(args) @@ -374,7 +376,7 @@ func TestScheduledProcessorWrapper_StartScheduledProcessingHeaderV2ForceStopAfte processScheduledCalled := atomic.Flag{} args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{ + SyncTimer: &consensus.SyncTimerMock{ CurrentTimeCalled: func() time.Time { return time.Now() }, @@ -386,7 +388,7 @@ func TestScheduledProcessorWrapper_StartScheduledProcessingHeaderV2ForceStopAfte return nil }, }, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } spw, err := NewScheduledProcessorWrapper(args) diff --git a/consensus/spos/subround.go b/consensus/spos/subround.go index 1f06191a2c5..e124475407b 100644 --- a/consensus/spos/subround.go +++ b/consensus/spos/subround.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/consensus" ) @@ -235,11 +236,11 @@ func (sr *Subround) IsSelfInConsensusGroup() bool { // IsSelfLeader returns true is the current node is leader is single key or in // multi-key mode func (sr *Subround) IsSelfLeader() bool { - return sr.isSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() + return sr.IsSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() } -// isSelfLeaderInCurrentRound method checks if the current node is leader in the current round -func (sr *Subround) isSelfLeaderInCurrentRound() bool { +// IsSelfLeaderInCurrentRound method checks if the current node is leader in the current round +func (sr *Subround) IsSelfLeaderInCurrentRound() bool { return sr.IsNodeLeaderInCurrentRound(sr.SelfPubKey()) && sr.ShouldConsiderSelfKeyInConsensus() } @@ -249,7 +250,7 @@ func (sr *Subround) GetLeaderStartRoundMessage() string { if sr.IsMultiKeyLeaderInCurrentRound() { return multiKeyStartMsg } - if sr.isSelfLeaderInCurrentRound() { + if sr.IsSelfLeaderInCurrentRound() { return singleKeyStartMsg } diff --git a/consensus/spos/subround_test.go b/consensus/spos/subround_test.go index 2e28b9a0a9d..cd54782643c 100644 --- a/consensus/spos/subround_test.go +++ b/consensus/spos/subround_test.go @@ -9,6 +9,9 @@ import ( "github.com/multiversx/mx-chain-core-go/core" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" @@ -16,8 +19,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var chainID = []byte("chain ID") @@ -594,7 +595,7 @@ func TestSubround_DoWorkShouldReturnFalseWhenJobFunctionIsNotSet(t *testing.T) { } maxTime := time.Now().Add(100 * time.Millisecond) - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus.RoundHandlerMock{} roundHandlerMock.RemainingTimeCalled = func(time.Time, time.Duration) time.Duration { return time.Until(maxTime) } @@ -632,7 +633,7 @@ func TestSubround_DoWorkShouldReturnFalseWhenCheckFunctionIsNotSet(t *testing.T) sr.Check = nil maxTime := time.Now().Add(100 * time.Millisecond) - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus.RoundHandlerMock{} roundHandlerMock.RemainingTimeCalled = func(time.Time, time.Duration) time.Duration { return time.Until(maxTime) } @@ -681,7 +682,7 @@ func testDoWork(t *testing.T, checkDone bool, shouldWork bool) { } maxTime := time.Now().Add(100 * time.Millisecond) - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus.RoundHandlerMock{} roundHandlerMock.RemainingTimeCalled = func(time.Time, time.Duration) time.Duration { return time.Until(maxTime) } @@ -728,7 +729,7 @@ func TestSubround_DoWorkShouldReturnTrueWhenJobIsDoneAndConsensusIsDoneAfterAWhi } maxTime := time.Now().Add(2000 * time.Millisecond) - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus.RoundHandlerMock{} roundHandlerMock.RemainingTimeCalled = func(time.Time, time.Duration) time.Duration { return time.Until(maxTime) } diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index b9eada158f8..5fa1355f9e0 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -82,7 +82,7 @@ func createDefaultWorkerArgs(appStatusHandler core.AppStatusHandler) *spos.Worke return nil }, } - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} hasher := &hashingMocks.HasherMock{} blsService, _ := bls.NewConsensusService() poolAdder := cache.NewCacherMock() @@ -149,8 +149,8 @@ func initWorker(appStatusHandler core.AppStatusHandler) *spos.Worker { return sposWorker } -func initRoundHandlerMock() *mock.RoundHandlerMock { - return &mock.RoundHandlerMock{ +func initRoundHandlerMock() *consensusMocks.RoundHandlerMock { + return &consensusMocks.RoundHandlerMock{ RoundIndex: 0, TimeStampCalled: func() time.Time { return time.Unix(0, 0) @@ -797,7 +797,7 @@ func testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( }, }) - wrk.SetRoundHandler(&mock.RoundHandlerMock{ + wrk.SetRoundHandler(&consensusMocks.RoundHandlerMock{ RoundIndex: 0, TimeDurationCalled: func() time.Duration { return roundDuration diff --git a/testscommon/consensus/mockTestInitializer.go b/testscommon/consensus/mockTestInitializer.go index 2962a577d34..b9d74889e39 100644 --- a/testscommon/consensus/mockTestInitializer.go +++ b/testscommon/consensus/mockTestInitializer.go @@ -181,9 +181,9 @@ func InitConsensusCoreWithMultiSigner(multiSigner crypto.MultiSigner) *Consensus chronologyHandlerMock := InitChronologyHandlerMock() 
hasherMock := &hashingMocks.HasherMock{} - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &RoundHandlerMock{} shardCoordinatorMock := mock.ShardCoordinatorMock{} - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &SyncTimerMock{} validatorGroupSelector := &shardingMocks.NodesCoordinatorMock{ ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (nodesCoordinator.Validator, []nodesCoordinator.Validator, error) { defaultSelectionChances := uint32(1) diff --git a/consensus/mock/rounderMock.go b/testscommon/consensus/rounderMock.go similarity index 98% rename from consensus/mock/rounderMock.go rename to testscommon/consensus/rounderMock.go index 6a0625932a1..bb463f38c33 100644 --- a/consensus/mock/rounderMock.go +++ b/testscommon/consensus/rounderMock.go @@ -1,4 +1,4 @@ -package mock +package consensus import ( "time" diff --git a/consensus/mock/syncTimerMock.go b/testscommon/consensus/syncTimerMock.go similarity index 98% rename from consensus/mock/syncTimerMock.go rename to testscommon/consensus/syncTimerMock.go index 2fa41d42341..32b92bbe33b 100644 --- a/consensus/mock/syncTimerMock.go +++ b/testscommon/consensus/syncTimerMock.go @@ -1,4 +1,4 @@ -package mock +package consensus import ( "time" From c8158415ea1fc1e8ab0d015c9317ed48e9103be3 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 20 Sep 2024 16:34:32 +0300 Subject: [PATCH 03/30] extract exported constants --- consensus/spos/bls/constants.go | 92 +++++++++++ consensus/spos/bls/v1/blsSubroundsFactory.go | 49 +++--- .../spos/bls/v1/blsSubroundsFactory_test.go | 15 +- consensus/spos/bls/v1/blsWorker.go | 73 ++++----- consensus/spos/bls/v1/blsWorker_test.go | 149 +++++++++--------- consensus/spos/bls/v1/constants.go | 89 ----------- consensus/spos/bls/v1/export_test.go | 5 - consensus/spos/bls/v1/subroundBlock.go | 7 +- consensus/spos/bls/v1/subroundBlock_test.go | 69 ++++---- consensus/spos/bls/v1/subroundEndRound.go | 17 +- .../spos/bls/v1/subroundEndRound_test.go | 81 +++++----- consensus/spos/bls/v1/subroundSignature.go | 3 +- .../spos/bls/v1/subroundSignature_test.go | 103 ++++++------ .../spos/bls/v1/subroundStartRound_test.go | 105 ++++++------ 14 files changed, 434 insertions(+), 423 deletions(-) create mode 100644 consensus/spos/bls/constants.go diff --git a/consensus/spos/bls/constants.go b/consensus/spos/bls/constants.go new file mode 100644 index 00000000000..4b93cae65be --- /dev/null +++ b/consensus/spos/bls/constants.go @@ -0,0 +1,92 @@ +package bls + +import ( + "github.com/multiversx/mx-chain-go/consensus" +) + +const ( + // SrStartRound defines ID of Subround "Start round" + SrStartRound = iota + // SrBlock defines ID of Subround "block" + SrBlock + // SrSignature defines ID of Subround "signature" + SrSignature + // SrEndRound defines ID of Subround "End round" + SrEndRound +) + +const ( + // MtUnknown defines ID of a message that has unknown data inside + MtUnknown consensus.MessageType = iota + // MtBlockBodyAndHeader defines ID of a message that has a block body and a block header inside + MtBlockBodyAndHeader + // MtBlockBody defines ID of a message that has a block body inside + MtBlockBody + // MtBlockHeader defines ID of a message that has a block header inside + MtBlockHeader + // MtSignature defines ID of a message that has a Signature inside + MtSignature + // MtBlockHeaderFinalInfo defines ID of a message that has a block header final info inside + // (aggregate signature, bitmap and seal leader signature for the proposed and 
accepted header) + MtBlockHeaderFinalInfo + // MtInvalidSigners defines ID of a message that has a invalid signers p2p messages inside + MtInvalidSigners +) + +const ( + // BlockBodyAndHeaderStringValue represents the string to be used to identify a block body and a block header + BlockBodyAndHeaderStringValue = "(BLOCK_BODY_AND_HEADER)" + + // BlockBodyStringValue represents the string to be used to identify a block body + BlockBodyStringValue = "(BLOCK_BODY)" + + // BlockHeaderStringValue represents the string to be used to identify a block header + BlockHeaderStringValue = "(BLOCK_HEADER)" + + // BlockSignatureStringValue represents the string to be used to identify a block's signature + BlockSignatureStringValue = "(SIGNATURE)" + + // BlockHeaderFinalInfoStringValue represents the string to be used to identify a block's header final info + BlockHeaderFinalInfoStringValue = "(FINAL_INFO)" + + // BlockUnknownStringValue represents the string to be used to identify an unknown block + BlockUnknownStringValue = "(UNKNOWN)" + + // BlockDefaultStringValue represents the message to identify a message that is undefined + BlockDefaultStringValue = "Undefined message type" +) + +func GetStringValue(msgType consensus.MessageType) string { + switch msgType { + case MtBlockBodyAndHeader: + return BlockBodyAndHeaderStringValue + case MtBlockBody: + return BlockBodyStringValue + case MtBlockHeader: + return BlockHeaderStringValue + case MtSignature: + return BlockSignatureStringValue + case MtBlockHeaderFinalInfo: + return BlockHeaderFinalInfoStringValue + case MtUnknown: + return BlockUnknownStringValue + default: + return BlockDefaultStringValue + } +} + +// GetSubroundName returns the name of each Subround from a given Subround ID +func GetSubroundName(subroundId int) string { + switch subroundId { + case SrStartRound: + return "(START_ROUND)" + case SrBlock: + return "(BLOCK)" + case SrSignature: + return "(SIGNATURE)" + case SrEndRound: + return "(END_ROUND)" + default: + return "Undefined subround" + } +} diff --git a/consensus/spos/bls/v1/blsSubroundsFactory.go b/consensus/spos/bls/v1/blsSubroundsFactory.go index 8f6f7c1822d..f06c3e0af55 100644 --- a/consensus/spos/bls/v1/blsSubroundsFactory.go +++ b/consensus/spos/bls/v1/blsSubroundsFactory.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/outport" ) @@ -131,11 +132,11 @@ func (fct *factory) getTimeDuration() time.Duration { func (fct *factory) generateStartRoundSubround() error { subround, err := spos.NewSubround( -1, - SrStartRound, - SrBlock, + bls.SrStartRound, + bls.SrBlock, int64(float64(fct.getTimeDuration())*srStartStartTime), int64(float64(fct.getTimeDuration())*srStartEndTime), - getSubroundName(SrStartRound), + bls.GetSubroundName(bls.SrStartRound), fct.consensusState, fct.worker.GetConsensusStateChangedChannel(), fct.worker.ExecuteStoredMessages, @@ -172,12 +173,12 @@ func (fct *factory) generateStartRoundSubround() error { func (fct *factory) generateBlockSubround() error { subround, err := spos.NewSubround( - SrStartRound, - SrBlock, - SrSignature, + bls.SrStartRound, + bls.SrBlock, + bls.SrSignature, int64(float64(fct.getTimeDuration())*srBlockStartTime), int64(float64(fct.getTimeDuration())*srBlockEndTime), - getSubroundName(SrBlock), + bls.GetSubroundName(bls.SrBlock), fct.consensusState, fct.worker.GetConsensusStateChangedChannel(), 
fct.worker.ExecuteStoredMessages, @@ -199,9 +200,9 @@ func (fct *factory) generateBlockSubround() error { return err } - fct.worker.AddReceivedMessageCall(MtBlockBodyAndHeader, subroundBlockInstance.receivedBlockBodyAndHeader) - fct.worker.AddReceivedMessageCall(MtBlockBody, subroundBlockInstance.receivedBlockBody) - fct.worker.AddReceivedMessageCall(MtBlockHeader, subroundBlockInstance.receivedBlockHeader) + fct.worker.AddReceivedMessageCall(bls.MtBlockBodyAndHeader, subroundBlockInstance.receivedBlockBodyAndHeader) + fct.worker.AddReceivedMessageCall(bls.MtBlockBody, subroundBlockInstance.receivedBlockBody) + fct.worker.AddReceivedMessageCall(bls.MtBlockHeader, subroundBlockInstance.receivedBlockHeader) fct.consensusCore.Chronology().AddSubround(subroundBlockInstance) return nil @@ -209,12 +210,12 @@ func (fct *factory) generateBlockSubround() error { func (fct *factory) generateSignatureSubround() error { subround, err := spos.NewSubround( - SrBlock, - SrSignature, - SrEndRound, + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, int64(float64(fct.getTimeDuration())*srSignatureStartTime), int64(float64(fct.getTimeDuration())*srSignatureEndTime), - getSubroundName(SrSignature), + bls.GetSubroundName(bls.SrSignature), fct.consensusState, fct.worker.GetConsensusStateChangedChannel(), fct.worker.ExecuteStoredMessages, @@ -237,7 +238,7 @@ func (fct *factory) generateSignatureSubround() error { return err } - fct.worker.AddReceivedMessageCall(MtSignature, subroundSignatureObject.receivedSignature) + fct.worker.AddReceivedMessageCall(bls.MtSignature, subroundSignatureObject.receivedSignature) fct.consensusCore.Chronology().AddSubround(subroundSignatureObject) return nil @@ -245,12 +246,12 @@ func (fct *factory) generateSignatureSubround() error { func (fct *factory) generateEndRoundSubround() error { subround, err := spos.NewSubround( - SrSignature, - SrEndRound, + bls.SrSignature, + bls.SrEndRound, -1, int64(float64(fct.getTimeDuration())*srEndStartTime), int64(float64(fct.getTimeDuration())*srEndEndTime), - getSubroundName(SrEndRound), + bls.GetSubroundName(bls.SrEndRound), fct.consensusState, fct.worker.GetConsensusStateChangedChannel(), fct.worker.ExecuteStoredMessages, @@ -275,8 +276,8 @@ func (fct *factory) generateEndRoundSubround() error { return err } - fct.worker.AddReceivedMessageCall(MtBlockHeaderFinalInfo, subroundEndRoundObject.receivedBlockHeaderFinalInfo) - fct.worker.AddReceivedMessageCall(MtInvalidSigners, subroundEndRoundObject.receivedInvalidSignersInfo) + fct.worker.AddReceivedMessageCall(bls.MtBlockHeaderFinalInfo, subroundEndRoundObject.receivedBlockHeaderFinalInfo) + fct.worker.AddReceivedMessageCall(bls.MtInvalidSigners, subroundEndRoundObject.receivedInvalidSignersInfo) fct.worker.AddReceivedHeaderHandler(subroundEndRoundObject.receivedHeader) fct.consensusCore.Chronology().AddSubround(subroundEndRoundObject) @@ -286,10 +287,10 @@ func (fct *factory) generateEndRoundSubround() error { func (fct *factory) initConsensusThreshold() { pBFTThreshold := core.GetPBFTThreshold(fct.consensusState.ConsensusGroupSize()) pBFTFallbackThreshold := core.GetPBFTFallbackThreshold(fct.consensusState.ConsensusGroupSize()) - fct.consensusState.SetThreshold(SrBlock, 1) - fct.consensusState.SetThreshold(SrSignature, pBFTThreshold) - fct.consensusState.SetFallbackThreshold(SrBlock, 1) - fct.consensusState.SetFallbackThreshold(SrSignature, pBFTFallbackThreshold) + fct.consensusState.SetThreshold(bls.SrBlock, 1) + fct.consensusState.SetThreshold(bls.SrSignature, pBFTThreshold) + 
fct.consensusState.SetFallbackThreshold(bls.SrBlock, 1) + fct.consensusState.SetFallbackThreshold(bls.SrSignature, pBFTFallbackThreshold) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/consensus/spos/bls/v1/blsSubroundsFactory_test.go b/consensus/spos/bls/v1/blsSubroundsFactory_test.go index 66bc2887210..3024eb79de0 100644 --- a/consensus/spos/bls/v1/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/v1/blsSubroundsFactory_test.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/testscommon" @@ -93,25 +94,25 @@ func initFactory() v1.Factory { func TestFactory_GetMessageTypeName(t *testing.T) { t.Parallel() - r := v1.GetStringValue(v1.MtBlockBodyAndHeader) + r := bls.GetStringValue(bls.MtBlockBodyAndHeader) assert.Equal(t, "(BLOCK_BODY_AND_HEADER)", r) - r = v1.GetStringValue(v1.MtBlockBody) + r = bls.GetStringValue(bls.MtBlockBody) assert.Equal(t, "(BLOCK_BODY)", r) - r = v1.GetStringValue(v1.MtBlockHeader) + r = bls.GetStringValue(bls.MtBlockHeader) assert.Equal(t, "(BLOCK_HEADER)", r) - r = v1.GetStringValue(v1.MtSignature) + r = bls.GetStringValue(bls.MtSignature) assert.Equal(t, "(SIGNATURE)", r) - r = v1.GetStringValue(v1.MtBlockHeaderFinalInfo) + r = bls.GetStringValue(bls.MtBlockHeaderFinalInfo) assert.Equal(t, "(FINAL_INFO)", r) - r = v1.GetStringValue(v1.MtUnknown) + r = bls.GetStringValue(bls.MtUnknown) assert.Equal(t, "(UNKNOWN)", r) - r = v1.GetStringValue(consensus.MessageType(-1)) + r = bls.GetStringValue(consensus.MessageType(-1)) assert.Equal(t, "Undefined message type", r) } diff --git a/consensus/spos/bls/v1/blsWorker.go b/consensus/spos/bls/v1/blsWorker.go index 602ae0e8305..b6e168d61c0 100644 --- a/consensus/spos/bls/v1/blsWorker.go +++ b/consensus/spos/bls/v1/blsWorker.go @@ -3,6 +3,7 @@ package v1 import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" ) // peerMaxMessagesPerSec defines how many messages can be propagated by a pid in a round. 
The value was chosen by @@ -40,12 +41,12 @@ func NewConsensusService() (*worker, error) { // InitReceivedMessages initializes the MessagesType map for all messages for the current ConsensusService func (wrk *worker) InitReceivedMessages() map[consensus.MessageType][]*consensus.Message { receivedMessages := make(map[consensus.MessageType][]*consensus.Message) - receivedMessages[MtBlockBodyAndHeader] = make([]*consensus.Message, 0) - receivedMessages[MtBlockBody] = make([]*consensus.Message, 0) - receivedMessages[MtBlockHeader] = make([]*consensus.Message, 0) - receivedMessages[MtSignature] = make([]*consensus.Message, 0) - receivedMessages[MtBlockHeaderFinalInfo] = make([]*consensus.Message, 0) - receivedMessages[MtInvalidSigners] = make([]*consensus.Message, 0) + receivedMessages[bls.MtBlockBodyAndHeader] = make([]*consensus.Message, 0) + receivedMessages[bls.MtBlockBody] = make([]*consensus.Message, 0) + receivedMessages[bls.MtBlockHeader] = make([]*consensus.Message, 0) + receivedMessages[bls.MtSignature] = make([]*consensus.Message, 0) + receivedMessages[bls.MtBlockHeaderFinalInfo] = make([]*consensus.Message, 0) + receivedMessages[bls.MtInvalidSigners] = make([]*consensus.Message, 0) return receivedMessages } @@ -57,71 +58,71 @@ func (wrk *worker) GetMaxMessagesInARoundPerPeer() uint32 { // GetStringValue gets the name of the messageType func (wrk *worker) GetStringValue(messageType consensus.MessageType) string { - return getStringValue(messageType) + return bls.GetStringValue(messageType) } // GetSubroundName gets the subround name for the subround id provided func (wrk *worker) GetSubroundName(subroundId int) string { - return getSubroundName(subroundId) + return bls.GetSubroundName(subroundId) } // IsMessageWithBlockBodyAndHeader returns if the current messageType is about block body and header func (wrk *worker) IsMessageWithBlockBodyAndHeader(msgType consensus.MessageType) bool { - return msgType == MtBlockBodyAndHeader + return msgType == bls.MtBlockBodyAndHeader } // IsMessageWithBlockBody returns if the current messageType is about block body func (wrk *worker) IsMessageWithBlockBody(msgType consensus.MessageType) bool { - return msgType == MtBlockBody + return msgType == bls.MtBlockBody } // IsMessageWithBlockHeader returns if the current messageType is about block header func (wrk *worker) IsMessageWithBlockHeader(msgType consensus.MessageType) bool { - return msgType == MtBlockHeader + return msgType == bls.MtBlockHeader } // IsMessageWithSignature returns if the current messageType is about signature func (wrk *worker) IsMessageWithSignature(msgType consensus.MessageType) bool { - return msgType == MtSignature + return msgType == bls.MtSignature } // IsMessageWithFinalInfo returns if the current messageType is about header final info func (wrk *worker) IsMessageWithFinalInfo(msgType consensus.MessageType) bool { - return msgType == MtBlockHeaderFinalInfo + return msgType == bls.MtBlockHeaderFinalInfo } // IsMessageWithInvalidSigners returns if the current messageType is about invalid signers func (wrk *worker) IsMessageWithInvalidSigners(msgType consensus.MessageType) bool { - return msgType == MtInvalidSigners + return msgType == bls.MtInvalidSigners } // IsMessageTypeValid returns if the current messageType is valid func (wrk *worker) IsMessageTypeValid(msgType consensus.MessageType) bool { - isMessageTypeValid := msgType == MtBlockBodyAndHeader || - msgType == MtBlockBody || - msgType == MtBlockHeader || - msgType == MtSignature || - msgType == MtBlockHeaderFinalInfo || 
- msgType == MtInvalidSigners + isMessageTypeValid := msgType == bls.MtBlockBodyAndHeader || + msgType == bls.MtBlockBody || + msgType == bls.MtBlockHeader || + msgType == bls.MtSignature || + msgType == bls.MtBlockHeaderFinalInfo || + msgType == bls.MtInvalidSigners return isMessageTypeValid } // IsSubroundSignature returns if the current subround is about signature func (wrk *worker) IsSubroundSignature(subroundId int) bool { - return subroundId == SrSignature + return subroundId == bls.SrSignature } // IsSubroundStartRound returns if the current subround is about start round func (wrk *worker) IsSubroundStartRound(subroundId int) bool { - return subroundId == SrStartRound + return subroundId == bls.SrStartRound } // GetMessageRange provides the MessageType range used in checks by the consensus func (wrk *worker) GetMessageRange() []consensus.MessageType { var v []consensus.MessageType - for i := MtBlockBodyAndHeader; i <= MtInvalidSigners; i++ { + for i := bls.MtBlockBodyAndHeader; i <= bls.MtInvalidSigners; i++ { v = append(v, i) } @@ -131,18 +132,18 @@ func (wrk *worker) GetMessageRange() []consensus.MessageType { // CanProceed returns if the current messageType can proceed further if previous subrounds finished func (wrk *worker) CanProceed(consensusState *spos.ConsensusState, msgType consensus.MessageType) bool { switch msgType { - case MtBlockBodyAndHeader: - return consensusState.Status(SrStartRound) == spos.SsFinished - case MtBlockBody: - return consensusState.Status(SrStartRound) == spos.SsFinished - case MtBlockHeader: - return consensusState.Status(SrStartRound) == spos.SsFinished - case MtSignature: - return consensusState.Status(SrBlock) == spos.SsFinished - case MtBlockHeaderFinalInfo: - return consensusState.Status(SrSignature) == spos.SsFinished - case MtInvalidSigners: - return consensusState.Status(SrSignature) == spos.SsFinished + case bls.MtBlockBodyAndHeader: + return consensusState.Status(bls.SrStartRound) == spos.SsFinished + case bls.MtBlockBody: + return consensusState.Status(bls.SrStartRound) == spos.SsFinished + case bls.MtBlockHeader: + return consensusState.Status(bls.SrStartRound) == spos.SsFinished + case bls.MtSignature: + return consensusState.Status(bls.SrBlock) == spos.SsFinished + case bls.MtBlockHeaderFinalInfo: + return consensusState.Status(bls.SrSignature) == spos.SsFinished + case bls.MtInvalidSigners: + return consensusState.Status(bls.SrSignature) == spos.SsFinished } return false @@ -150,7 +151,7 @@ func (wrk *worker) CanProceed(consensusState *spos.ConsensusState, msgType conse // GetMaxNumOfMessageTypeAccepted returns the maximum number of accepted consensus message types per round, per public key func (wrk *worker) GetMaxNumOfMessageTypeAccepted(msgType consensus.MessageType) uint32 { - if msgType == MtSignature { + if msgType == bls.MtSignature { return maxNumOfMessageTypeSignatureAccepted } diff --git a/consensus/spos/bls/v1/blsWorker_test.go b/consensus/spos/bls/v1/blsWorker_test.go index 15e5f5b03cd..21cf32a6de2 100644 --- a/consensus/spos/bls/v1/blsWorker_test.go +++ b/consensus/spos/bls/v1/blsWorker_test.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" "github.com/multiversx/mx-chain-go/testscommon" ) @@ -82,20 +83,20 @@ func TestWorker_InitReceivedMessagesShouldWork(t *testing.T) { messages := bnService.InitReceivedMessages() receivedMessages 
:= make(map[consensus.MessageType][]*consensus.Message) - receivedMessages[v1.MtBlockBodyAndHeader] = make([]*consensus.Message, 0) - receivedMessages[v1.MtBlockBody] = make([]*consensus.Message, 0) - receivedMessages[v1.MtBlockHeader] = make([]*consensus.Message, 0) - receivedMessages[v1.MtSignature] = make([]*consensus.Message, 0) - receivedMessages[v1.MtBlockHeaderFinalInfo] = make([]*consensus.Message, 0) - receivedMessages[v1.MtInvalidSigners] = make([]*consensus.Message, 0) + receivedMessages[bls.MtBlockBodyAndHeader] = make([]*consensus.Message, 0) + receivedMessages[bls.MtBlockBody] = make([]*consensus.Message, 0) + receivedMessages[bls.MtBlockHeader] = make([]*consensus.Message, 0) + receivedMessages[bls.MtSignature] = make([]*consensus.Message, 0) + receivedMessages[bls.MtBlockHeaderFinalInfo] = make([]*consensus.Message, 0) + receivedMessages[bls.MtInvalidSigners] = make([]*consensus.Message, 0) assert.Equal(t, len(receivedMessages), len(messages)) - assert.NotNil(t, messages[v1.MtBlockBodyAndHeader]) - assert.NotNil(t, messages[v1.MtBlockBody]) - assert.NotNil(t, messages[v1.MtBlockHeader]) - assert.NotNil(t, messages[v1.MtSignature]) - assert.NotNil(t, messages[v1.MtBlockHeaderFinalInfo]) - assert.NotNil(t, messages[v1.MtInvalidSigners]) + assert.NotNil(t, messages[bls.MtBlockBodyAndHeader]) + assert.NotNil(t, messages[bls.MtBlockBody]) + assert.NotNil(t, messages[bls.MtBlockHeader]) + assert.NotNil(t, messages[bls.MtSignature]) + assert.NotNil(t, messages[bls.MtBlockHeaderFinalInfo]) + assert.NotNil(t, messages[bls.MtInvalidSigners]) } func TestWorker_GetMessageRangeShouldWork(t *testing.T) { @@ -107,7 +108,7 @@ func TestWorker_GetMessageRangeShouldWork(t *testing.T) { messagesRange := blsService.GetMessageRange() assert.NotNil(t, messagesRange) - for i := v1.MtBlockBodyAndHeader; i <= v1.MtInvalidSigners; i++ { + for i := bls.MtBlockBodyAndHeader; i <= bls.MtInvalidSigners; i++ { v = append(v, i) } assert.NotNil(t, v) @@ -123,9 +124,9 @@ func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyAndHeaderShouldW blsService, _ := v1.NewConsensusService() consensusState := initConsensusState() - consensusState.SetStatus(v1.SrStartRound, spos.SsFinished) + consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) - canProceed := blsService.CanProceed(consensusState, v1.MtBlockBodyAndHeader) + canProceed := blsService.CanProceed(consensusState, bls.MtBlockBodyAndHeader) assert.True(t, canProceed) } @@ -135,9 +136,9 @@ func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyAndHeaderShou blsService, _ := v1.NewConsensusService() consensusState := initConsensusState() - consensusState.SetStatus(v1.SrStartRound, spos.SsNotFinished) + consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) - canProceed := blsService.CanProceed(consensusState, v1.MtBlockBodyAndHeader) + canProceed := blsService.CanProceed(consensusState, bls.MtBlockBodyAndHeader) assert.False(t, canProceed) } @@ -147,9 +148,9 @@ func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyShouldWork(t *te blsService, _ := v1.NewConsensusService() consensusState := initConsensusState() - consensusState.SetStatus(v1.SrStartRound, spos.SsFinished) + consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) - canProceed := blsService.CanProceed(consensusState, v1.MtBlockBody) + canProceed := blsService.CanProceed(consensusState, bls.MtBlockBody) assert.True(t, canProceed) } @@ -159,9 +160,9 @@ func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyShouldNotWork blsService, _ := 
v1.NewConsensusService() consensusState := initConsensusState() - consensusState.SetStatus(v1.SrStartRound, spos.SsNotFinished) + consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) - canProceed := blsService.CanProceed(consensusState, v1.MtBlockBody) + canProceed := blsService.CanProceed(consensusState, bls.MtBlockBody) assert.False(t, canProceed) } @@ -171,9 +172,9 @@ func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockHeaderShouldWork(t * blsService, _ := v1.NewConsensusService() consensusState := initConsensusState() - consensusState.SetStatus(v1.SrStartRound, spos.SsFinished) + consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) - canProceed := blsService.CanProceed(consensusState, v1.MtBlockHeader) + canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeader) assert.True(t, canProceed) } @@ -183,9 +184,9 @@ func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockHeaderShouldNotWo blsService, _ := v1.NewConsensusService() consensusState := initConsensusState() - consensusState.SetStatus(v1.SrStartRound, spos.SsNotFinished) + consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) - canProceed := blsService.CanProceed(consensusState, v1.MtBlockHeader) + canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeader) assert.False(t, canProceed) } @@ -195,9 +196,9 @@ func TestWorker_CanProceedWithSrBlockFinishedForMtBlockHeaderShouldWork(t *testi blsService, _ := v1.NewConsensusService() consensusState := initConsensusState() - consensusState.SetStatus(v1.SrBlock, spos.SsFinished) + consensusState.SetStatus(bls.SrBlock, spos.SsFinished) - canProceed := blsService.CanProceed(consensusState, v1.MtSignature) + canProceed := blsService.CanProceed(consensusState, bls.MtSignature) assert.True(t, canProceed) } @@ -207,9 +208,9 @@ func TestWorker_CanProceedWithSrBlockRoundNotFinishedForMtBlockHeaderShouldNotWo blsService, _ := v1.NewConsensusService() consensusState := initConsensusState() - consensusState.SetStatus(v1.SrBlock, spos.SsNotFinished) + consensusState.SetStatus(bls.SrBlock, spos.SsNotFinished) - canProceed := blsService.CanProceed(consensusState, v1.MtSignature) + canProceed := blsService.CanProceed(consensusState, bls.MtSignature) assert.False(t, canProceed) } @@ -219,9 +220,9 @@ func TestWorker_CanProceedWithSrSignatureFinishedForMtBlockHeaderFinalInfoShould blsService, _ := v1.NewConsensusService() consensusState := initConsensusState() - consensusState.SetStatus(v1.SrSignature, spos.SsFinished) + consensusState.SetStatus(bls.SrSignature, spos.SsFinished) - canProceed := blsService.CanProceed(consensusState, v1.MtBlockHeaderFinalInfo) + canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeaderFinalInfo) assert.True(t, canProceed) } @@ -231,9 +232,9 @@ func TestWorker_CanProceedWithSrSignatureRoundNotFinishedForMtBlockHeaderFinalIn blsService, _ := v1.NewConsensusService() consensusState := initConsensusState() - consensusState.SetStatus(v1.SrSignature, spos.SsNotFinished) + consensusState.SetStatus(bls.SrSignature, spos.SsNotFinished) - canProceed := blsService.CanProceed(consensusState, v1.MtBlockHeaderFinalInfo) + canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeaderFinalInfo) assert.False(t, canProceed) } @@ -252,13 +253,13 @@ func TestWorker_GetSubroundName(t *testing.T) { service, _ := v1.NewConsensusService() - r := service.GetSubroundName(v1.SrStartRound) + r := service.GetSubroundName(bls.SrStartRound) assert.Equal(t, "(START_ROUND)", r) - r = service.GetSubroundName(v1.SrBlock) + r = 
service.GetSubroundName(bls.SrBlock) assert.Equal(t, "(BLOCK)", r) - r = service.GetSubroundName(v1.SrSignature) + r = service.GetSubroundName(bls.SrSignature) assert.Equal(t, "(SIGNATURE)", r) - r = service.GetSubroundName(v1.SrEndRound) + r = service.GetSubroundName(bls.SrEndRound) assert.Equal(t, "(END_ROUND)", r) r = service.GetSubroundName(-1) assert.Equal(t, "Undefined subround", r) @@ -269,20 +270,20 @@ func TestWorker_GetStringValue(t *testing.T) { service, _ := v1.NewConsensusService() - r := service.GetStringValue(v1.MtBlockBodyAndHeader) - assert.Equal(t, v1.BlockBodyAndHeaderStringValue, r) - r = service.GetStringValue(v1.MtBlockBody) - assert.Equal(t, v1.BlockBodyStringValue, r) - r = service.GetStringValue(v1.MtBlockHeader) - assert.Equal(t, v1.BlockHeaderStringValue, r) - r = service.GetStringValue(v1.MtSignature) - assert.Equal(t, v1.BlockSignatureStringValue, r) - r = service.GetStringValue(v1.MtBlockHeaderFinalInfo) - assert.Equal(t, v1.BlockHeaderFinalInfoStringValue, r) - r = service.GetStringValue(v1.MtUnknown) - assert.Equal(t, v1.BlockUnknownStringValue, r) + r := service.GetStringValue(bls.MtBlockBodyAndHeader) + assert.Equal(t, bls.BlockBodyAndHeaderStringValue, r) + r = service.GetStringValue(bls.MtBlockBody) + assert.Equal(t, bls.BlockBodyStringValue, r) + r = service.GetStringValue(bls.MtBlockHeader) + assert.Equal(t, bls.BlockHeaderStringValue, r) + r = service.GetStringValue(bls.MtSignature) + assert.Equal(t, bls.BlockSignatureStringValue, r) + r = service.GetStringValue(bls.MtBlockHeaderFinalInfo) + assert.Equal(t, bls.BlockHeaderFinalInfoStringValue, r) + r = service.GetStringValue(bls.MtUnknown) + assert.Equal(t, bls.BlockUnknownStringValue, r) r = service.GetStringValue(-1) - assert.Equal(t, v1.BlockDefaultStringValue, r) + assert.Equal(t, bls.BlockDefaultStringValue, r) } func TestWorker_IsMessageWithBlockBodyAndHeader(t *testing.T) { @@ -290,13 +291,13 @@ func TestWorker_IsMessageWithBlockBodyAndHeader(t *testing.T) { service, _ := v1.NewConsensusService() - ret := service.IsMessageWithBlockBodyAndHeader(v1.MtBlockBody) + ret := service.IsMessageWithBlockBodyAndHeader(bls.MtBlockBody) assert.False(t, ret) - ret = service.IsMessageWithBlockBodyAndHeader(v1.MtBlockHeader) + ret = service.IsMessageWithBlockBodyAndHeader(bls.MtBlockHeader) assert.False(t, ret) - ret = service.IsMessageWithBlockBodyAndHeader(v1.MtBlockBodyAndHeader) + ret = service.IsMessageWithBlockBodyAndHeader(bls.MtBlockBodyAndHeader) assert.True(t, ret) } @@ -305,10 +306,10 @@ func TestWorker_IsMessageWithBlockBody(t *testing.T) { service, _ := v1.NewConsensusService() - ret := service.IsMessageWithBlockBody(v1.MtBlockHeader) + ret := service.IsMessageWithBlockBody(bls.MtBlockHeader) assert.False(t, ret) - ret = service.IsMessageWithBlockBody(v1.MtBlockBody) + ret = service.IsMessageWithBlockBody(bls.MtBlockBody) assert.True(t, ret) } @@ -317,10 +318,10 @@ func TestWorker_IsMessageWithBlockHeader(t *testing.T) { service, _ := v1.NewConsensusService() - ret := service.IsMessageWithBlockHeader(v1.MtBlockBody) + ret := service.IsMessageWithBlockHeader(bls.MtBlockBody) assert.False(t, ret) - ret = service.IsMessageWithBlockHeader(v1.MtBlockHeader) + ret = service.IsMessageWithBlockHeader(bls.MtBlockHeader) assert.True(t, ret) } @@ -329,10 +330,10 @@ func TestWorker_IsMessageWithSignature(t *testing.T) { service, _ := v1.NewConsensusService() - ret := service.IsMessageWithSignature(v1.MtBlockBodyAndHeader) + ret := service.IsMessageWithSignature(bls.MtBlockBodyAndHeader) assert.False(t, ret) 
- ret = service.IsMessageWithSignature(v1.MtSignature) + ret = service.IsMessageWithSignature(bls.MtSignature) assert.True(t, ret) } @@ -341,10 +342,10 @@ func TestWorker_IsMessageWithFinalInfo(t *testing.T) { service, _ := v1.NewConsensusService() - ret := service.IsMessageWithFinalInfo(v1.MtSignature) + ret := service.IsMessageWithFinalInfo(bls.MtSignature) assert.False(t, ret) - ret = service.IsMessageWithFinalInfo(v1.MtBlockHeaderFinalInfo) + ret = service.IsMessageWithFinalInfo(bls.MtBlockHeaderFinalInfo) assert.True(t, ret) } @@ -353,10 +354,10 @@ func TestWorker_IsMessageWithInvalidSigners(t *testing.T) { service, _ := v1.NewConsensusService() - ret := service.IsMessageWithInvalidSigners(v1.MtBlockHeaderFinalInfo) + ret := service.IsMessageWithInvalidSigners(bls.MtBlockHeaderFinalInfo) assert.False(t, ret) - ret = service.IsMessageWithInvalidSigners(v1.MtInvalidSigners) + ret = service.IsMessageWithInvalidSigners(bls.MtInvalidSigners) assert.True(t, ret) } @@ -365,10 +366,10 @@ func TestWorker_IsSubroundSignature(t *testing.T) { service, _ := v1.NewConsensusService() - ret := service.IsSubroundSignature(v1.SrEndRound) + ret := service.IsSubroundSignature(bls.SrEndRound) assert.False(t, ret) - ret = service.IsSubroundSignature(v1.SrSignature) + ret = service.IsSubroundSignature(bls.SrSignature) assert.True(t, ret) } @@ -377,10 +378,10 @@ func TestWorker_IsSubroundStartRound(t *testing.T) { service, _ := v1.NewConsensusService() - ret := service.IsSubroundStartRound(v1.SrSignature) + ret := service.IsSubroundStartRound(bls.SrSignature) assert.False(t, ret) - ret = service.IsSubroundStartRound(v1.SrStartRound) + ret = service.IsSubroundStartRound(bls.SrStartRound) assert.True(t, ret) } @@ -389,7 +390,7 @@ func TestWorker_IsMessageTypeValid(t *testing.T) { service, _ := v1.NewConsensusService() - ret := service.IsMessageTypeValid(v1.MtBlockBody) + ret := service.IsMessageTypeValid(bls.MtBlockBody) assert.True(t, ret) ret = service.IsMessageTypeValid(666) @@ -403,15 +404,15 @@ func TestWorker_GetMaxNumOfMessageTypeAccepted(t *testing.T) { t.Run("message type signature", func(t *testing.T) { t.Parallel() - assert.Equal(t, v1.MaxNumOfMessageTypeSignatureAccepted, service.GetMaxNumOfMessageTypeAccepted(v1.MtSignature)) + assert.Equal(t, v1.MaxNumOfMessageTypeSignatureAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtSignature)) }) t.Run("other message types", func(t *testing.T) { t.Parallel() - assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(v1.MtUnknown)) - assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(v1.MtBlockBody)) - assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(v1.MtBlockHeader)) - assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(v1.MtBlockBodyAndHeader)) - assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(v1.MtBlockHeaderFinalInfo)) + assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtUnknown)) + assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockBody)) + assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockHeader)) + assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockBodyAndHeader)) + assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, 
service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockHeaderFinalInfo)) }) } diff --git a/consensus/spos/bls/v1/constants.go b/consensus/spos/bls/v1/constants.go index 1b80740483f..5753fc94770 100644 --- a/consensus/spos/bls/v1/constants.go +++ b/consensus/spos/bls/v1/constants.go @@ -2,41 +2,10 @@ package v1 import ( logger "github.com/multiversx/mx-chain-logger-go" - - "github.com/multiversx/mx-chain-go/consensus" ) var log = logger.GetOrCreate("consensus/spos/bls") -const ( - // SrStartRound defines ID of Subround "Start round" - SrStartRound = iota - // SrBlock defines ID of Subround "block" - SrBlock - // SrSignature defines ID of Subround "signature" - SrSignature - // SrEndRound defines ID of Subround "End round" - SrEndRound -) - -const ( - // MtUnknown defines ID of a message that has unknown data inside - MtUnknown consensus.MessageType = iota - // MtBlockBodyAndHeader defines ID of a message that has a block body and a block header inside - MtBlockBodyAndHeader - // MtBlockBody defines ID of a message that has a block body inside - MtBlockBody - // MtBlockHeader defines ID of a message that has a block header inside - MtBlockHeader - // MtSignature defines ID of a message that has a Signature inside - MtSignature - // MtBlockHeaderFinalInfo defines ID of a message that has a block header final info inside - // (aggregate signature, bitmap and seal leader signature for the proposed and accepted header) - MtBlockHeaderFinalInfo - // MtInvalidSigners defines ID of a message that has a invalid signers p2p messages inside - MtInvalidSigners -) - // waitingAllSigsMaxTimeThreshold specifies the max allocated time for waiting all signatures from the total time of the subround signature const waitingAllSigsMaxTimeThreshold = 0.5 @@ -66,61 +35,3 @@ const srEndStartTime = 0.85 // srEndEndTime specifies the end time, from the total time of the round, of Subround End const srEndEndTime = 0.95 - -const ( - // BlockBodyAndHeaderStringValue represents the string to be used to identify a block body and a block header - BlockBodyAndHeaderStringValue = "(BLOCK_BODY_AND_HEADER)" - - // BlockBodyStringValue represents the string to be used to identify a block body - BlockBodyStringValue = "(BLOCK_BODY)" - - // BlockHeaderStringValue represents the string to be used to identify a block header - BlockHeaderStringValue = "(BLOCK_HEADER)" - - // BlockSignatureStringValue represents the string to be used to identify a block's signature - BlockSignatureStringValue = "(SIGNATURE)" - - // BlockHeaderFinalInfoStringValue represents the string to be used to identify a block's header final info - BlockHeaderFinalInfoStringValue = "(FINAL_INFO)" - - // BlockUnknownStringValue represents the string to be used to identify an unknown block - BlockUnknownStringValue = "(UNKNOWN)" - - // BlockDefaultStringValue represents the message to identify a message that is undefined - BlockDefaultStringValue = "Undefined message type" -) - -func getStringValue(msgType consensus.MessageType) string { - switch msgType { - case MtBlockBodyAndHeader: - return BlockBodyAndHeaderStringValue - case MtBlockBody: - return BlockBodyStringValue - case MtBlockHeader: - return BlockHeaderStringValue - case MtSignature: - return BlockSignatureStringValue - case MtBlockHeaderFinalInfo: - return BlockHeaderFinalInfoStringValue - case MtUnknown: - return BlockUnknownStringValue - default: - return BlockDefaultStringValue - } -} - -// getSubroundName returns the name of each Subround from a given Subround ID -func getSubroundName(subroundId int) 
string { - switch subroundId { - case SrStartRound: - return "(START_ROUND)" - case SrBlock: - return "(BLOCK)" - case SrSignature: - return "(SIGNATURE)" - case SrEndRound: - return "(END_ROUND)" - default: - return "Undefined subround" - } -} diff --git a/consensus/spos/bls/v1/export_test.go b/consensus/spos/bls/v1/export_test.go index 2eedd84cd95..f5590b0b4f4 100644 --- a/consensus/spos/bls/v1/export_test.go +++ b/consensus/spos/bls/v1/export_test.go @@ -352,8 +352,3 @@ func (sr *subroundEndRound) GetFullMessagesForInvalidSigners(invalidPubKeys []st func (sr *subroundEndRound) GetSentSignatureTracker() spos.SentSignaturesTracker { return sr.sentSignatureTracker } - -// GetStringValue calls the unexported getStringValue function -func GetStringValue(messageType consensus.MessageType) string { - return getStringValue(messageType) -} diff --git a/consensus/spos/bls/v1/subroundBlock.go b/consensus/spos/bls/v1/subroundBlock.go index 8b88c5a02a8..f7d36bfff33 100644 --- a/consensus/spos/bls/v1/subroundBlock.go +++ b/consensus/spos/bls/v1/subroundBlock.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" ) // maxAllowedSizeInBytes defines how many bytes are allowed as payload in a message @@ -203,7 +204,7 @@ func (sr *subroundBlock) sendHeaderAndBlockBody( marshalizedHeader, []byte(leader), nil, - int(MtBlockBodyAndHeader), + int(bls.MtBlockBodyAndHeader), sr.RoundHandler().Index(), sr.ChainID(), nil, @@ -245,7 +246,7 @@ func (sr *subroundBlock) sendBlockBody(bodyHandler data.BodyHandler, marshalized nil, []byte(leader), nil, - int(MtBlockBody), + int(bls.MtBlockBody), sr.RoundHandler().Index(), sr.ChainID(), nil, @@ -285,7 +286,7 @@ func (sr *subroundBlock) sendBlockHeader(headerHandler data.HeaderHandler, marsh marshalizedHeader, []byte(leader), nil, - int(MtBlockHeader), + int(bls.MtBlockHeader), sr.RoundHandler().Index(), sr.ChainID(), nil, diff --git a/consensus/spos/bls/v1/subroundBlock_test.go b/consensus/spos/bls/v1/subroundBlock_test.go index 074a6463e5e..6724bd15d9e 100644 --- a/consensus/spos/bls/v1/subroundBlock_test.go +++ b/consensus/spos/bls/v1/subroundBlock_test.go @@ -15,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" "github.com/multiversx/mx-chain-go/testscommon" consensusMock "github.com/multiversx/mx-chain-go/testscommon/consensus" @@ -26,9 +27,9 @@ import ( func defaultSubroundForSRBlock(consensusState *spos.ConsensusState, ch chan bool, container *consensusMock.ConsensusCoreMock, appStatusHandler core.AppStatusHandler) (*spos.Subround, error) { return spos.NewSubround( - v1.SrStartRound, - v1.SrBlock, - v1.SrSignature, + bls.SrStartRound, + bls.SrBlock, + bls.SrSignature, int64(5*roundTimeDuration/100), int64(25*roundTimeDuration/100), "(BLOCK)", @@ -314,16 +315,16 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { assert.False(t, r) sr.SetSelfPubKey(sr.ConsensusGroup()[0]) - _ = sr.SetJobDone(sr.SelfPubKey(), v1.SrBlock, true) + _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrBlock, true) r = sr.DoBlockJob() assert.False(t, r) - _ = sr.SetJobDone(sr.SelfPubKey(), v1.SrBlock, false) - sr.SetStatus(v1.SrBlock, spos.SsFinished) + _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrBlock, false) + sr.SetStatus(bls.SrBlock, 
spos.SsFinished) r = sr.DoBlockJob() assert.False(t, r) - sr.SetStatus(v1.SrBlock, spos.SsNotFinished) + sr.SetStatus(bls.SrBlock, spos.SsNotFinished) bpm := &testscommon.BlockProcessorStub{} err := errors.New("error") bpm.CreateBlockCalled = func(header data.HeaderHandler, remainingTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { @@ -358,7 +359,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderDataAlreadySet(t *testing.T) { hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), v1.MtBlockBodyAndHeader) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) sr.Data = []byte("some data") r := sr.ReceivedBlockBodyAndHeader(cnsMsg) @@ -374,7 +375,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderNodeNotLeaderInCurrentRound(t * hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[1]), v1.MtBlockBodyAndHeader) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[1]), bls.MtBlockBodyAndHeader) sr.Data = nil r := sr.ReceivedBlockBodyAndHeader(cnsMsg) @@ -390,10 +391,10 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderCannotProcessJobDone(t *testing hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), v1.MtBlockBodyAndHeader) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) sr.Data = nil - _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrBlock, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrBlock, true) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) @@ -415,7 +416,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderErrorDecoding(t *testing.T) { hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), v1.MtBlockBodyAndHeader) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) sr.Data = nil r := sr.ReceivedBlockBodyAndHeader(cnsMsg) @@ -432,7 +433,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderBodyAlreadyReceived(t *testing. 
hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), v1.MtBlockBodyAndHeader) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) sr.Data = nil sr.Body = &block.Body{} @@ -450,7 +451,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderHeaderAlreadyReceived(t *testin hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), v1.MtBlockBodyAndHeader) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) sr.Data = nil sr.Header = &block.Header{Nonce: 1} @@ -467,7 +468,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { t.Run("block is valid", func(t *testing.T) { hdr := createDefaultHeader() blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), v1.MtBlockBodyAndHeader) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) sr.Data = nil r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.True(t, r) @@ -477,7 +478,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { Nonce: 1, } blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), v1.MtBlockBodyAndHeader) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) sr.Data = nil r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) @@ -524,7 +525,7 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { nil, []byte(sr.ConsensusGroup()[0]), []byte("sig"), - int(v1.MtBlockBody), + int(bls.MtBlockBody), 0, chainID, nil, @@ -543,11 +544,11 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { assert.False(t, r) cnsMsg.PubKey = []byte(sr.ConsensusGroup()[0]) - sr.SetStatus(v1.SrBlock, spos.SsFinished) + sr.SetStatus(bls.SrBlock, spos.SsFinished) r = sr.ReceivedBlockBody(cnsMsg) assert.False(t, r) - sr.SetStatus(v1.SrBlock, spos.SsNotFinished) + sr.SetStatus(bls.SrBlock, spos.SsNotFinished) r = sr.ReceivedBlockBody(cnsMsg) assert.False(t, r) @@ -562,7 +563,7 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { hdrStr, []byte(sr.ConsensusGroup()[0]), []byte("sig"), - int(v1.MtBlockHeader), + int(bls.MtBlockHeader), 0, chainID, nil, @@ -585,11 +586,11 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { assert.False(t, r) cnsMsg.PubKey = []byte(sr.ConsensusGroup()[0]) - sr.SetStatus(v1.SrBlock, spos.SsFinished) + sr.SetStatus(bls.SrBlock, spos.SsFinished) r = sr.ReceivedBlockHeader(cnsMsg) assert.False(t, r) - sr.SetStatus(v1.SrBlock, spos.SsNotFinished) + sr.SetStatus(bls.SrBlock, spos.SsNotFinished) container.SetBlockProcessor(blockProcessorMock) sr.Data = nil sr.Header = nil @@ -614,7 +615,7 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenBodyAndHeaderAre nil, []byte(sr.ConsensusGroup()[0]), []byte("sig"), - int(v1.MtBlockBodyAndHeader), + int(bls.MtBlockBodyAndHeader), 0, chainID, nil, @@ -646,7 +647,7 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFail nil, []byte(sr.ConsensusGroup()[0]), []byte("sig"), - int(v1.MtBlockBody), + int(bls.MtBlockBody), 0, chainID, nil, @@ -674,7 +675,7 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockRetu nil, []byte(sr.ConsensusGroup()[0]), []byte("sig"), - int(v1.MtBlockBody), + int(bls.MtBlockBody), 0, chainID, 
nil, @@ -711,7 +712,7 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnTrue(t *testing.T) { nil, []byte(sr.ConsensusGroup()[0]), []byte("sig"), - int(v1.MtBlockBody), + int(bls.MtBlockBody), 0, chainID, nil, @@ -772,7 +773,7 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenSubroundIsFinish t.Parallel() container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) - sr.SetStatus(v1.SrBlock, spos.SsFinished) + sr.SetStatus(bls.SrBlock, spos.SsFinished) assert.True(t, sr.DoBlockConsensusCheck()) } @@ -780,8 +781,8 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenBlockIsReceivedR t.Parallel() container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) - for i := 0; i < sr.Threshold(v1.SrBlock); i++ { - _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrBlock, true) + for i := 0; i < sr.Threshold(bls.SrBlock); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, true) } assert.True(t, sr.DoBlockConsensusCheck()) } @@ -798,14 +799,14 @@ func TestSubroundBlock_IsBlockReceived(t *testing.T) { container := consensusMock.InitConsensusCore() sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) for i := 0; i < len(sr.ConsensusGroup()); i++ { - _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrBlock, false) - _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, false) + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, false) + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, false) } ok := sr.IsBlockReceived(1) assert.False(t, ok) - _ = sr.SetJobDone("A", v1.SrBlock, true) - isJobDone, _ := sr.JobDone("A", v1.SrBlock) + _ = sr.SetJobDone("A", bls.SrBlock, true) + isJobDone, _ := sr.JobDone("A", bls.SrBlock) assert.True(t, isJobDone) ok = sr.IsBlockReceived(1) @@ -1084,7 +1085,7 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDuration(t *testing.T) { nil, []byte(sr.ConsensusGroup()[0]), []byte("sig"), - int(v1.MtBlockBody), + int(bls.MtBlockBody), 0, chainID, nil, diff --git a/consensus/spos/bls/v1/subroundEndRound.go b/consensus/spos/bls/v1/subroundEndRound.go index c6ed827e0c5..0c2e7197e21 100644 --- a/consensus/spos/bls/v1/subroundEndRound.go +++ b/consensus/spos/bls/v1/subroundEndRound.go @@ -15,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/headerCheck" ) @@ -294,7 +295,7 @@ func (sr *subroundEndRound) doEndRoundJob(_ context.Context) bool { } func (sr *subroundEndRound) doEndRoundJobByLeader() bool { - bitmap := sr.GenerateBitmap(SrSignature) + bitmap := sr.GenerateBitmap(bls.SrSignature) err := sr.checkSignaturesValidity(bitmap) if err != nil { log.Debug("doEndRoundJobByLeader.checkSignaturesValidity", "error", err.Error()) @@ -435,7 +436,7 @@ func (sr *subroundEndRound) verifyNodesOnAggSigFail() ([]string, error) { } for i, pk := range pubKeys { - isJobDone, err := sr.JobDone(pk, SrSignature) + isJobDone, err := sr.JobDone(pk, bls.SrSignature) if err != nil || !isJobDone { continue } @@ -450,7 +451,7 @@ func (sr *subroundEndRound) verifyNodesOnAggSigFail() ([]string, error) { if err != nil { isSuccessfull = false - err = sr.SetJobDone(pk, SrSignature, false) + err = sr.SetJobDone(pk, bls.SrSignature, false) if err != 
nil { return nil, err } @@ -521,7 +522,7 @@ func (sr *subroundEndRound) handleInvalidSignersOnAggSigFail() ([]byte, []byte, func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, []byte, error) { threshold := sr.Threshold(sr.Current()) - numValidSigShares := sr.ComputeSize(SrSignature) + numValidSigShares := sr.ComputeSize(bls.SrSignature) if check.IfNil(sr.Header) { return nil, nil, spos.ErrNilHeader @@ -532,7 +533,7 @@ func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, []byte, error) spos.ErrInvalidNumSigShares, numValidSigShares, threshold) } - bitmap := sr.GenerateBitmap(SrSignature) + bitmap := sr.GenerateBitmap(bls.SrSignature) err := sr.checkSignaturesValidity(bitmap) if err != nil { return nil, nil, err @@ -565,7 +566,7 @@ func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfo() { nil, []byte(leader), nil, - int(MtBlockHeaderFinalInfo), + int(bls.MtBlockHeaderFinalInfo), sr.RoundHandler().Index(), sr.ChainID(), sr.Header.GetPubKeysBitmap(), @@ -606,7 +607,7 @@ func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []by nil, []byte(leader), nil, - int(MtInvalidSigners), + int(bls.MtInvalidSigners), sr.RoundHandler().Index(), sr.ChainID(), nil, @@ -867,7 +868,7 @@ func (sr *subroundEndRound) checkSignaturesValidity(bitmap []byte) error { consensusGroup := sr.ConsensusGroup() signers := headerCheck.ComputeSignersPublicKeys(consensusGroup, bitmap) for _, pubKey := range signers { - isSigJobDone, err := sr.JobDone(pubKey, SrSignature) + isSigJobDone, err := sr.JobDone(pubKey, bls.SrSignature) if err != nil { return err } diff --git a/consensus/spos/bls/v1/subroundEndRound_test.go b/consensus/spos/bls/v1/subroundEndRound_test.go index cd6e14a6f0f..6d7f1ac391d 100644 --- a/consensus/spos/bls/v1/subroundEndRound_test.go +++ b/consensus/spos/bls/v1/subroundEndRound_test.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" "github.com/multiversx/mx-chain-go/p2p" @@ -35,8 +36,8 @@ func initSubroundEndRoundWithContainer( ch := make(chan bool, 1) consensusState := initConsensusState() sr, _ := spos.NewSubround( - v1.SrSignature, - v1.SrEndRound, + bls.SrSignature, + bls.SrEndRound, -1, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), @@ -74,8 +75,8 @@ func TestNewSubroundEndRound(t *testing.T) { consensusState := initConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrSignature, - v1.SrEndRound, + bls.SrSignature, + bls.SrEndRound, -1, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), @@ -159,8 +160,8 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing. 
ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrSignature, - v1.SrEndRound, + bls.SrSignature, + bls.SrEndRound, -1, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), @@ -195,8 +196,8 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *test ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrSignature, - v1.SrEndRound, + bls.SrSignature, + bls.SrEndRound, -1, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), @@ -231,8 +232,8 @@ func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *test ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrSignature, - v1.SrEndRound, + bls.SrSignature, + bls.SrEndRound, -1, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), @@ -268,8 +269,8 @@ func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrSignature, - v1.SrEndRound, + bls.SrSignature, + bls.SrEndRound, -1, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), @@ -304,8 +305,8 @@ func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testin ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrSignature, - v1.SrEndRound, + bls.SrSignature, + bls.SrEndRound, -1, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), @@ -340,8 +341,8 @@ func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrSignature, - v1.SrEndRound, + bls.SrSignature, + bls.SrEndRound, -1, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), @@ -376,8 +377,8 @@ func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrSignature, - v1.SrEndRound, + bls.SrSignature, + bls.SrEndRound, -1, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), @@ -661,7 +662,7 @@ func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnTrueWhenRoundIsFin t.Parallel() sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.SetStatus(v1.SrEndRound, spos.SsFinished) + sr.SetStatus(bls.SrEndRound, spos.SsFinished) ok := sr.DoEndRoundConsensusCheck() assert.True(t, ok) @@ -690,7 +691,7 @@ func TestSubroundEndRound_CheckSignaturesValidityShouldReturnNil(t *testing.T) { sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) err := sr.CheckSignaturesValidity([]byte{1}) assert.Equal(t, nil, err) @@ -1047,7 +1048,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { container.SetSigningHandler(signingHandler) sr.Header = &block.Header{} - _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) _, err := sr.VerifyNodesOnAggSigFail() require.Equal(t, expectedErr, err) @@ -1070,13 +1071,13 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { } sr.Header = &block.Header{} - _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) container.SetSigningHandler(signingHandler) _, err := sr.VerifyNodesOnAggSigFail() require.Nil(t, err) - isJobDone, err := sr.JobDone(sr.ConsensusGroup()[0], v1.SrSignature) + isJobDone, err := sr.JobDone(sr.ConsensusGroup()[0], bls.SrSignature) require.Nil(t, err) require.False(t, 
isJobDone) }) @@ -1100,8 +1101,8 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { container.SetSigningHandler(signingHandler) sr.Header = &block.Header{} - _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) - _ = sr.SetJobDone(sr.ConsensusGroup()[1], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, true) invalidSigners, err := sr.VerifyNodesOnAggSigFail() require.Nil(t, err) @@ -1118,7 +1119,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.Header = &block.Header{} - sr.SetThreshold(v1.SrEndRound, 2) + sr.SetThreshold(bls.SrEndRound, 2) _, _, err := sr.ComputeAggSigOnValidNodes() require.True(t, errors.Is(err, spos.ErrInvalidNumSigShares)) @@ -1139,7 +1140,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { container.SetSigningHandler(signingHandler) sr.Header = &block.Header{} - _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) _, _, err := sr.ComputeAggSigOnValidNodes() require.Equal(t, expectedErr, err) @@ -1159,7 +1160,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { } container.SetSigningHandler(signingHandler) sr.Header = &block.Header{} - _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) _, _, err := sr.ComputeAggSigOnValidNodes() require.Equal(t, expectedErr, err) @@ -1171,7 +1172,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.Header = &block.Header{} - _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) bitmap, sig, err := sr.ComputeAggSigOnValidNodes() require.NotNil(t, bitmap) @@ -1216,10 +1217,10 @@ func TestSubroundEndRound_DoEndRoundJobByLeaderVerificationFail(t *testing.T) { container.SetSigningHandler(signingHandler) - sr.SetThreshold(v1.SrEndRound, 2) + sr.SetThreshold(bls.SrEndRound, 2) - _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) - _ = sr.SetJobDone(sr.ConsensusGroup()[1], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, true) sr.Header = &block.Header{} @@ -1263,11 +1264,11 @@ func TestSubroundEndRound_DoEndRoundJobByLeaderVerificationFail(t *testing.T) { container.SetSigningHandler(signingHandler) - sr.SetThreshold(v1.SrEndRound, 2) + sr.SetThreshold(bls.SrEndRound, 2) - _ = sr.SetJobDone(sr.ConsensusGroup()[0], v1.SrSignature, true) - _ = sr.SetJobDone(sr.ConsensusGroup()[1], v1.SrSignature, true) - _ = sr.SetJobDone(sr.ConsensusGroup()[2], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[2], bls.SrSignature, true) sr.Header = &block.Header{} @@ -1344,8 +1345,8 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { ch := make(chan bool, 1) consensusState := initConsensusStateWithKeysHandler(keysHandler) sr, _ := spos.NewSubround( - v1.SrSignature, - v1.SrEndRound, + bls.SrSignature, + bls.SrEndRound, -1, 
int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), @@ -1715,8 +1716,8 @@ func TestSubroundEndRound_getMinConsensusGroupIndexOfManagedKeys(t *testing.T) { ch := make(chan bool, 1) consensusState := initConsensusStateWithKeysHandler(keysHandler) sr, _ := spos.NewSubround( - v1.SrSignature, - v1.SrEndRound, + bls.SrSignature, + bls.SrEndRound, -1, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), diff --git a/consensus/spos/bls/v1/subroundSignature.go b/consensus/spos/bls/v1/subroundSignature.go index df1e6e8030b..86fc65b50dc 100644 --- a/consensus/spos/bls/v1/subroundSignature.go +++ b/consensus/spos/bls/v1/subroundSignature.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" ) type subroundSignature struct { @@ -126,7 +127,7 @@ func (sr *subroundSignature) createAndSendSignatureMessage(signatureShare []byte nil, pkBytes, nil, - int(MtSignature), + int(bls.MtSignature), sr.RoundHandler().Index(), sr.ChainID(), nil, diff --git a/consensus/spos/bls/v1/subroundSignature_test.go b/consensus/spos/bls/v1/subroundSignature_test.go index 31532f562eb..a31bf841740 100644 --- a/consensus/spos/bls/v1/subroundSignature_test.go +++ b/consensus/spos/bls/v1/subroundSignature_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" "github.com/multiversx/mx-chain-go/testscommon" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" @@ -22,9 +23,9 @@ func initSubroundSignatureWithContainer(container *consensusMocks.ConsensusCoreM ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrBlock, - v1.SrSignature, - v1.SrEndRound, + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, int64(70*roundTimeDuration/100), int64(85*roundTimeDuration/100), "(SIGNATURE)", @@ -60,9 +61,9 @@ func TestNewSubroundSignature(t *testing.T) { ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrBlock, - v1.SrSignature, - v1.SrEndRound, + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, int64(70*roundTimeDuration/100), int64(85*roundTimeDuration/100), "(SIGNATURE)", @@ -137,9 +138,9 @@ func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *te ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrBlock, - v1.SrSignature, - v1.SrEndRound, + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, int64(70*roundTimeDuration/100), int64(85*roundTimeDuration/100), "(SIGNATURE)", @@ -172,9 +173,9 @@ func TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrBlock, - v1.SrSignature, - v1.SrEndRound, + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, int64(70*roundTimeDuration/100), int64(85*roundTimeDuration/100), "(SIGNATURE)", @@ -206,9 +207,9 @@ func TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrBlock, - v1.SrSignature, - v1.SrEndRound, + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, int64(70*roundTimeDuration/100), int64(85*roundTimeDuration/100), "(SIGNATURE)", @@ -240,9 +241,9 @@ func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *test ch := make(chan bool, 1) sr, _ := spos.NewSubround( - 
v1.SrBlock, - v1.SrSignature, - v1.SrEndRound, + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, int64(70*roundTimeDuration/100), int64(85*roundTimeDuration/100), "(SIGNATURE)", @@ -275,9 +276,9 @@ func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrBlock, - v1.SrSignature, - v1.SrEndRound, + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, int64(70*roundTimeDuration/100), int64(85*roundTimeDuration/100), "(SIGNATURE)", @@ -309,9 +310,9 @@ func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrBlock, - v1.SrSignature, - v1.SrEndRound, + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, int64(70*roundTimeDuration/100), int64(85*roundTimeDuration/100), "(SIGNATURE)", @@ -369,7 +370,7 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { r = sr.DoSignatureJob() assert.True(t, r) - _ = sr.SetJobDone(sr.SelfPubKey(), v1.SrSignature, false) + _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrSignature, false) sr.RoundCanceled = false sr.SetSelfPubKey(sr.ConsensusGroup()[0]) r = sr.DoSignatureJob() @@ -391,9 +392,9 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { ch := make(chan bool, 1) sr, _ := spos.NewSubround( - v1.SrBlock, - v1.SrSignature, - v1.SrEndRound, + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, int64(70*roundTimeDuration/100), int64(85*roundTimeDuration/100), "(SIGNATURE)", @@ -446,7 +447,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { r = srSignature.DoSignatureJob() assert.True(t, r) - _ = sr.SetJobDone(sr.SelfPubKey(), v1.SrSignature, false) + _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrSignature, false) sr.RoundCanceled = false sr.SetSelfPubKey(sr.ConsensusGroup()[0]) r = srSignature.DoSignatureJob() @@ -478,7 +479,7 @@ func TestSubroundSignature_ReceivedSignature(t *testing.T) { nil, []byte(sr.ConsensusGroup()[1]), []byte("sig"), - int(v1.MtSignature), + int(bls.MtSignature), 0, chainID, nil, @@ -512,7 +513,7 @@ func TestSubroundSignature_ReceivedSignature(t *testing.T) { count := 0 for i := 0; i < len(sr.ConsensusGroup()); i++ { if sr.ConsensusGroup()[i] != string(cnsMsg.PubKey) { - _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) count++ if count == maxCount { break @@ -551,7 +552,7 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { nil, []byte(sr.ConsensusGroup()[1]), []byte("sig"), - int(v1.MtSignature), + int(bls.MtSignature), 0, chainID, nil, @@ -584,7 +585,7 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { count := 0 for i := 0; i < len(sr.ConsensusGroup()); i++ { if sr.ConsensusGroup()[i] != string(cnsMsg.PubKey) { - _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) count++ if count == maxCount { break @@ -602,8 +603,8 @@ func TestSubroundSignature_SignaturesCollected(t *testing.T) { sr := *initSubroundSignature() for i := 0; i < len(sr.ConsensusGroup()); i++ { - _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrBlock, false) - _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, false) + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, false) + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, false) } ok, n := sr.AreSignaturesCollected(2) @@ -613,14 +614,14 @@ func TestSubroundSignature_SignaturesCollected(t 
*testing.T) { ok, _ = sr.AreSignaturesCollected(2) assert.False(t, ok) - _ = sr.SetJobDone("B", v1.SrSignature, true) - isJobDone, _ := sr.JobDone("B", v1.SrSignature) + _ = sr.SetJobDone("B", bls.SrSignature, true) + isJobDone, _ := sr.JobDone("B", bls.SrSignature) assert.True(t, isJobDone) ok, _ = sr.AreSignaturesCollected(2) assert.False(t, ok) - _ = sr.SetJobDone("C", v1.SrSignature, true) + _ = sr.SetJobDone("C", bls.SrSignature, true) ok, _ = sr.AreSignaturesCollected(2) assert.True(t, ok) } @@ -637,7 +638,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSubround t.Parallel() sr := *initSubroundSignature() - sr.SetStatus(v1.SrSignature, spos.SsFinished) + sr.SetStatus(bls.SrSignature, spos.SsFinished) assert.True(t, sr.DoSignatureConsensusCheck()) } @@ -646,8 +647,8 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSignatur sr := *initSubroundSignature() - for i := 0; i < sr.Threshold(v1.SrSignature); i++ { - _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + for i := 0; i < sr.Threshold(bls.SrSignature); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) } assert.True(t, sr.DoSignatureConsensusCheck()) @@ -669,8 +670,8 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenNotAllS sr.SetSelfPubKey(sr.ConsensusGroup()[0]) - for i := 0; i < sr.Threshold(v1.SrSignature); i++ { - _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + for i := 0; i < sr.Threshold(bls.SrSignature); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) } assert.False(t, sr.DoSignatureConsensusCheck()) @@ -686,7 +687,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenAllSigna sr.SetSelfPubKey(sr.ConsensusGroup()[0]) for i := 0; i < sr.ConsensusGroupSize(); i++ { - _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) } assert.True(t, sr.DoSignatureConsensusCheck()) @@ -701,8 +702,8 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenEnoughBu sr.SetSelfPubKey(sr.ConsensusGroup()[0]) - for i := 0; i < sr.Threshold(v1.SrSignature); i++ { - _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + for i := 0; i < sr.Threshold(bls.SrSignature); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) } assert.True(t, sr.DoSignatureConsensusCheck()) @@ -722,8 +723,8 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenFallbac sr.SetSelfPubKey(sr.ConsensusGroup()[0]) - for i := 0; i < sr.FallbackThreshold(v1.SrSignature); i++ { - _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + for i := 0; i < sr.FallbackThreshold(bls.SrSignature); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) } assert.False(t, sr.DoSignatureConsensusCheck()) @@ -743,8 +744,8 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenFallback sr.SetSelfPubKey(sr.ConsensusGroup()[0]) - for i := 0; i < sr.FallbackThreshold(v1.SrSignature); i++ { - _ = sr.SetJobDone(sr.ConsensusGroup()[i], v1.SrSignature, true) + for i := 0; i < sr.FallbackThreshold(bls.SrSignature); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) } assert.True(t, sr.DoSignatureConsensusCheck()) @@ -762,7 +763,7 @@ func TestSubroundSignature_ReceivedSignatureReturnFalseWhenConsensusDataIsNotEqu nil, []byte(sr.ConsensusGroup()[0]), []byte("sig"), - int(v1.MtSignature), + int(bls.MtSignature), 
0, chainID, nil, diff --git a/consensus/spos/bls/v1/subroundStartRound_test.go b/consensus/spos/bls/v1/subroundStartRound_test.go index 96ab0bbd440..8910fffc3aa 100644 --- a/consensus/spos/bls/v1/subroundStartRound_test.go +++ b/consensus/spos/bls/v1/subroundStartRound_test.go @@ -10,9 +10,12 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/bootstrapperStubs" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) @@ -51,8 +54,8 @@ func defaultSubround( return spos.NewSubround( -1, - v1.SrStartRound, - v1.SrBlock, + bls.SrStartRound, + bls.SrBlock, int64(0*roundTimeDuration/100), int64(5*roundTimeDuration/100), "(START_ROUND)", @@ -83,7 +86,7 @@ func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) v1 } func initSubroundStartRound() v1.SubroundStartRound { - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() return initSubroundStartRoundWithContainer(container) } @@ -92,11 +95,11 @@ func TestNewSubroundStartRound(t *testing.T) { ch := make(chan bool, 1) consensusState := initConsensusState() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() sr, _ := spos.NewSubround( -1, - v1.SrStartRound, - v1.SrBlock, + bls.SrStartRound, + bls.SrBlock, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), "(START_ROUND)", @@ -192,7 +195,7 @@ func TestNewSubroundStartRound(t *testing.T) { func TestSubroundStartRound_NewSubroundStartRoundNilBlockChainShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -208,7 +211,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilBlockChainShouldFail(t *test func TestSubroundStartRound_NewSubroundStartRoundNilBootstrapperShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -224,7 +227,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilBootstrapperShouldFail(t *te func TestSubroundStartRound_NewSubroundStartRoundNilConsensusStateShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -240,7 +243,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilConsensusStateShouldFail(t * func TestSubroundStartRound_NewSubroundStartRoundNilMultiSignerContainerShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -256,7 +259,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilMultiSignerContainerShouldFa func TestSubroundStartRound_NewSubroundStartRoundNilRoundHandlerShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := 
consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -272,7 +275,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilRoundHandlerShouldFail(t *te func TestSubroundStartRound_NewSubroundStartRoundNilSyncTimerShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -288,7 +291,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilSyncTimerShouldFail(t *testi func TestSubroundStartRound_NewSubroundStartRoundNilValidatorGroupSelectorShouldFail(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -304,7 +307,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilValidatorGroupSelectorShould func TestSubroundStartRound_NewSubroundStartRoundShouldWork(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -320,7 +323,7 @@ func TestSubroundStartRound_NewSubroundStartRoundShouldWork(t *testing.T) { func TestSubroundStartRound_DoStartRoundShouldReturnTrue(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() ch := make(chan bool, 1) @@ -349,7 +352,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenRoundI sr := *initSubroundStartRound() - sr.SetStatus(v1.SrStartRound, spos.SsFinished) + sr.SetStatus(bls.SrStartRound, spos.SsFinished) ok := sr.DoStartRoundConsensusCheck() assert.True(t, ok) @@ -358,11 +361,11 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenRoundI func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenInitCurrentRoundReturnTrue(t *testing.T) { t.Parallel() - bootstrapperMock := &mock.BootstrapperStub{GetNodeStateCalled: func() common.NodeState { + bootstrapperMock := &bootstrapperStubs.BootstrapperStub{GetNodeStateCalled: func() common.NodeState { return common.NsSynchronized }} - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() container.SetBootStrapper(bootstrapperMock) sr := *initSubroundStartRoundWithContainer(container) @@ -381,11 +384,11 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenInitCu func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenInitCurrentRoundReturnFalse(t *testing.T) { t.Parallel() - bootstrapperMock := &mock.BootstrapperStub{GetNodeStateCalled: func() common.NodeState { + bootstrapperMock := &bootstrapperStubs.BootstrapperStub{GetNodeStateCalled: func() common.NodeState { return common.NsNotSynchronized }} - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() container.SetBootStrapper(bootstrapperMock) container.SetRoundHandler(initRoundHandlerMock()) @@ -398,12 +401,12 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenInitC func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetNodeStateNotReturnSynchronized(t *testing.T) { t.Parallel() - bootstrapperMock := &mock.BootstrapperStub{} + bootstrapperMock := &bootstrapperStubs.BootstrapperStub{} bootstrapperMock.GetNodeStateCalled = func() common.NodeState { return common.NsNotSynchronized } - container := 
mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() container.SetBootStrapper(bootstrapperMock) srStartRound := *initSubroundStartRoundWithContainer(container) @@ -417,10 +420,10 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextCon validatorGroupSelector := &shardingMocks.NodesCoordinatorMock{} err := errors.New("error") - validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte, round uint64, shardId uint32, epoch uint32) ([]nodesCoordinator.Validator, error) { - return nil, err + validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte, round uint64, shardId uint32, epoch uint32) (nodesCoordinator.Validator, []nodesCoordinator.Validator, error) { + return nil, nil, err } - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() container.SetValidatorGroupSelector(validatorGroupSelector) srStartRound := *initSubroundStartRoundWithContainer(container) @@ -437,7 +440,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenMainMachineIsAct return true }, } - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() container.SetNodeRedundancyHandler(nodeRedundancyMock) srStartRound := *initSubroundStartRoundWithContainer(container) @@ -455,11 +458,11 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetLeaderErr(t round uint64, shardId uint32, epoch uint32, - ) ([]nodesCoordinator.Validator, error) { - return make([]nodesCoordinator.Validator, 0), nil + ) (nodesCoordinator.Validator, []nodesCoordinator.Validator, error) { + return nil, make([]nodesCoordinator.Validator, 0), nil } - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() container.SetValidatorGroupSelector(validatorGroupSelector) srStartRound := *initSubroundStartRoundWithContainer(container) @@ -471,7 +474,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetLeaderErr(t func TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenIsNotInTheConsensusGroup(t *testing.T) { t.Parallel() - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() consensusState := initConsensusState() consensusState.SetSelfPubKey(consensusState.SelfPubKey() + "X") ch := make(chan bool, 1) @@ -493,7 +496,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenTimeIsOut(t *te return time.Duration(-1) } - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() container.SetRoundHandler(roundHandlerMock) srStartRound := *initSubroundStartRoundWithContainer(container) @@ -505,13 +508,13 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenTimeIsOut(t *te func TestSubroundStartRound_InitCurrentRoundShouldReturnTrue(t *testing.T) { t.Parallel() - bootstrapperMock := &mock.BootstrapperStub{} + bootstrapperMock := &bootstrapperStubs.BootstrapperStub{} bootstrapperMock.GetNodeStateCalled = func() common.NodeState { return common.NsSynchronized } - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() container.SetBootStrapper(bootstrapperMock) srStartRound := *initSubroundStartRoundWithContainer(container) @@ -527,7 +530,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { t.Parallel() wasCalled := false - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() keysHandler := &testscommon.KeysHandlerStub{} appStatusHandler := 
&statusHandler.AppStatusHandlerStub{ SetStringValueHandler: func(key string, value string) { @@ -542,8 +545,8 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { consensusState.SetSelfPubKey("not in consensus") sr, _ := spos.NewSubround( -1, - v1.SrStartRound, - v1.SrBlock, + bls.SrStartRound, + bls.SrBlock, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), "(START_ROUND)", @@ -572,7 +575,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { wasCalled := false wasIncrementCalled := false - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() keysHandler := &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return string(pkBytes) == "B" @@ -596,8 +599,8 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { consensusState.SetSelfPubKey("B") sr, _ := spos.NewSubround( -1, - v1.SrStartRound, - v1.SrBlock, + bls.SrStartRound, + bls.SrBlock, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), "(START_ROUND)", @@ -627,7 +630,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { wasCalled := false wasIncrementCalled := false - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() keysHandler := &testscommon.KeysHandlerStub{} appStatusHandler := &statusHandler.AppStatusHandlerStub{ SetStringValueHandler: func(key string, value string) { @@ -649,8 +652,8 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { } sr, _ := spos.NewSubround( -1, - v1.SrStartRound, - v1.SrBlock, + bls.SrStartRound, + bls.SrBlock, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), "(START_ROUND)", @@ -681,7 +684,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { wasMetricConsensusStateCalled := false wasMetricCountLeaderCalled := false cntMetricConsensusRoundStateCalled := 0 - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() keysHandler := &testscommon.KeysHandlerStub{} appStatusHandler := &statusHandler.AppStatusHandlerStub{ SetStringValueHandler: func(key string, value string) { @@ -713,8 +716,8 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { consensusState.SetSelfPubKey(leader) sr, _ := spos.NewSubround( -1, - v1.SrStartRound, - v1.SrBlock, + bls.SrStartRound, + bls.SrBlock, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), "(START_ROUND)", @@ -746,7 +749,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { wasMetricConsensusStateCalled := false wasMetricCountLeaderCalled := false cntMetricConsensusRoundStateCalled := 0 - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() keysHandler := &testscommon.KeysHandlerStub{} appStatusHandler := &statusHandler.AppStatusHandlerStub{ SetStringValueHandler: func(key string, value string) { @@ -781,8 +784,8 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { } sr, _ := spos.NewSubround( -1, - v1.SrStartRound, - v1.SrBlock, + bls.SrStartRound, + bls.SrBlock, int64(85*roundTimeDuration/100), int64(95*roundTimeDuration/100), "(START_ROUND)", @@ -821,10 +824,10 @@ func TestSubroundStartRound_GenerateNextConsensusGroupShouldReturnErr(t *testing round uint64, shardId uint32, epoch uint32, - ) ([]nodesCoordinator.Validator, error) { - return nil, err + ) (nodesCoordinator.Validator, []nodesCoordinator.Validator, error) { + 
return nil, nil, err } - container := mock.InitConsensusCore() + container := consensusMocks.InitConsensusCore() container.SetValidatorGroupSelector(validatorGroupSelector) srStartRound := *initSubroundStartRoundWithContainer(container) From 5f82c80a593c679977f7bc46c927b239939508c3 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 20 Sep 2024 17:03:36 +0300 Subject: [PATCH 04/30] adapt v2 --- consensus/mock/sposWorkerMock.go | 7 +- consensus/spos/bls/v2/benchmark_test.go | 3 +- .../v2/benchmark_verify_signatures_test.go | 2 +- consensus/spos/bls/v2/blsSubroundsFactory.go | 51 +++++------ .../spos/bls/v2/blsSubroundsFactory_test.go | 45 +++++----- consensus/spos/bls/v2/blsWorker.go | 73 +++++++-------- consensus/spos/bls/v2/blsWorker_test.go | 65 +++++++------- consensus/spos/bls/v2/constants.go | 89 ------------------- consensus/spos/bls/v2/export_test.go | 5 -- consensus/spos/bls/v2/subroundBlock.go | 7 +- consensus/spos/bls/v2/subroundBlock_test.go | 29 +++--- consensus/spos/bls/v2/subroundEndRound.go | 29 +++--- .../spos/bls/v2/subroundEndRound_test.go | 89 ++++++++++--------- consensus/spos/bls/v2/subroundSignature.go | 3 +- .../spos/bls/v2/subroundSignature_test.go | 47 +++++----- .../spos/bls/v2/subroundStartRound_test.go | 87 +++++++++--------- consensus/spos/interface.go | 3 +- consensus/spos/worker.go | 2 +- consensus/spos/worker_test.go | 3 +- factory/interface.go | 5 +- 20 files changed, 283 insertions(+), 361 deletions(-) diff --git a/consensus/mock/sposWorkerMock.go b/consensus/mock/sposWorkerMock.go index d254b827b57..734ce65c326 100644 --- a/consensus/mock/sposWorkerMock.go +++ b/consensus/mock/sposWorkerMock.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/p2p" ) @@ -27,7 +28,7 @@ type SposWorkerMock struct { DisplayStatisticsCalled func() ReceivedHeaderCalled func(headerHandler data.HeaderHandler, headerHash []byte) SetAppStatusHandlerCalled func(ash core.AppStatusHandler) error - ResetConsensusMessagesCalled func(currentHash []byte, prevHash []byte) + ResetConsensusMessagesCalled func() } // AddReceivedMessageCall - @@ -104,9 +105,9 @@ func (sposWorkerMock *SposWorkerMock) StartWorking() { } // ResetConsensusMessages - -func (sposWorkerMock *SposWorkerMock) ResetConsensusMessages(currentHash []byte, prevHash []byte) { +func (sposWorkerMock *SposWorkerMock) ResetConsensusMessages() { if sposWorkerMock.ResetConsensusMessagesCalled != nil { - sposWorkerMock.ResetConsensusMessagesCalled(currentHash, prevHash) + sposWorkerMock.ResetConsensusMessagesCalled() } } diff --git a/consensus/spos/bls/v2/benchmark_test.go b/consensus/spos/bls/v2/benchmark_test.go index 7cc8235bc84..24edc6355a7 100644 --- a/consensus/spos/bls/v2/benchmark_test.go +++ b/consensus/spos/bls/v2/benchmark_test.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" cryptoFactory "github.com/multiversx/mx-chain-go/factory/crypto" nodeMock "github.com/multiversx/mx-chain-go/node/mock" "github.com/multiversx/mx-chain-go/testscommon" @@ -107,7 +108,7 @@ func benchmarkSubroundSignatureDoSignatureJobForManagedKeys(b *testing.B, number signatureSentForPks := make(map[string]struct{}) mutex := sync.Mutex{} - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ 
:= v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{ diff --git a/consensus/spos/bls/v2/benchmark_verify_signatures_test.go b/consensus/spos/bls/v2/benchmark_verify_signatures_test.go index 0190f50ea01..da27f6570e4 100644 --- a/consensus/spos/bls/v2/benchmark_verify_signatures_test.go +++ b/consensus/spos/bls/v2/benchmark_verify_signatures_test.go @@ -108,7 +108,7 @@ func BenchmarkSubroundEndRound_VerifyNodesOnAggSigFailTime(b *testing.B) { sr := initSubroundEndRoundWithContainerAndConsensusState(container, &statusHandler.AppStatusHandlerStub{}, consensusState, &dataRetrieverMocks.ThrottlerStub{}) for i := 0; i < len(sr.ConsensusGroup()); i++ { - _, err := sr.SigningHandler().CreateSignatureShareForPublicKey(dataToBeSigned, uint16(i), (*sr).EnableEpochsHandler().GetCurrentEpoch(), []byte(keys[i])) + _, err := sr.SigningHandler().CreateSignatureShareForPublicKey(dataToBeSigned, uint16(i), sr.EnableEpochsHandler().GetCurrentEpoch(), []byte(keys[i])) require.Nil(b, err) _ = sr.SetJobDone(keys[i], bls.SrSignature, true) } diff --git a/consensus/spos/bls/v2/blsSubroundsFactory.go b/consensus/spos/bls/v2/blsSubroundsFactory.go index dfb6a4050f3..977f78f14d7 100644 --- a/consensus/spos/bls/v2/blsSubroundsFactory.go +++ b/consensus/spos/bls/v2/blsSubroundsFactory.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/outport" ) @@ -139,11 +140,11 @@ func (fct *factory) getTimeDuration() time.Duration { func (fct *factory) generateStartRoundSubround() error { subround, err := spos.NewSubround( -1, - SrStartRound, - SrBlock, + bls.SrStartRound, + bls.SrBlock, int64(float64(fct.getTimeDuration())*srStartStartTime), int64(float64(fct.getTimeDuration())*srStartEndTime), - getSubroundName(SrStartRound), + bls.GetSubroundName(bls.SrStartRound), fct.consensusState, fct.worker.GetConsensusStateChangedChannel(), fct.worker.ExecuteStoredMessages, @@ -178,12 +179,12 @@ func (fct *factory) generateStartRoundSubround() error { func (fct *factory) generateBlockSubround() error { subround, err := spos.NewSubround( - SrStartRound, - SrBlock, - SrSignature, + bls.SrStartRound, + bls.SrBlock, + bls.SrSignature, int64(float64(fct.getTimeDuration())*srBlockStartTime), int64(float64(fct.getTimeDuration())*srBlockEndTime), - getSubroundName(SrBlock), + bls.GetSubroundName(bls.SrBlock), fct.consensusState, fct.worker.GetConsensusStateChangedChannel(), fct.worker.ExecuteStoredMessages, @@ -205,9 +206,9 @@ func (fct *factory) generateBlockSubround() error { return err } - fct.worker.AddReceivedMessageCall(MtBlockBodyAndHeader, subroundBlockInstance.receivedBlockBodyAndHeader) - fct.worker.AddReceivedMessageCall(MtBlockBody, subroundBlockInstance.receivedBlockBody) - fct.worker.AddReceivedMessageCall(MtBlockHeader, subroundBlockInstance.receivedBlockHeaderBeforeEquivalentProofs) + fct.worker.AddReceivedMessageCall(bls.MtBlockBodyAndHeader, subroundBlockInstance.receivedBlockBodyAndHeader) + fct.worker.AddReceivedMessageCall(bls.MtBlockBody, subroundBlockInstance.receivedBlockBody) + fct.worker.AddReceivedMessageCall(bls.MtBlockHeader, subroundBlockInstance.receivedBlockHeaderBeforeEquivalentProofs) fct.worker.AddReceivedHeaderHandler(subroundBlockInstance.receivedBlockHeader) fct.consensusCore.Chronology().AddSubround(subroundBlockInstance) @@ -216,12 +217,12 @@ func (fct *factory) 
generateBlockSubround() error { func (fct *factory) generateSignatureSubround() error { subround, err := spos.NewSubround( - SrBlock, - SrSignature, - SrEndRound, + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, int64(float64(fct.getTimeDuration())*srSignatureStartTime), int64(float64(fct.getTimeDuration())*srSignatureEndTime), - getSubroundName(SrSignature), + bls.GetSubroundName(bls.SrSignature), fct.consensusState, fct.worker.GetConsensusStateChangedChannel(), fct.worker.ExecuteStoredMessages, @@ -246,7 +247,7 @@ func (fct *factory) generateSignatureSubround() error { } // TODO[cleanup cns finality]: remove this - fct.worker.AddReceivedMessageCall(MtSignature, subroundSignatureObject.receivedSignature) + fct.worker.AddReceivedMessageCall(bls.MtSignature, subroundSignatureObject.receivedSignature) fct.consensusCore.Chronology().AddSubround(subroundSignatureObject) return nil @@ -254,12 +255,12 @@ func (fct *factory) generateSignatureSubround() error { func (fct *factory) generateEndRoundSubround() error { subround, err := spos.NewSubround( - SrSignature, - SrEndRound, + bls.SrSignature, + bls.SrEndRound, -1, int64(float64(fct.getTimeDuration())*srEndStartTime), int64(float64(fct.getTimeDuration())*srEndEndTime), - getSubroundName(SrEndRound), + bls.GetSubroundName(bls.SrEndRound), fct.consensusState, fct.worker.GetConsensusStateChangedChannel(), fct.worker.ExecuteStoredMessages, @@ -284,9 +285,9 @@ func (fct *factory) generateEndRoundSubround() error { return err } - fct.worker.AddReceivedMessageCall(MtBlockHeaderFinalInfo, subroundEndRoundObject.receivedBlockHeaderFinalInfo) - fct.worker.AddReceivedMessageCall(MtInvalidSigners, subroundEndRoundObject.receivedInvalidSignersInfo) - fct.worker.AddReceivedMessageCall(MtSignature, subroundEndRoundObject.receivedSignature) + fct.worker.AddReceivedMessageCall(bls.MtBlockHeaderFinalInfo, subroundEndRoundObject.receivedBlockHeaderFinalInfo) + fct.worker.AddReceivedMessageCall(bls.MtInvalidSigners, subroundEndRoundObject.receivedInvalidSignersInfo) + fct.worker.AddReceivedMessageCall(bls.MtSignature, subroundEndRoundObject.receivedSignature) fct.worker.AddReceivedHeaderHandler(subroundEndRoundObject.receivedHeader) fct.consensusCore.Chronology().AddSubround(subroundEndRoundObject) @@ -296,10 +297,10 @@ func (fct *factory) generateEndRoundSubround() error { func (fct *factory) initConsensusThreshold() { pBFTThreshold := core.GetPBFTThreshold(fct.consensusState.ConsensusGroupSize()) pBFTFallbackThreshold := core.GetPBFTFallbackThreshold(fct.consensusState.ConsensusGroupSize()) - fct.consensusState.SetThreshold(SrBlock, 1) - fct.consensusState.SetThreshold(SrSignature, pBFTThreshold) - fct.consensusState.SetFallbackThreshold(SrBlock, 1) - fct.consensusState.SetFallbackThreshold(SrSignature, pBFTFallbackThreshold) + fct.consensusState.SetThreshold(bls.SrBlock, 1) + fct.consensusState.SetThreshold(bls.SrSignature, pBFTThreshold) + fct.consensusState.SetFallbackThreshold(bls.SrBlock, 1) + fct.consensusState.SetFallbackThreshold(bls.SrSignature, pBFTFallbackThreshold) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/consensus/spos/bls/v2/blsSubroundsFactory_test.go b/consensus/spos/bls/v2/blsSubroundsFactory_test.go index babe2a19ca1..577d72f070d 100644 --- a/consensus/spos/bls/v2/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/v2/blsSubroundsFactory_test.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" 
"github.com/multiversx/mx-chain-go/consensus/spos/bls" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" dataRetrieverMocks "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/testscommon" @@ -57,11 +58,11 @@ func initWorker() spos.WorkerHandler { return sposWorker } -func initFactoryWithContainer(container *testscommonConsensus.ConsensusCoreMock) bls.Factory { +func initFactoryWithContainer(container *testscommonConsensus.ConsensusCoreMock) v2.Factory { worker := initWorker() consensusState := initConsensusState() - fct, _ := bls.NewSubroundsFactory( + fct, _ := v2.NewSubroundsFactory( container, consensusState, worker, @@ -75,7 +76,7 @@ func initFactoryWithContainer(container *testscommonConsensus.ConsensusCoreMock) return fct } -func initFactory() bls.Factory { +func initFactory() v2.Factory { container := testscommonConsensus.InitConsensusCore() return initFactoryWithContainer(container) } @@ -111,7 +112,7 @@ func TestFactory_NewFactoryNilContainerShouldFail(t *testing.T) { consensusState := initConsensusState() worker := initWorker() - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( nil, consensusState, worker, @@ -132,7 +133,7 @@ func TestFactory_NewFactoryNilConsensusStateShouldFail(t *testing.T) { container := testscommonConsensus.InitConsensusCore() worker := initWorker() - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, nil, worker, @@ -155,7 +156,7 @@ func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) { worker := initWorker() container.SetBlockchain(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -178,7 +179,7 @@ func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) { worker := initWorker() container.SetBlockProcessor(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -201,7 +202,7 @@ func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) { worker := initWorker() container.SetBootStrapper(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -224,7 +225,7 @@ func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) { worker := initWorker() container.SetChronology(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -247,7 +248,7 @@ func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) { worker := initWorker() container.SetHasher(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -270,7 +271,7 @@ func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) { worker := initWorker() container.SetMarshalizer(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -293,7 +294,7 @@ func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) { worker := initWorker() container.SetMultiSignerContainer(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -316,7 +317,7 @@ func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) { worker := initWorker() container.SetRoundHandler(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, 
consensusState, worker, @@ -339,7 +340,7 @@ func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) { worker := initWorker() container.SetShardCoordinator(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -362,7 +363,7 @@ func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) { worker := initWorker() container.SetSyncTimer(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -385,7 +386,7 @@ func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) { worker := initWorker() container.SetValidatorGroupSelector(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -406,7 +407,7 @@ func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) { consensusState := initConsensusState() container := testscommonConsensus.InitConsensusCore() - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, nil, @@ -428,7 +429,7 @@ func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) { container := testscommonConsensus.InitConsensusCore() worker := initWorker() - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -450,7 +451,7 @@ func TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) { container := testscommonConsensus.InitConsensusCore() worker := initWorker() - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -462,7 +463,7 @@ func TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) { ) assert.Nil(t, fct) - assert.Equal(t, bls.ErrNilSentSignatureTracker, err) + assert.Equal(t, v2.ErrNilSentSignatureTracker, err) } func TestFactory_NewFactoryNilThrottlerShouldFail(t *testing.T) { @@ -472,7 +473,7 @@ func TestFactory_NewFactoryNilThrottlerShouldFail(t *testing.T) { container := testscommonConsensus.InitConsensusCore() worker := initWorker() - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -502,7 +503,7 @@ func TestFactory_NewFactoryEmptyChainIDShouldFail(t *testing.T) { container := testscommonConsensus.InitConsensusCore() worker := initWorker() - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, diff --git a/consensus/spos/bls/v2/blsWorker.go b/consensus/spos/bls/v2/blsWorker.go index 79d1cbb24c0..c627ff1af76 100644 --- a/consensus/spos/bls/v2/blsWorker.go +++ b/consensus/spos/bls/v2/blsWorker.go @@ -3,6 +3,7 @@ package v2 import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" ) // peerMaxMessagesPerSec defines how many messages can be propagated by a pid in a round. 
The value was chosen by @@ -40,12 +41,12 @@ func NewConsensusService() (*worker, error) { // InitReceivedMessages initializes the MessagesType map for all messages for the current ConsensusService func (wrk *worker) InitReceivedMessages() map[consensus.MessageType][]*consensus.Message { receivedMessages := make(map[consensus.MessageType][]*consensus.Message) - receivedMessages[MtBlockBodyAndHeader] = make([]*consensus.Message, 0) - receivedMessages[MtBlockBody] = make([]*consensus.Message, 0) - receivedMessages[MtBlockHeader] = make([]*consensus.Message, 0) - receivedMessages[MtSignature] = make([]*consensus.Message, 0) - receivedMessages[MtBlockHeaderFinalInfo] = make([]*consensus.Message, 0) - receivedMessages[MtInvalidSigners] = make([]*consensus.Message, 0) + receivedMessages[bls.MtBlockBodyAndHeader] = make([]*consensus.Message, 0) + receivedMessages[bls.MtBlockBody] = make([]*consensus.Message, 0) + receivedMessages[bls.MtBlockHeader] = make([]*consensus.Message, 0) + receivedMessages[bls.MtSignature] = make([]*consensus.Message, 0) + receivedMessages[bls.MtBlockHeaderFinalInfo] = make([]*consensus.Message, 0) + receivedMessages[bls.MtInvalidSigners] = make([]*consensus.Message, 0) return receivedMessages } @@ -57,71 +58,71 @@ func (wrk *worker) GetMaxMessagesInARoundPerPeer() uint32 { // GetStringValue gets the name of the messageType func (wrk *worker) GetStringValue(messageType consensus.MessageType) string { - return getStringValue(messageType) + return bls.GetStringValue(messageType) } // GetSubroundName gets the subround name for the subround id provided func (wrk *worker) GetSubroundName(subroundId int) string { - return getSubroundName(subroundId) + return bls.GetSubroundName(subroundId) } // IsMessageWithBlockBodyAndHeader returns if the current messageType is about block body and header func (wrk *worker) IsMessageWithBlockBodyAndHeader(msgType consensus.MessageType) bool { - return msgType == MtBlockBodyAndHeader + return msgType == bls.MtBlockBodyAndHeader } // IsMessageWithBlockBody returns if the current messageType is about block body func (wrk *worker) IsMessageWithBlockBody(msgType consensus.MessageType) bool { - return msgType == MtBlockBody + return msgType == bls.MtBlockBody } // IsMessageWithBlockHeader returns if the current messageType is about block header func (wrk *worker) IsMessageWithBlockHeader(msgType consensus.MessageType) bool { - return msgType == MtBlockHeader + return msgType == bls.MtBlockHeader } // IsMessageWithSignature returns if the current messageType is about signature func (wrk *worker) IsMessageWithSignature(msgType consensus.MessageType) bool { - return msgType == MtSignature + return msgType == bls.MtSignature } // IsMessageWithFinalInfo returns if the current messageType is about header final info func (wrk *worker) IsMessageWithFinalInfo(msgType consensus.MessageType) bool { - return msgType == MtBlockHeaderFinalInfo + return msgType == bls.MtBlockHeaderFinalInfo } // IsMessageWithInvalidSigners returns if the current messageType is about invalid signers func (wrk *worker) IsMessageWithInvalidSigners(msgType consensus.MessageType) bool { - return msgType == MtInvalidSigners + return msgType == bls.MtInvalidSigners } // IsMessageTypeValid returns if the current messageType is valid func (wrk *worker) IsMessageTypeValid(msgType consensus.MessageType) bool { - isMessageTypeValid := msgType == MtBlockBodyAndHeader || - msgType == MtBlockBody || - msgType == MtBlockHeader || - msgType == MtSignature || - msgType == MtBlockHeaderFinalInfo || 
- msgType == MtInvalidSigners + isMessageTypeValid := msgType == bls.MtBlockBodyAndHeader || + msgType == bls.MtBlockBody || + msgType == bls.MtBlockHeader || + msgType == bls.MtSignature || + msgType == bls.MtBlockHeaderFinalInfo || + msgType == bls.MtInvalidSigners return isMessageTypeValid } // IsSubroundSignature returns if the current subround is about signature func (wrk *worker) IsSubroundSignature(subroundId int) bool { - return subroundId == SrSignature + return subroundId == bls.SrSignature } // IsSubroundStartRound returns if the current subround is about start round func (wrk *worker) IsSubroundStartRound(subroundId int) bool { - return subroundId == SrStartRound + return subroundId == bls.SrStartRound } // GetMessageRange provides the MessageType range used in checks by the consensus func (wrk *worker) GetMessageRange() []consensus.MessageType { var v []consensus.MessageType - for i := MtBlockBodyAndHeader; i <= MtInvalidSigners; i++ { + for i := bls.MtBlockBodyAndHeader; i <= bls.MtInvalidSigners; i++ { v = append(v, i) } @@ -131,18 +132,18 @@ func (wrk *worker) GetMessageRange() []consensus.MessageType { // CanProceed returns if the current messageType can proceed further if previous subrounds finished func (wrk *worker) CanProceed(consensusState *spos.ConsensusState, msgType consensus.MessageType) bool { switch msgType { - case MtBlockBodyAndHeader: - return consensusState.Status(SrStartRound) == spos.SsFinished - case MtBlockBody: - return consensusState.Status(SrStartRound) == spos.SsFinished - case MtBlockHeader: - return consensusState.Status(SrStartRound) == spos.SsFinished - case MtSignature: - return consensusState.Status(SrBlock) == spos.SsFinished - case MtBlockHeaderFinalInfo: - return consensusState.Status(SrSignature) == spos.SsFinished - case MtInvalidSigners: - return consensusState.Status(SrSignature) == spos.SsFinished + case bls.MtBlockBodyAndHeader: + return consensusState.Status(bls.SrStartRound) == spos.SsFinished + case bls.MtBlockBody: + return consensusState.Status(bls.SrStartRound) == spos.SsFinished + case bls.MtBlockHeader: + return consensusState.Status(bls.SrStartRound) == spos.SsFinished + case bls.MtSignature: + return consensusState.Status(bls.SrBlock) == spos.SsFinished + case bls.MtBlockHeaderFinalInfo: + return consensusState.Status(bls.SrSignature) == spos.SsFinished + case bls.MtInvalidSigners: + return consensusState.Status(bls.SrSignature) == spos.SsFinished } return false @@ -150,7 +151,7 @@ func (wrk *worker) CanProceed(consensusState *spos.ConsensusState, msgType conse // GetMaxNumOfMessageTypeAccepted returns the maximum number of accepted consensus message types per round, per public key func (wrk *worker) GetMaxNumOfMessageTypeAccepted(msgType consensus.MessageType) uint32 { - if msgType == MtSignature { + if msgType == bls.MtSignature { return maxNumOfMessageTypeSignatureAccepted } diff --git a/consensus/spos/bls/v2/blsWorker_test.go b/consensus/spos/bls/v2/blsWorker_test.go index 1f8377ef266..334ed6bbf43 100644 --- a/consensus/spos/bls/v2/blsWorker_test.go +++ b/consensus/spos/bls/v2/blsWorker_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/testscommon" ) @@ -157,7 +158,7 @@ func createConsensusStateWithNodes(eligibleNodesPubKeys 
map[string]struct{}, con func TestWorker_NewConsensusServiceShouldWork(t *testing.T) { t.Parallel() - service, err := bls.NewConsensusService() + service, err := v2.NewConsensusService() assert.Nil(t, err) assert.False(t, check.IfNil(service)) } @@ -165,7 +166,7 @@ func TestWorker_NewConsensusServiceShouldWork(t *testing.T) { func TestWorker_InitReceivedMessagesShouldWork(t *testing.T) { t.Parallel() - bnService, _ := bls.NewConsensusService() + bnService, _ := v2.NewConsensusService() messages := bnService.InitReceivedMessages() receivedMessages := make(map[consensus.MessageType][]*consensus.Message) @@ -189,7 +190,7 @@ func TestWorker_GetMessageRangeShouldWork(t *testing.T) { t.Parallel() v := make([]consensus.MessageType, 0) - blsService, _ := bls.NewConsensusService() + blsService, _ := v2.NewConsensusService() messagesRange := blsService.GetMessageRange() assert.NotNil(t, messagesRange) @@ -207,7 +208,7 @@ func TestWorker_GetMessageRangeShouldWork(t *testing.T) { func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyAndHeaderShouldWork(t *testing.T) { t.Parallel() - blsService, _ := bls.NewConsensusService() + blsService, _ := v2.NewConsensusService() consensusState := initConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) @@ -219,7 +220,7 @@ func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyAndHeaderShouldW func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyAndHeaderShouldNotWork(t *testing.T) { t.Parallel() - blsService, _ := bls.NewConsensusService() + blsService, _ := v2.NewConsensusService() consensusState := initConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) @@ -231,7 +232,7 @@ func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyAndHeaderShou func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyShouldWork(t *testing.T) { t.Parallel() - blsService, _ := bls.NewConsensusService() + blsService, _ := v2.NewConsensusService() consensusState := initConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) @@ -243,7 +244,7 @@ func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyShouldWork(t *te func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyShouldNotWork(t *testing.T) { t.Parallel() - blsService, _ := bls.NewConsensusService() + blsService, _ := v2.NewConsensusService() consensusState := initConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) @@ -255,7 +256,7 @@ func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyShouldNotWork func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockHeaderShouldWork(t *testing.T) { t.Parallel() - blsService, _ := bls.NewConsensusService() + blsService, _ := v2.NewConsensusService() consensusState := initConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) @@ -267,7 +268,7 @@ func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockHeaderShouldWork(t * func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockHeaderShouldNotWork(t *testing.T) { t.Parallel() - blsService, _ := bls.NewConsensusService() + blsService, _ := v2.NewConsensusService() consensusState := initConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) @@ -279,7 +280,7 @@ func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockHeaderShouldNotWo func TestWorker_CanProceedWithSrBlockFinishedForMtBlockHeaderShouldWork(t *testing.T) { t.Parallel() - blsService, _ := bls.NewConsensusService() + blsService, _ := 
v2.NewConsensusService() consensusState := initConsensusState() consensusState.SetStatus(bls.SrBlock, spos.SsFinished) @@ -291,7 +292,7 @@ func TestWorker_CanProceedWithSrBlockFinishedForMtBlockHeaderShouldWork(t *testi func TestWorker_CanProceedWithSrBlockRoundNotFinishedForMtBlockHeaderShouldNotWork(t *testing.T) { t.Parallel() - blsService, _ := bls.NewConsensusService() + blsService, _ := v2.NewConsensusService() consensusState := initConsensusState() consensusState.SetStatus(bls.SrBlock, spos.SsNotFinished) @@ -303,7 +304,7 @@ func TestWorker_CanProceedWithSrBlockRoundNotFinishedForMtBlockHeaderShouldNotWo func TestWorker_CanProceedWithSrSignatureFinishedForMtBlockHeaderFinalInfoShouldWork(t *testing.T) { t.Parallel() - blsService, _ := bls.NewConsensusService() + blsService, _ := v2.NewConsensusService() consensusState := initConsensusState() consensusState.SetStatus(bls.SrSignature, spos.SsFinished) @@ -315,7 +316,7 @@ func TestWorker_CanProceedWithSrSignatureFinishedForMtBlockHeaderFinalInfoShould func TestWorker_CanProceedWithSrSignatureRoundNotFinishedForMtBlockHeaderFinalInfoShouldNotWork(t *testing.T) { t.Parallel() - blsService, _ := bls.NewConsensusService() + blsService, _ := v2.NewConsensusService() consensusState := initConsensusState() consensusState.SetStatus(bls.SrSignature, spos.SsNotFinished) @@ -327,7 +328,7 @@ func TestWorker_CanProceedWithSrSignatureRoundNotFinishedForMtBlockHeaderFinalIn func TestWorker_CanProceedWitUnkownMessageTypeShouldNotWork(t *testing.T) { t.Parallel() - blsService, _ := bls.NewConsensusService() + blsService, _ := v2.NewConsensusService() consensusState := initConsensusState() canProceed := blsService.CanProceed(consensusState, -1) @@ -337,7 +338,7 @@ func TestWorker_CanProceedWitUnkownMessageTypeShouldNotWork(t *testing.T) { func TestWorker_GetSubroundName(t *testing.T) { t.Parallel() - service, _ := bls.NewConsensusService() + service, _ := v2.NewConsensusService() r := service.GetSubroundName(bls.SrStartRound) assert.Equal(t, "(START_ROUND)", r) @@ -354,7 +355,7 @@ func TestWorker_GetSubroundName(t *testing.T) { func TestWorker_GetStringValue(t *testing.T) { t.Parallel() - service, _ := bls.NewConsensusService() + service, _ := v2.NewConsensusService() r := service.GetStringValue(bls.MtBlockBodyAndHeader) assert.Equal(t, bls.BlockBodyAndHeaderStringValue, r) @@ -375,7 +376,7 @@ func TestWorker_GetStringValue(t *testing.T) { func TestWorker_IsMessageWithBlockBodyAndHeader(t *testing.T) { t.Parallel() - service, _ := bls.NewConsensusService() + service, _ := v2.NewConsensusService() ret := service.IsMessageWithBlockBodyAndHeader(bls.MtBlockBody) assert.False(t, ret) @@ -390,7 +391,7 @@ func TestWorker_IsMessageWithBlockBodyAndHeader(t *testing.T) { func TestWorker_IsMessageWithBlockBody(t *testing.T) { t.Parallel() - service, _ := bls.NewConsensusService() + service, _ := v2.NewConsensusService() ret := service.IsMessageWithBlockBody(bls.MtBlockHeader) assert.False(t, ret) @@ -402,7 +403,7 @@ func TestWorker_IsMessageWithBlockBody(t *testing.T) { func TestWorker_IsMessageWithBlockHeader(t *testing.T) { t.Parallel() - service, _ := bls.NewConsensusService() + service, _ := v2.NewConsensusService() ret := service.IsMessageWithBlockHeader(bls.MtBlockBody) assert.False(t, ret) @@ -414,7 +415,7 @@ func TestWorker_IsMessageWithBlockHeader(t *testing.T) { func TestWorker_IsMessageWithSignature(t *testing.T) { t.Parallel() - service, _ := bls.NewConsensusService() + service, _ := v2.NewConsensusService() ret := 
service.IsMessageWithSignature(bls.MtBlockBodyAndHeader) assert.False(t, ret) @@ -426,7 +427,7 @@ func TestWorker_IsMessageWithSignature(t *testing.T) { func TestWorker_IsMessageWithFinalInfo(t *testing.T) { t.Parallel() - service, _ := bls.NewConsensusService() + service, _ := v2.NewConsensusService() ret := service.IsMessageWithFinalInfo(bls.MtSignature) assert.False(t, ret) @@ -438,7 +439,7 @@ func TestWorker_IsMessageWithFinalInfo(t *testing.T) { func TestWorker_IsMessageWithInvalidSigners(t *testing.T) { t.Parallel() - service, _ := bls.NewConsensusService() + service, _ := v2.NewConsensusService() ret := service.IsMessageWithInvalidSigners(bls.MtBlockHeaderFinalInfo) assert.False(t, ret) @@ -450,7 +451,7 @@ func TestWorker_IsMessageWithInvalidSigners(t *testing.T) { func TestWorker_IsSubroundSignature(t *testing.T) { t.Parallel() - service, _ := bls.NewConsensusService() + service, _ := v2.NewConsensusService() ret := service.IsSubroundSignature(bls.SrEndRound) assert.False(t, ret) @@ -462,7 +463,7 @@ func TestWorker_IsSubroundSignature(t *testing.T) { func TestWorker_IsSubroundStartRound(t *testing.T) { t.Parallel() - service, _ := bls.NewConsensusService() + service, _ := v2.NewConsensusService() ret := service.IsSubroundStartRound(bls.SrSignature) assert.False(t, ret) @@ -474,7 +475,7 @@ func TestWorker_IsSubroundStartRound(t *testing.T) { func TestWorker_IsMessageTypeValid(t *testing.T) { t.Parallel() - service, _ := bls.NewConsensusService() + service, _ := v2.NewConsensusService() ret := service.IsMessageTypeValid(bls.MtBlockBody) assert.True(t, ret) @@ -486,19 +487,19 @@ func TestWorker_IsMessageTypeValid(t *testing.T) { func TestWorker_GetMaxNumOfMessageTypeAccepted(t *testing.T) { t.Parallel() - service, _ := bls.NewConsensusService() + service, _ := v2.NewConsensusService() t.Run("message type signature", func(t *testing.T) { t.Parallel() - assert.Equal(t, bls.MaxNumOfMessageTypeSignatureAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtSignature)) + assert.Equal(t, v2.MaxNumOfMessageTypeSignatureAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtSignature)) }) t.Run("other message types", func(t *testing.T) { t.Parallel() - assert.Equal(t, bls.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtUnknown)) - assert.Equal(t, bls.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockBody)) - assert.Equal(t, bls.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockHeader)) - assert.Equal(t, bls.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockBodyAndHeader)) - assert.Equal(t, bls.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockHeaderFinalInfo)) + assert.Equal(t, v2.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtUnknown)) + assert.Equal(t, v2.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockBody)) + assert.Equal(t, v2.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockHeader)) + assert.Equal(t, v2.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockBodyAndHeader)) + assert.Equal(t, v2.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockHeaderFinalInfo)) }) } diff --git a/consensus/spos/bls/v2/constants.go b/consensus/spos/bls/v2/constants.go index a395f506ddd..ccfd6c27395 100644 --- a/consensus/spos/bls/v2/constants.go +++ 
b/consensus/spos/bls/v2/constants.go @@ -2,41 +2,10 @@ package v2 import ( logger "github.com/multiversx/mx-chain-logger-go" - - "github.com/multiversx/mx-chain-go/consensus" ) var log = logger.GetOrCreate("consensus/spos/bls") -const ( - // SrStartRound defines ID of Subround "Start round" - SrStartRound = iota - // SrBlock defines ID of Subround "block" - SrBlock - // SrSignature defines ID of Subround "signature" - SrSignature - // SrEndRound defines ID of Subround "End round" - SrEndRound -) - -const ( - // MtUnknown defines ID of a message that has unknown data inside - MtUnknown consensus.MessageType = iota - // MtBlockBodyAndHeader defines ID of a message that has a block body and a block header inside - MtBlockBodyAndHeader - // MtBlockBody defines ID of a message that has a block body inside - MtBlockBody - // MtBlockHeader defines ID of a message that has a block header inside - MtBlockHeader - // MtSignature defines ID of a message that has a Signature inside - MtSignature - // MtBlockHeaderFinalInfo defines ID of a message that has a block header final info inside - // (aggregate signature, bitmap and seal leader signature for the proposed and accepted header) - MtBlockHeaderFinalInfo - // MtInvalidSigners defines ID of a message that has a invalid signers p2p messages inside - MtInvalidSigners -) - // waitingAllSigsMaxTimeThreshold specifies the max allocated time for waiting all signatures from the total time of the subround signature const waitingAllSigsMaxTimeThreshold = 0.5 @@ -66,61 +35,3 @@ const srEndStartTime = 0.85 // srEndEndTime specifies the end time, from the total time of the round, of Subround End const srEndEndTime = 0.95 - -const ( - // BlockBodyAndHeaderStringValue represents the string to be used to identify a block body and a block header - BlockBodyAndHeaderStringValue = "(BLOCK_BODY_AND_HEADER)" - - // BlockBodyStringValue represents the string to be used to identify a block body - BlockBodyStringValue = "(BLOCK_BODY)" - - // BlockHeaderStringValue represents the string to be used to identify a block header - BlockHeaderStringValue = "(BLOCK_HEADER)" - - // BlockSignatureStringValue represents the string to be used to identify a block's signature - BlockSignatureStringValue = "(SIGNATURE)" - - // BlockHeaderFinalInfoStringValue represents the string to be used to identify a block's header final info - BlockHeaderFinalInfoStringValue = "(FINAL_INFO)" - - // BlockUnknownStringValue represents the string to be used to identify an unknown block - BlockUnknownStringValue = "(UNKNOWN)" - - // BlockDefaultStringValue represents the message to identify a message that is undefined - BlockDefaultStringValue = "Undefined message type" -) - -func getStringValue(msgType consensus.MessageType) string { - switch msgType { - case MtBlockBodyAndHeader: - return BlockBodyAndHeaderStringValue - case MtBlockBody: - return BlockBodyStringValue - case MtBlockHeader: - return BlockHeaderStringValue - case MtSignature: - return BlockSignatureStringValue - case MtBlockHeaderFinalInfo: - return BlockHeaderFinalInfoStringValue - case MtUnknown: - return BlockUnknownStringValue - default: - return BlockDefaultStringValue - } -} - -// getSubroundName returns the name of each Subround from a given Subround ID -func getSubroundName(subroundId int) string { - switch subroundId { - case SrStartRound: - return "(START_ROUND)" - case SrBlock: - return "(BLOCK)" - case SrSignature: - return "(SIGNATURE)" - case SrEndRound: - return "(END_ROUND)" - default: - return "Undefined subround" - } 
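The subround IDs, message-type IDs and the helpers getStringValue/getSubroundName deleted from v2/constants.go in this hunk are the same ones the earlier hunks now reach through the shared bls package (bls.SrStartRound, bls.MtSignature, bls.GetSubroundName, bls.GetStringValue). The shared file itself is not included in this excerpt, so the sketch below only illustrates the presumed exported form of the subround-name helper in package bls, mirroring the unexported v2 implementation removed above; it is an assumption for illustration, not a line of this patch.

package bls

// GetSubroundName returns the display name for a subround ID. It mirrors the
// unexported getSubroundName deleted from v2/constants.go; its exact location
// inside the shared bls package is assumed, since that file is not shown here.
func GetSubroundName(subroundId int) string {
	switch subroundId {
	case SrStartRound:
		return "(START_ROUND)"
	case SrBlock:
		return "(BLOCK)"
	case SrSignature:
		return "(SIGNATURE)"
	case SrEndRound:
		return "(END_ROUND)"
	default:
		return "Undefined subround"
	}
}

GetStringValue presumably follows the same pattern for the message-type constants, which is consistent with both the v1 and v2 workers delegating to bls.GetStringValue and bls.GetSubroundName in the hunks above.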
-} diff --git a/consensus/spos/bls/v2/export_test.go b/consensus/spos/bls/v2/export_test.go index 33bef8d7328..33f0ddadb3a 100644 --- a/consensus/spos/bls/v2/export_test.go +++ b/consensus/spos/bls/v2/export_test.go @@ -358,11 +358,6 @@ func (sr *subroundEndRound) GetSentSignatureTracker() spos.SentSignaturesTracker return sr.sentSignatureTracker } -// GetStringValue calls the unexported getStringValue function -func GetStringValue(messageType consensus.MessageType) string { - return getStringValue(messageType) -} - // ChangeEpoch calls the unexported changeEpoch function func (sr *subroundStartRound) ChangeEpoch(epoch uint32) { sr.changeEpoch(epoch) diff --git a/consensus/spos/bls/v2/subroundBlock.go b/consensus/spos/bls/v2/subroundBlock.go index 7131415a0c7..1db0ed87ae2 100644 --- a/consensus/spos/bls/v2/subroundBlock.go +++ b/consensus/spos/bls/v2/subroundBlock.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" ) // maxAllowedSizeInBytes defines how many bytes are allowed as payload in a message @@ -221,7 +222,7 @@ func (sr *subroundBlock) sendHeaderAndBlockBody( marshalizedHeader, []byte(leader), nil, - int(MtBlockBodyAndHeader), + int(bls.MtBlockBodyAndHeader), sr.RoundHandler().Index(), sr.ChainID(), nil, @@ -266,7 +267,7 @@ func (sr *subroundBlock) sendBlockBody( nil, []byte(leader), nil, - int(MtBlockBody), + int(bls.MtBlockBody), sr.RoundHandler().Index(), sr.ChainID(), nil, @@ -342,7 +343,7 @@ func (sr *subroundBlock) sendBlockHeaderBeforeEquivalentProofs( marshalledHeader, []byte(leader), nil, - int(MtBlockHeader), + int(bls.MtBlockHeader), sr.RoundHandler().Index(), sr.ChainID(), nil, diff --git a/consensus/spos/bls/v2/subroundBlock_test.go b/consensus/spos/bls/v2/subroundBlock_test.go index 9f96ed9af4e..209d10d15bb 100644 --- a/consensus/spos/bls/v2/subroundBlock_test.go +++ b/consensus/spos/bls/v2/subroundBlock_test.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" "github.com/multiversx/mx-chain-go/testscommon" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" @@ -62,20 +63,20 @@ func createDefaultHeader() *block.Header { } } -func defaultSubroundBlockFromSubround(sr *spos.Subround) (bls.SubroundBlock, error) { - srBlock, err := bls.NewSubroundBlock( +func defaultSubroundBlockFromSubround(sr *spos.Subround) (v2.SubroundBlock, error) { + srBlock, err := v2.NewSubroundBlock( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &mock.SposWorkerMock{}, ) return srBlock, err } -func defaultSubroundBlockWithoutErrorFromSubround(sr *spos.Subround) bls.SubroundBlock { - srBlock, _ := bls.NewSubroundBlock( +func defaultSubroundBlockWithoutErrorFromSubround(sr *spos.Subround) v2.SubroundBlock { + srBlock, _ := v2.NewSubroundBlock( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &mock.SposWorkerMock{}, ) @@ -86,7 +87,7 @@ func initSubroundBlock( blockChain data.ChainHandler, container *consensusMocks.ConsensusCoreMock, appStatusHandler core.AppStatusHandler, -) bls.SubroundBlock { +) v2.SubroundBlock { if blockChain == nil { blockChain = &testscommon.ChainHandlerStub{ 
GetCurrentBlockHeaderCalled: func() data.HeaderHandler { @@ -127,7 +128,7 @@ func createConsensusContainers() []*consensusMocks.ConsensusCoreMock { func initSubroundBlockWithBlockProcessor( bp *testscommon.BlockProcessorStub, container *consensusMocks.ConsensusCoreMock, -) bls.SubroundBlock { +) v2.SubroundBlock { blockChain := &testscommon.ChainHandlerStub{ GetGenesisHeaderCalled: func() data.HeaderHandler { return &block.Header{ @@ -154,9 +155,9 @@ func initSubroundBlockWithBlockProcessor( func TestSubroundBlock_NewSubroundBlockNilSubroundShouldFail(t *testing.T) { t.Parallel() - srBlock, err := bls.NewSubroundBlock( + srBlock, err := v2.NewSubroundBlock( nil, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &mock.SposWorkerMock{}, ) assert.Nil(t, srBlock) @@ -308,9 +309,9 @@ func TestSubroundBlock_NewSubroundBlockNilWorkerShouldFail(t *testing.T) { ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) - srBlock, err := bls.NewSubroundBlock( + srBlock, err := v2.NewSubroundBlock( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, nil, ) assert.Nil(t, srBlock) @@ -483,9 +484,9 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { ch := make(chan bool, 1) baseSr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) - srBlock, _ := bls.NewSubroundBlock( + srBlock, _ := v2.NewSubroundBlock( baseSr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &mock.SposWorkerMock{}, ) sr := *srBlock diff --git a/consensus/spos/bls/v2/subroundEndRound.go b/consensus/spos/bls/v2/subroundEndRound.go index c142f1e4da1..fbe58f4c6b4 100644 --- a/consensus/spos/bls/v2/subroundEndRound.go +++ b/consensus/spos/bls/v2/subroundEndRound.go @@ -17,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/headerCheck" ) @@ -453,7 +454,7 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { } func (sr *subroundEndRound) sendFinalInfo(sender []byte) (data.HeaderProofHandler, bool) { - bitmap := sr.GenerateBitmap(SrSignature) + bitmap := sr.GenerateBitmap(bls.SrSignature) err := sr.checkSignaturesValidity(bitmap) if err != nil { log.Debug("sendFinalInfo.checkSignaturesValidity", "error", err.Error()) @@ -590,7 +591,7 @@ func (sr *subroundEndRound) verifySignature(i int, pk string, sigShare []byte) e err := sr.SigningHandler().VerifySignatureShare(uint16(i), sigShare, sr.GetData(), sr.Header.GetEpoch()) if err != nil { log.Trace("VerifySignatureShare returned an error: ", err) - errSetJob := sr.SetJobDone(pk, SrSignature, false) + errSetJob := sr.SetJobDone(pk, bls.SrSignature, false) if errSetJob != nil { return errSetJob } @@ -621,7 +622,7 @@ func (sr *subroundEndRound) verifyNodesOnAggSigFail(ctx context.Context) ([]stri } for i, pk := range pubKeys { - isJobDone, err := sr.JobDone(pk, SrSignature) + isJobDone, err := sr.JobDone(pk, bls.SrSignature) if err != nil || !isJobDone { continue } @@ -708,8 +709,8 @@ func (sr *subroundEndRound) handleInvalidSignersOnAggSigFail() ([]byte, []byte, } func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, []byte, error) { - threshold := sr.Threshold(SrSignature) - numValidSigShares := sr.ComputeSize(SrSignature) + threshold := 
sr.Threshold(bls.SrSignature) + numValidSigShares := sr.ComputeSize(bls.SrSignature) if check.IfNil(sr.Header) { return nil, nil, spos.ErrNilHeader @@ -720,7 +721,7 @@ func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, []byte, error) spos.ErrInvalidNumSigShares, numValidSigShares, threshold) } - bitmap := sr.GenerateBitmap(SrSignature) + bitmap := sr.GenerateBitmap(bls.SrSignature) err := sr.checkSignaturesValidity(bitmap) if err != nil { return nil, nil, err @@ -747,7 +748,7 @@ func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfoForKey(signature [] nil, pubKey, nil, - int(MtBlockHeaderFinalInfo), + int(bls.MtBlockHeaderFinalInfo), sr.RoundHandler().Index(), sr.ChainID(), bitmap, @@ -808,7 +809,7 @@ func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []by nil, sender, nil, - int(MtInvalidSigners), + int(bls.MtInvalidSigners), sr.RoundHandler().Index(), sr.ChainID(), nil, @@ -1089,7 +1090,7 @@ func (sr *subroundEndRound) checkSignaturesValidity(bitmap []byte) error { consensusGroup := sr.ConsensusGroup() signers := headerCheck.ComputeSignersPublicKeys(consensusGroup, bitmap) for _, pubKey := range signers { - isSigJobDone, err := sr.JobDone(pubKey, SrSignature) + isSigJobDone, err := sr.JobDone(pubKey, bls.SrSignature) if err != nil { return err } @@ -1305,7 +1306,7 @@ func (sr *subroundEndRound) receivedSignature(_ context.Context, cnsDta *consens return false } - err = sr.SetJobDone(node, SrSignature, true) + err = sr.SetJobDone(node, bls.SrSignature, true) if err != nil { log.Debug("receivedSignature.SetJobDone", "node", pkForLogs, @@ -1324,9 +1325,9 @@ func (sr *subroundEndRound) receivedSignature(_ context.Context, cnsDta *consens } func (sr *subroundEndRound) checkReceivedSignatures() bool { - threshold := sr.Threshold(SrSignature) + threshold := sr.Threshold(bls.SrSignature) if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.Header) { - threshold = sr.FallbackThreshold(SrSignature) + threshold = sr.FallbackThreshold(bls.SrSignature) log.Warn("subroundEndRound.checkReceivedSignatures: fallback validation has been applied", "minimum number of signatures required", threshold, "actual number of signatures received", sr.getNumOfSignaturesCollected(), @@ -1338,7 +1339,7 @@ func (sr *subroundEndRound) checkReceivedSignatures() bool { isSignatureCollectionDone := areAllSignaturesCollected || (areSignaturesCollected && sr.WaitingAllSignaturesTimeOut) - isSelfJobDone := sr.IsSelfJobDone(SrSignature) + isSelfJobDone := sr.IsSelfJobDone(bls.SrSignature) shouldStopWaitingSignatures := isSelfJobDone && isSignatureCollectionDone if shouldStopWaitingSignatures { @@ -1359,7 +1360,7 @@ func (sr *subroundEndRound) getNumOfSignaturesCollected() int { for i := 0; i < len(sr.ConsensusGroup()); i++ { node := sr.ConsensusGroup()[i] - isSignJobDone, err := sr.JobDone(node, SrSignature) + isSignJobDone, err := sr.JobDone(node, bls.SrSignature) if err != nil { log.Debug("getNumOfSignaturesCollected.JobDone", "node", node, diff --git a/consensus/spos/bls/v2/subroundEndRound_test.go b/consensus/spos/bls/v2/subroundEndRound_test.go index 6c8f448cd80..98edb65e825 100644 --- a/consensus/spos/bls/v2/subroundEndRound_test.go +++ b/consensus/spos/bls/v2/subroundEndRound_test.go @@ -22,6 +22,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" 
"github.com/multiversx/mx-chain-go/dataRetriever/blockchain" dataRetrieverMocks "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/p2p" @@ -37,7 +38,7 @@ import ( func initSubroundEndRoundWithContainer( container *consensusMocks.ConsensusCoreMock, appStatusHandler core.AppStatusHandler, -) bls.SubroundEndRound { +) v2.SubroundEndRound { ch := make(chan bool, 1) consensusState := initConsensusStateWithNodesCoordinator(container.NodesCoordinator()) sr, _ := spos.NewSubround( @@ -59,9 +60,9 @@ func initSubroundEndRoundWithContainer( Header: createDefaultHeader(), } - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, appStatusHandler, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -76,7 +77,7 @@ func initSubroundEndRoundWithContainerAndConsensusState( appStatusHandler core.AppStatusHandler, consensusState *spos.ConsensusState, signatureThrottler core.Throttler, -) bls.SubroundEndRound { +) v2.SubroundEndRound { ch := make(chan bool, 1) sr, _ := spos.NewSubround( bls.SrSignature, @@ -97,9 +98,9 @@ func initSubroundEndRoundWithContainerAndConsensusState( Header: createDefaultHeader(), } - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, appStatusHandler, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -109,7 +110,7 @@ func initSubroundEndRoundWithContainerAndConsensusState( return srEndRound } -func initSubroundEndRound(appStatusHandler core.AppStatusHandler) bls.SubroundEndRound { +func initSubroundEndRound(appStatusHandler core.AppStatusHandler) v2.SubroundEndRound { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, appStatusHandler) sr.Header = &block.HeaderV2{ @@ -143,9 +144,9 @@ func TestNewSubroundEndRound(t *testing.T) { t.Run("nil subround should error", func(t *testing.T) { t.Parallel() - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( nil, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -158,9 +159,9 @@ func TestNewSubroundEndRound(t *testing.T) { t.Run("nil app status handler should error", func(t *testing.T) { t.Parallel() - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, nil, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -173,9 +174,9 @@ func TestNewSubroundEndRound(t *testing.T) { t.Run("nil sent signatures tracker should error", func(t *testing.T) { t.Parallel() - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, nil, &mock.SposWorkerMock{}, @@ -183,14 +184,14 @@ func TestNewSubroundEndRound(t *testing.T) { ) assert.Nil(t, srEndRound) - assert.Equal(t, bls.ErrNilSentSignatureTracker, err) + assert.Equal(t, v2.ErrNilSentSignatureTracker, err) }) t.Run("nil worker should error", func(t *testing.T) { t.Parallel() - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, 
&statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, nil, @@ -225,9 +226,9 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing. &statusHandler.AppStatusHandlerStub{}, ) container.SetBlockchain(nil) - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -261,9 +262,9 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *test &statusHandler.AppStatusHandlerStub{}, ) container.SetBlockProcessor(nil) - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -298,9 +299,9 @@ func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *test ) sr.ConsensusState = nil - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -334,9 +335,9 @@ func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t &statusHandler.AppStatusHandlerStub{}, ) container.SetMultiSignerContainer(nil) - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -370,9 +371,9 @@ func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testin &statusHandler.AppStatusHandlerStub{}, ) container.SetRoundHandler(nil) - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -406,9 +407,9 @@ func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T &statusHandler.AppStatusHandlerStub{}, ) container.SetSyncTimer(nil) - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -442,9 +443,9 @@ func TestSubroundEndRound_NewSubroundEndRoundNilThrottlerShouldFail(t *testing.T &statusHandler.AppStatusHandlerStub{}, ) - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -478,9 +479,9 @@ func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, ) - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -1072,9 +1073,9 @@ func 
TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, ) - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -1218,9 +1219,9 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { Header: createDefaultHeader(), } - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -1579,9 +1580,9 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { Header: createDefaultHeader(), } - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -1747,9 +1748,9 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, ) - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -1871,9 +1872,9 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, ) - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, @@ -2239,9 +2240,9 @@ func TestSubroundEndRound_getMinConsensusGroupIndexOfManagedKeys(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, ) - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, diff --git a/consensus/spos/bls/v2/subroundSignature.go b/consensus/spos/bls/v2/subroundSignature.go index dfcf3cfcc8c..0e280aee8dc 100644 --- a/consensus/spos/bls/v2/subroundSignature.go +++ b/consensus/spos/bls/v2/subroundSignature.go @@ -15,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" ) const timeSpentBetweenChecks = time.Millisecond @@ -122,7 +123,7 @@ func (sr *subroundSignature) createAndSendSignatureMessage(signatureShare []byte nil, pkBytes, nil, - int(MtSignature), + int(bls.MtSignature), sr.RoundHandler().Index(), sr.ChainID(), nil, diff --git a/consensus/spos/bls/v2/subroundSignature_test.go b/consensus/spos/bls/v2/subroundSignature_test.go index 5b152eca937..f0c8dc00644 100644 --- a/consensus/spos/bls/v2/subroundSignature_test.go +++ b/consensus/spos/bls/v2/subroundSignature_test.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" 
dataRetrieverMock "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/testscommon" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" @@ -28,7 +29,7 @@ import ( const setThresholdJobsDone = "threshold" -func initSubroundSignatureWithContainer(container *consensusMocks.ConsensusCoreMock) bls.SubroundSignature { +func initSubroundSignatureWithContainer(container *consensusMocks.ConsensusCoreMock) v2.SubroundSignature { consensusState := initConsensusState() ch := make(chan bool, 1) @@ -48,7 +49,7 @@ func initSubroundSignatureWithContainer(container *consensusMocks.ConsensusCoreM &statusHandler.AppStatusHandlerStub{}, ) - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, @@ -59,7 +60,7 @@ func initSubroundSignatureWithContainer(container *consensusMocks.ConsensusCoreM return srSignature } -func initSubroundSignature() bls.SubroundSignature { +func initSubroundSignature() v2.SubroundSignature { container := consensusMocks.InitConsensusCore() return initSubroundSignatureWithContainer(container) } @@ -90,7 +91,7 @@ func TestNewSubroundSignature(t *testing.T) { t.Run("nil subround should error", func(t *testing.T) { t.Parallel() - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( nil, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, @@ -104,7 +105,7 @@ func TestNewSubroundSignature(t *testing.T) { t.Run("nil worker should error", func(t *testing.T) { t.Parallel() - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, @@ -118,7 +119,7 @@ func TestNewSubroundSignature(t *testing.T) { t.Run("nil app status handler should error", func(t *testing.T) { t.Parallel() - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, nil, &testscommon.SentSignatureTrackerStub{}, @@ -132,7 +133,7 @@ func TestNewSubroundSignature(t *testing.T) { t.Run("nil sent signatures tracker should error", func(t *testing.T) { t.Parallel() - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, nil, @@ -141,13 +142,13 @@ func TestNewSubroundSignature(t *testing.T) { ) assert.Nil(t, srSignature) - assert.Equal(t, bls.ErrNilSentSignatureTracker, err) + assert.Equal(t, v2.ErrNilSentSignatureTracker, err) }) t.Run("nil signatureThrottler should error", func(t *testing.T) { t.Parallel() - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, @@ -184,7 +185,7 @@ func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *te ) sr.ConsensusState = nil - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, @@ -219,7 +220,7 @@ func TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) &statusHandler.AppStatusHandlerStub{}, ) container.SetHasher(nil) - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, @@ -254,7 +255,7 @@ func 
TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail &statusHandler.AppStatusHandlerStub{}, ) container.SetMultiSignerContainer(nil) - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, @@ -290,7 +291,7 @@ func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *test ) container.SetRoundHandler(nil) - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, @@ -325,7 +326,7 @@ func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing &statusHandler.AppStatusHandlerStub{}, ) container.SetSyncTimer(nil) - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, @@ -360,7 +361,7 @@ func TestSubroundSignature_NewSubroundSignatureNilAppStatusHandlerShouldFail(t * &statusHandler.AppStatusHandlerStub{}, ) - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, nil, &testscommon.SentSignatureTrackerStub{}, @@ -395,7 +396,7 @@ func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, ) - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, @@ -546,7 +547,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { signatureSentForPks := make(map[string]struct{}) mutex := sync.Mutex{} - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{ @@ -653,7 +654,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { signatureSentForPks := make(map[string]struct{}) mutex := sync.Mutex{} - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{ @@ -761,7 +762,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { sr.Header = &block.Header{} signatureSentForPks := make(map[string]struct{}) - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{ @@ -828,7 +829,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { sr.Header = &block.Header{} signatureSentForPks := make(map[string]struct{}) - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{ @@ -896,7 +897,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { signatureSentForPks := make(map[string]struct{}) varCalled := false - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{ @@ -962,7 +963,7 @@ func TestSubroundSignature_DoSignatureJobForManagedKeys(t *testing.T) { signatureSentForPks := make(map[string]struct{}) mutex := sync.Mutex{} - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, 
&testscommon.SentSignatureTrackerStub{ @@ -1060,7 +1061,7 @@ func TestSubroundSignature_DoSignatureJobForManagedKeys(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, ) - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, diff --git a/consensus/spos/bls/v2/subroundStartRound_test.go b/consensus/spos/bls/v2/subroundStartRound_test.go index b0bd4bc9a26..ba042643986 100644 --- a/consensus/spos/bls/v2/subroundStartRound_test.go +++ b/consensus/spos/bls/v2/subroundStartRound_test.go @@ -9,6 +9,7 @@ import ( outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/stretchr/testify/require" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" processMock "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/testscommon/bootstrapperStubs" "github.com/multiversx/mx-chain-go/testscommon/consensus" @@ -28,10 +29,10 @@ import ( var expErr = fmt.Errorf("expected error") -func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (bls.SubroundStartRound, error) { - startRound, err := bls.NewSubroundStartRound( +func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (v2.SubroundStartRound, error) { + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -39,10 +40,10 @@ func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (bls.SubroundStart return startRound, err } -func defaultWithoutErrorSubroundStartRoundFromSubround(sr *spos.Subround) bls.SubroundStartRound { - startRound, _ := bls.NewSubroundStartRound( +func defaultWithoutErrorSubroundStartRoundFromSubround(sr *spos.Subround) v2.SubroundStartRound { + startRound, _ := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -73,13 +74,13 @@ func defaultSubround( ) } -func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) bls.SubroundStartRound { +func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) v2.SubroundStartRound { consensusState := initConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) - srStartRound, _ := bls.NewSubroundStartRound( + srStartRound, _ := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -87,7 +88,7 @@ func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) bl return srStartRound } -func initSubroundStartRound() bls.SubroundStartRound { +func initSubroundStartRound() v2.SubroundStartRound { container := consensus.InitConsensusCore() return initSubroundStartRoundWithContainer(container) } @@ -117,9 +118,9 @@ func TestNewSubroundStartRound(t *testing.T) { t.Run("nil subround should error", func(t *testing.T) { t.Parallel() - srStartRound, err := bls.NewSubroundStartRound( + srStartRound, err := v2.NewSubroundStartRound( nil, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -130,22 +131,22 @@ func TestNewSubroundStartRound(t *testing.T) { t.Run("nil sent signatures tracker should error", func(t *testing.T) { t.Parallel() - srStartRound, err := bls.NewSubroundStartRound( + 
srStartRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, nil, &mock.SposWorkerMock{}, ) assert.Nil(t, srStartRound) - assert.Equal(t, bls.ErrNilSentSignatureTracker, err) + assert.Equal(t, v2.ErrNilSentSignatureTracker, err) }) t.Run("nil worker should error", func(t *testing.T) { t.Parallel() - srStartRound, err := bls.NewSubroundStartRound( + srStartRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, nil, ) @@ -528,9 +529,9 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { appStatusHandler, ) - srStartRound, _ := bls.NewSubroundStartRound( + srStartRound, _ := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -580,9 +581,9 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { appStatusHandler, ) - srStartRound, _ := bls.NewSubroundStartRound( + srStartRound, _ := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -631,9 +632,9 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { appStatusHandler, ) - srStartRound, _ := bls.NewSubroundStartRound( + srStartRound, _ := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -693,9 +694,9 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { appStatusHandler, ) - srStartRound, _ := bls.NewSubroundStartRound( + srStartRound, _ := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -759,9 +760,9 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { appStatusHandler, ) - srStartRound, _ := bls.NewSubroundStartRound( + srStartRound, _ := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -808,9 +809,9 @@ func TestSubroundStartRound_GenerateNextConsensusGroupShouldErrNilHeader(t *test container.SetBlockchain(chainHandlerMock) sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -835,9 +836,9 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenResetErr(t *tes container.SetSigningHandler(signingHandlerMock) sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -871,9 +872,9 @@ func TestSubroundStartRound_IndexRoundIfNeededFailShardIdForEpoch(t *testing.T) sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -915,9 +916,9 @@ func 
TestSubroundStartRound_IndexRoundIfNeededFailGetValidatorsIndexes(t *testin sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -954,9 +955,9 @@ func TestSubroundStartRound_IndexRoundIfNeededShouldFullyWork(t *testing.T) { sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -997,9 +998,9 @@ func TestSubroundStartRound_IndexRoundIfNeededDifferentShardIdFail(t *testing.T) sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -1049,9 +1050,9 @@ func TestSubroundStartRound_changeEpoch(t *testing.T) { sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) @@ -1078,9 +1079,9 @@ func TestSubroundStartRound_changeEpoch(t *testing.T) { sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, &mock.SposWorkerMock{}, ) diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 028852c3116..a063b4b7139 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" cryptoCommon "github.com/multiversx/mx-chain-go/common/crypto" "github.com/multiversx/mx-chain-go/consensus" @@ -118,7 +119,7 @@ type WorkerHandler interface { // ReceivedHeader method is a wired method through which worker will receive headers from network ReceivedHeader(headerHandler data.HeaderHandler, headerHash []byte) // ResetConsensusMessages resets at the start of each round all the previous consensus messages received and equivalent messages, keeping the provided proofs - ResetConsensusMessages(currentHash []byte, prevHash []byte) + ResetConsensusMessages() // IsInterfaceNil returns true if there is no value under the interface IsInterfaceNil() bool } diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index c7ec3124701..dffa665c6b9 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -751,7 +751,7 @@ func (wrk *Worker) Close() error { } // ResetConsensusMessages resets at the start of each round all the previous consensus messages received and equivalent messages, keeping the provided proofs -func (wrk *Worker) ResetConsensusMessages(currentHash []byte, prevHash []byte) { +func (wrk *Worker) ResetConsensusMessages() { wrk.consensusMessageValidator.resetConsensusMessages() wrk.equivalentMessagesDebugger.ResetEquivalentMessages() } diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go 
index 5fa1355f9e0..0b3b30c2091 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -24,6 +24,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/testscommon" @@ -84,7 +85,7 @@ func createDefaultWorkerArgs(appStatusHandler core.AppStatusHandler) *spos.Worke } syncTimerMock := &consensusMocks.SyncTimerMock{} hasher := &hashingMocks.HasherMock{} - blsService, _ := bls.NewConsensusService() + blsService, _ := v1.NewConsensusService() poolAdder := cache.NewCacherMock() scheduledProcessorArgs := spos.ScheduledProcessorWrapperArgs{ diff --git a/factory/interface.go b/factory/interface.go index 0bbc16f1982..762271f934b 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -14,6 +14,8 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" crypto "github.com/multiversx/mx-chain-crypto-go" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-go/cmd/node/factory" "github.com/multiversx/mx-chain-go/common" cryptoCommon "github.com/multiversx/mx-chain-go/common/crypto" @@ -37,7 +39,6 @@ import ( "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/update" "github.com/multiversx/mx-chain-go/vm" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) // EpochStartNotifier defines which actions should be done for handling new epoch's events @@ -398,7 +399,7 @@ type ConsensusWorker interface { // DisplayStatistics method displays statistics of worker at the end of the round DisplayStatistics() // ResetConsensusMessages resets at the start of each round all the previous consensus messages received and equivalent messages, keeping the provided proofs - ResetConsensusMessages(currentHash []byte, prevHash []byte) + ResetConsensusMessages() // ReceivedHeader method is a wired method through which worker will receive headers from network ReceivedHeader(headerHandler data.HeaderHandler, headerHash []byte) // IsInterfaceNil returns true if there is no value under the interface From 9ec2d11fc7dfa426590ee20f6c58002fb5d33c12 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 20 Sep 2024 17:06:17 +0300 Subject: [PATCH 05/30] fix start round reset --- consensus/spos/bls/v2/subroundStartRound.go | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/consensus/spos/bls/v2/subroundStartRound.go b/consensus/spos/bls/v2/subroundStartRound.go index e0cc0b5d055..3e2980146cc 100644 --- a/consensus/spos/bls/v2/subroundStartRound.go +++ b/consensus/spos/bls/v2/subroundStartRound.go @@ -100,14 +100,7 @@ func (sr *subroundStartRound) doStartRoundJob(_ context.Context) bool { sr.RoundTimeStamp = sr.RoundHandler().TimeStamp() topic := spos.GetConsensusTopicID(sr.ShardCoordinator()) sr.GetAntiFloodHandler().ResetForTopic(topic) - // reset the consensus messages, but still keep the proofs for current hash and previous hash - currentHash := sr.Blockchain().GetCurrentBlockHeaderHash() - prevHash := make([]byte, 0) - currentHeader := sr.Blockchain().GetCurrentBlockHeader() - if !check.IfNil(currentHeader) { - prevHash = currentHeader.GetPrevHash() - } - sr.worker.ResetConsensusMessages(currentHash, prevHash) + sr.worker.ResetConsensusMessages() return true } 
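
[Editor's note, not part of the patch series] A minimal sketch of the call shape after PATCH 05, using a hypothetical sketchWorker in place of the real spos worker: the patched Worker.ResetConsensusMessages takes no arguments and, per the hunk above, clears the received consensus messages and the equivalent-messages debugger state; here the worker only tracks a counter for illustration.

package main

import "fmt"

// sketchWorker is a hypothetical stand-in for the consensus worker; it only
// models the parameterless reset introduced by this patch.
type sketchWorker struct {
	pendingConsensusMessages int
}

// ResetConsensusMessages mirrors the simplified signature: no current/previous
// header hashes are passed in anymore.
func (w *sketchWorker) ResetConsensusMessages() {
	w.pendingConsensusMessages = 0
}

// doStartRoundJobSketch shows the shape of the start-round job after the
// change: the worker reset is a plain call with no hash bookkeeping.
func doStartRoundJobSketch(w *sketchWorker) bool {
	w.ResetConsensusMessages()
	return true
}

func main() {
	w := &sketchWorker{pendingConsensusMessages: 3}
	ok := doStartRoundJobSketch(w)
	fmt.Println(ok, w.pendingConsensusMessages) // prints: true 0
}
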
From ba61a2d936398717587987e266b651205f28e45f Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 20 Sep 2024 17:46:05 +0300 Subject: [PATCH 06/30] tests fixes --- consensus/spos/bls/v1/blsWorker_test.go | 1 + consensus/spos/bls/v1/export_test.go | 2 +- consensus/spos/bls/v1/subroundBlock_test.go | 94 ++++++++++++--------- 3 files changed, 55 insertions(+), 42 deletions(-) diff --git a/consensus/spos/bls/v1/blsWorker_test.go b/consensus/spos/bls/v1/blsWorker_test.go index 21cf32a6de2..f25e0d91615 100644 --- a/consensus/spos/bls/v1/blsWorker_test.go +++ b/consensus/spos/bls/v1/blsWorker_test.go @@ -43,6 +43,7 @@ func initConsensusStateWithKeysHandler(keysHandler consensus.KeysHandler) *spos. ) rcns.SetConsensusGroup(eligibleList) + rcns.SetLeader(eligibleList[indexLeader]) rcns.ResetRoundState() pBFTThreshold := consensusGroupSize*2/3 + 1 diff --git a/consensus/spos/bls/v1/export_test.go b/consensus/spos/bls/v1/export_test.go index f5590b0b4f4..36755e05af3 100644 --- a/consensus/spos/bls/v1/export_test.go +++ b/consensus/spos/bls/v1/export_test.go @@ -161,7 +161,7 @@ func (sr *subroundStartRound) GetSentSignatureTracker() spos.SentSignaturesTrack // subroundBlock // SubroundBlock defines a type for the subroundBlock structure -type SubroundBlock *subroundBlock +type SubroundBlock = *subroundBlock // Blockchain gets the ChainHandler stored in the ConsensusCore func (sr *subroundBlock) BlockChain() data.ChainHandler { diff --git a/consensus/spos/bls/v1/subroundBlock_test.go b/consensus/spos/bls/v1/subroundBlock_test.go index 6724bd15d9e..1f42fdff49f 100644 --- a/consensus/spos/bls/v1/subroundBlock_test.go +++ b/consensus/spos/bls/v1/subroundBlock_test.go @@ -310,11 +310,11 @@ func TestSubroundBlock_NewSubroundBlockShouldWork(t *testing.T) { func TestSubroundBlock_DoBlockJob(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) r := sr.DoBlockJob() assert.False(t, r) - sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + sr.SetSelfPubKey(sr.Leader()) _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrBlock, true) r = sr.DoBlockJob() assert.False(t, r) @@ -354,12 +354,12 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderDataAlreadySet(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) sr.Data = []byte("some data") r := sr.ReceivedBlockBodyAndHeader(cnsMsg) @@ -370,7 +370,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderNodeNotLeaderInCurrentRound(t * t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} @@ -386,15 +386,15 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderCannotProcessJobDone(t *testing t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := 
initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) sr.Data = nil - _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrBlock, true) + _ = sr.SetJobDone(sr.Leader(), bls.SrBlock, true) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) @@ -411,12 +411,12 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderErrorDecoding(t *testing.T) { } container.SetBlockProcessor(blProc) - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) sr.Data = nil r := sr.ReceivedBlockBodyAndHeader(cnsMsg) @@ -428,12 +428,12 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderBodyAlreadyReceived(t *testing. t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) sr.Data = nil sr.Body = &block.Body{} @@ -446,12 +446,12 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderHeaderAlreadyReceived(t *testin t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) sr.Data = nil sr.Header = &block.Header{Nonce: 1} @@ -463,12 +463,14 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) t.Run("block is valid", func(t *testing.T) { hdr := createDefaultHeader() blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) + leader, err := sr.GetLeader() + require.Nil(t, err) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) sr.Data = nil r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.True(t, r) @@ -478,7 +480,9 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { Nonce: 1, } blkBody := &block.Body{} - cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) + leader, err := sr.GetLeader() + require.Nil(t, err) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) sr.Data = nil r := 
sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) @@ -514,16 +518,18 @@ func createConsensusMessage(header *block.Header, body *block.Body, leader []byt func TestSubroundBlock_ReceivedBlock(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) blockProcessorMock := consensusMock.InitBlockProcessorMock(container.Marshalizer()) blkBody := &block.Body{} blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) + leader, err := sr.GetLeader() + assert.Nil(t, err) cnsMsg := consensus.NewConsensusMessage( nil, nil, blkBodyStr, nil, - []byte(sr.ConsensusGroup()[0]), + []byte(leader), []byte("sig"), int(bls.MtBlockBody), 0, @@ -561,7 +567,7 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { nil, nil, hdrStr, - []byte(sr.ConsensusGroup()[0]), + []byte(leader), []byte("sig"), int(bls.MtBlockHeader), 0, @@ -607,13 +613,14 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenBodyAndHeaderAreNotSet(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + leader, _ := sr.GetLeader() cnsMsg := consensus.NewConsensusMessage( nil, nil, nil, nil, - []byte(sr.ConsensusGroup()[0]), + []byte(leader), []byte("sig"), int(bls.MtBlockBodyAndHeader), 0, @@ -630,7 +637,7 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenBodyAndHeaderAre func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFails(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) blProcMock := consensusMock.InitBlockProcessorMock(container.Marshalizer()) err := errors.New("error process block") blProcMock.ProcessBlockCalled = func(data.HeaderHandler, data.BodyHandler, func() time.Duration) error { @@ -640,12 +647,13 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFail hdr := &block.Header{} blkBody := &block.Body{} blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) + leader, _ := sr.GetLeader() cnsMsg := consensus.NewConsensusMessage( nil, nil, blkBodyStr, nil, - []byte(sr.ConsensusGroup()[0]), + []byte(leader), []byte("sig"), int(bls.MtBlockBody), 0, @@ -664,16 +672,17 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFail func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockReturnsInNextRound(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{} blkBody := &block.Body{} blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) + leader, _ := sr.GetLeader() cnsMsg := consensus.NewConsensusMessage( nil, nil, blkBodyStr, nil, - []byte(sr.ConsensusGroup()[0]), + []byte(leader), []byte("sig"), int(bls.MtBlockBody), 0, @@ -700,17 +709,18 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnTrue(t *testing.T) { consensusContainers := createConsensusContainers() for _, 
container := range consensusContainers { - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr, _ := container.BlockProcessor().CreateNewHeader(1, 1) hdr, blkBody, _ := container.BlockProcessor().CreateBlock(hdr, func() bool { return true }) blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) + leader, _ := sr.GetLeader() cnsMsg := consensus.NewConsensusMessage( nil, nil, blkBodyStr, nil, - []byte(sr.ConsensusGroup()[0]), + []byte(leader), []byte("sig"), int(bls.MtBlockBody), 0, @@ -733,7 +743,7 @@ func TestSubroundBlock_RemainingTimeShouldReturnNegativeValue(t *testing.T) { roundHandlerMock := initRoundHandlerMock() container.SetRoundHandler(roundHandlerMock) - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) remainingTimeInThisRound := func() time.Duration { roundStartTime := sr.RoundHandler().TimeStamp() currentTime := sr.SyncTimer().CurrentTime() @@ -764,7 +774,7 @@ func TestSubroundBlock_RemainingTimeShouldReturnNegativeValue(t *testing.T) { func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) sr.RoundCanceled = true assert.False(t, sr.DoBlockConsensusCheck()) } @@ -780,7 +790,7 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenSubroundIsFinish func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenBlockIsReceivedReturnTrue(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) for i := 0; i < sr.Threshold(bls.SrBlock); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, true) } @@ -790,14 +800,14 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenBlockIsReceivedR func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenBlockIsReceivedReturnFalse(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) assert.False(t, sr.DoBlockConsensusCheck()) } func TestSubroundBlock_IsBlockReceived(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) for i := 0; i < len(sr.ConsensusGroup()); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, false) _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, false) @@ -819,7 +829,7 @@ func TestSubroundBlock_IsBlockReceived(t *testing.T) { func TestSubroundBlock_HaveTimeInCurrentSubroundShouldReturnTrue(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) haveTimeInCurrentSubound := func() bool { roundStartTime := sr.RoundHandler().TimeStamp() currentTime := 
sr.SyncTimer().CurrentTime() @@ -849,7 +859,7 @@ func TestSubroundBlock_HaveTimeInCurrentSubroundShouldReturnTrue(t *testing.T) { func TestSubroundBlock_HaveTimeInCurrentSuboundShouldReturnFalse(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) haveTimeInCurrentSubound := func() bool { roundStartTime := sr.RoundHandler().TimeStamp() currentTime := sr.SyncTimer().CurrentTime() @@ -895,7 +905,7 @@ func TestSubroundBlock_CreateHeaderNilCurrentHeader(t *testing.T) { consensusContainers := createConsensusContainers() for _, container := range consensusContainers { - sr := *initSubroundBlock(blockChain, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(blockChain, container, &statusHandler.AppStatusHandlerStub{}) _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(nil, nil) header, _ := sr.CreateHeader() header, body, _ := sr.CreateBlock(header) @@ -926,7 +936,7 @@ func TestSubroundBlock_CreateHeaderNilCurrentHeader(t *testing.T) { func TestSubroundBlock_CreateHeaderNotNilCurrentHeader(t *testing.T) { consensusContainers := createConsensusContainers() for _, container := range consensusContainers { - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(&block.Header{ Nonce: 1, }, []byte("root hash")) @@ -979,7 +989,7 @@ func TestSubroundBlock_CreateHeaderMultipleMiniBlocks(t *testing.T) { return shardHeader, &block.Body{}, nil } - sr := *initSubroundBlockWithBlockProcessor(bp, container) + sr := initSubroundBlockWithBlockProcessor(bp, container) container.SetBlockchain(&blockChainMock) header, _ := sr.CreateHeader() @@ -1010,7 +1020,7 @@ func TestSubroundBlock_CreateHeaderNilMiniBlocks(t *testing.T) { bp.CreateBlockCalled = func(header data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { return nil, nil, expectedErr } - sr := *initSubroundBlockWithBlockProcessor(bp, container) + sr := initSubroundBlockWithBlockProcessor(bp, container) _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(&block.Header{ Nonce: 1, }, []byte("root hash")) @@ -1070,7 +1080,7 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDuration(t *testing.T) { return nil }, }) - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{ + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{ SetUInt64ValueHandler: func(key string, value uint64) { receivedValue = value }}) @@ -1078,12 +1088,14 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDuration(t *testing.T) { blkBody := &block.Body{} blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) + leader, err := sr.GetLeader() + assert.Nil(t, err) cnsMsg := consensus.NewConsensusMessage( nil, nil, blkBodyStr, nil, - []byte(sr.ConsensusGroup()[0]), + []byte(leader), []byte("sig"), int(bls.MtBlockBody), 0, From f474f1433deaf9f4a39a1d225103a354cfd82119 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Mon, 23 Sep 2024 11:36:35 +0300 Subject: [PATCH 07/30] cleanup and v1 unit tests fixes --- consensus/spos/bls/v1/export_test.go | 12 +- consensus/spos/bls/v1/subroundBlock_test.go | 4 +- .../spos/bls/v1/subroundEndRound_test.go | 127 ++++++++++-------- .../spos/bls/v1/subroundSignature_test.go | 28 ++-- 
.../spos/bls/v1/subroundStartRound_test.go | 34 +++-- consensus/spos/bls/v2/export_test.go | 12 +- consensus/spos/bls/v2/subroundBlock_test.go | 74 +++++----- .../spos/bls/v2/subroundSignature_test.go | 26 ++-- .../spos/bls/v2/subroundStartRound_test.go | 26 ++-- testscommon/consensus/mockTestInitializer.go | 4 +- 10 files changed, 183 insertions(+), 164 deletions(-) diff --git a/consensus/spos/bls/v1/export_test.go b/consensus/spos/bls/v1/export_test.go index 36755e05af3..452f9bb0d04 100644 --- a/consensus/spos/bls/v1/export_test.go +++ b/consensus/spos/bls/v1/export_test.go @@ -130,8 +130,8 @@ func (fct *factory) Outport() outport.OutportHandler { // subroundStartRound -// SubroundStartRound defines a type for the subroundStartRound structure -type SubroundStartRound *subroundStartRound +// SubroundStartRound defines an alias to the subroundStartRound structure +type SubroundStartRound = *subroundStartRound // DoStartRoundJob method does the job of the subround StartRound func (sr *subroundStartRound) DoStartRoundJob() bool { @@ -230,8 +230,8 @@ func (sr *subroundBlock) ReceivedBlockBodyAndHeader(cnsDta *consensus.Message) b // subroundSignature -// SubroundSignature defines a type for the subroundSignature structure -type SubroundSignature *subroundSignature +// SubroundSignature defines an alias for the subroundSignature structure +type SubroundSignature = *subroundSignature // DoSignatureJob method does the job of the subround Signature func (sr *subroundSignature) DoSignatureJob() bool { @@ -255,8 +255,8 @@ func (sr *subroundSignature) AreSignaturesCollected(threshold int) (bool, int) { // subroundEndRound -// SubroundEndRound defines a type for the subroundEndRound structure -type SubroundEndRound *subroundEndRound +// SubroundEndRound defines an alias for the subroundEndRound structure +type SubroundEndRound = *subroundEndRound // DoEndRoundJob method does the job of the subround EndRound func (sr *subroundEndRound) DoEndRoundJob() bool { diff --git a/consensus/spos/bls/v1/subroundBlock_test.go b/consensus/spos/bls/v1/subroundBlock_test.go index 1f42fdff49f..44bd8ad813b 100644 --- a/consensus/spos/bls/v1/subroundBlock_test.go +++ b/consensus/spos/bls/v1/subroundBlock_test.go @@ -782,7 +782,7 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenRoundIsCanceled func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenSubroundIsFinished(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) sr.SetStatus(bls.SrBlock, spos.SsFinished) assert.True(t, sr.DoBlockConsensusCheck()) } @@ -1134,7 +1134,7 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDurationWithZeroDurationShould ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) - srBlock := *defaultSubroundBlockWithoutErrorFromSubround(sr) + srBlock := defaultSubroundBlockWithoutErrorFromSubround(sr) srBlock.ComputeSubroundProcessingMetric(time.Now(), "dummy") } diff --git a/consensus/spos/bls/v1/subroundEndRound_test.go b/consensus/spos/bls/v1/subroundEndRound_test.go index 6d7f1ac391d..6cc7cbc75ff 100644 --- a/consensus/spos/bls/v1/subroundEndRound_test.go +++ b/consensus/spos/bls/v1/subroundEndRound_test.go @@ -408,7 +408,7 @@ func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { func 
TestSubroundEndRound_DoEndRoundJobErrAggregatingSigShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) signingHandler := &consensusMocks.SigningHandlerStub{ AggregateSigsCalled: func(bitmap []byte, epoch uint32) ([]byte, error) { @@ -420,6 +420,7 @@ func TestSubroundEndRound_DoEndRoundJobErrAggregatingSigShouldFail(t *testing.T) sr.Header = &block.Header{} sr.SetSelfPubKey("A") + sr.SetLeader("A") assert.True(t, sr.IsSelfLeaderInCurrentRound()) r := sr.DoEndRoundJob() @@ -430,8 +431,9 @@ func TestSubroundEndRound_DoEndRoundJobErrCommitBlockShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") + sr.SetLeader("A") blProcMock := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) blProcMock.CommitBlockCalled = func( @@ -452,8 +454,9 @@ func TestSubroundEndRound_DoEndRoundJobErrTimeIsOutShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") + sr.SetLeader("A") remainingTime := time.Millisecond roundHandlerMock := &consensusMocks.RoundHandlerMock{ @@ -484,8 +487,9 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastBlockOK(t *testing.T) { }, } container.SetBroadcastMessenger(bm) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") + sr.SetLeader("A") sr.Header = &block.Header{} @@ -518,8 +522,9 @@ func TestSubroundEndRound_DoEndRoundJobErrMarshalizedDataToBroadcastOK(t *testin }, } container.SetBroadcastMessenger(bm) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") + sr.SetLeader("A") sr.Header = &block.Header{} @@ -553,8 +558,9 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastMiniBlocksOK(t *testing.T) { }, } container.SetBroadcastMessenger(bm) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") + sr.SetLeader("A") sr.Header = &block.Header{} @@ -589,8 +595,9 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastTransactionsOK(t *testing.T) }, } container.SetBroadcastMessenger(bm) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") + sr.SetLeader("A") sr.Header = &block.Header{} @@ -610,8 +617,9 @@ func TestSubroundEndRound_DoEndRoundJobAllOK(t *testing.T) { }, } container.SetBroadcastMessenger(bm) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, 
&statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") + sr.SetLeader("A") sr.Header = &block.Header{} @@ -638,8 +646,9 @@ func TestSubroundEndRound_CheckIfSignatureIsFilled(t *testing.T) { }, } container.SetBroadcastMessenger(bm) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") + sr.SetLeader("A") sr.Header = &block.Header{Nonce: 5} @@ -651,7 +660,7 @@ func TestSubroundEndRound_CheckIfSignatureIsFilled(t *testing.T) { func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { t.Parallel() - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) sr.RoundCanceled = true ok := sr.DoEndRoundConsensusCheck() @@ -661,7 +670,7 @@ func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnFalseWhenRoundIsCa func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnTrueWhenRoundIsFinished(t *testing.T) { t.Parallel() - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) sr.SetStatus(bls.SrEndRound, spos.SsFinished) ok := sr.DoEndRoundConsensusCheck() @@ -671,7 +680,7 @@ func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnTrueWhenRoundIsFin func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnFalseWhenRoundIsNotFinished(t *testing.T) { t.Parallel() - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) ok := sr.DoEndRoundConsensusCheck() assert.False(t, ok) @@ -680,7 +689,7 @@ func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnFalseWhenRoundIsNo func TestSubroundEndRound_CheckSignaturesValidityShouldErrNilSignature(t *testing.T) { t.Parallel() - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) err := sr.CheckSignaturesValidity([]byte{2}) assert.Equal(t, spos.ErrNilSignature, err) @@ -689,7 +698,7 @@ func TestSubroundEndRound_CheckSignaturesValidityShouldErrNilSignature(t *testin func TestSubroundEndRound_CheckSignaturesValidityShouldReturnNil(t *testing.T) { t.Parallel() - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) @@ -700,7 +709,7 @@ func TestSubroundEndRound_CheckSignaturesValidityShouldReturnNil(t *testing.T) { func TestSubroundEndRound_DoEndRoundJobByParticipant_RoundCanceledShouldReturnFalse(t *testing.T) { t.Parallel() - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) sr.RoundCanceled = true cnsData := consensus.Message{} @@ -711,7 +720,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_RoundCanceledShouldReturnFa func TestSubroundEndRound_DoEndRoundJobByParticipant_ConsensusDataNotSetShouldReturnFalse(t *testing.T) { t.Parallel() - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) sr.Data = nil cnsData := consensus.Message{} @@ -722,7 +731,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_ConsensusDataNotSetShouldRe func 
TestSubroundEndRound_DoEndRoundJobByParticipant_PreviousSubroundNotFinishedShouldReturnFalse(t *testing.T) { t.Parallel() - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) sr.SetStatus(2, spos.SsNotFinished) cnsData := consensus.Message{} res := sr.DoEndRoundJobByParticipant(&cnsData) @@ -732,7 +741,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_PreviousSubroundNotFinished func TestSubroundEndRound_DoEndRoundJobByParticipant_CurrentSubroundFinishedShouldReturnFalse(t *testing.T) { t.Parallel() - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) // set previous as finished sr.SetStatus(2, spos.SsFinished) @@ -748,7 +757,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_CurrentSubroundFinishedShou func TestSubroundEndRound_DoEndRoundJobByParticipant_ConsensusHeaderNotReceivedShouldReturnFalse(t *testing.T) { t.Parallel() - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) // set previous as finished sr.SetStatus(2, spos.SsFinished) @@ -765,7 +774,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_ShouldReturnTrue(t *testing t.Parallel() hdr := &block.Header{Nonce: 37} - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) sr.Header = hdr sr.AddReceivedHeader(hdr) @@ -784,7 +793,7 @@ func TestSubroundEndRound_IsConsensusHeaderReceived_NoReceivedHeadersShouldRetur t.Parallel() hdr := &block.Header{Nonce: 37} - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) sr.Header = hdr res, retHdr := sr.IsConsensusHeaderReceived() @@ -797,7 +806,7 @@ func TestSubroundEndRound_IsConsensusHeaderReceived_HeaderNotReceivedShouldRetur hdr := &block.Header{Nonce: 37} hdrToSearchFor := &block.Header{Nonce: 38} - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) sr.AddReceivedHeader(hdr) sr.Header = hdrToSearchFor @@ -810,7 +819,7 @@ func TestSubroundEndRound_IsConsensusHeaderReceivedShouldReturnTrue(t *testing.T t.Parallel() hdr := &block.Header{Nonce: 37} - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) sr.Header = hdr sr.AddReceivedHeader(hdr) @@ -822,7 +831,7 @@ func TestSubroundEndRound_IsConsensusHeaderReceivedShouldReturnTrue(t *testing.T func TestSubroundEndRound_HaveConsensusHeaderWithFullInfoNilHdrShouldNotWork(t *testing.T) { t.Parallel() - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) cnsData := consensus.Message{} @@ -845,7 +854,7 @@ func TestSubroundEndRound_HaveConsensusHeaderWithFullInfoShouldWork(t *testing.T Signature: originalSig, LeaderSignature: originalLeaderSig, } - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) sr.Header = &hdr cnsData := consensus.Message{ @@ -875,7 +884,7 @@ func TestSubroundEndRound_CreateAndBroadcastHeaderFinalInfoBroadcastShouldBeCall }, } container.SetBroadcastMessenger(messenger) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := 
initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.Header = &block.Header{LeaderSignature: leaderSigInHdr} sr.CreateAndBroadcastHeaderFinalInfo() @@ -891,7 +900,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldWork(t *testing.T) { t.Parallel() hdr := &block.Header{Nonce: 37} - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) sr.Header = hdr sr.AddReceivedHeader(hdr) @@ -923,7 +932,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldReturnFalseWhenFinal } container.SetHeaderSigVerifier(headerSigVerifier) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), PubKey: []byte("A"), @@ -936,7 +945,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldReturnFalseWhenFinal func TestSubroundEndRound_IsOutOfTimeShouldReturnFalse(t *testing.T) { t.Parallel() - sr := *initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) res := sr.IsOutOfTime() assert.False(t, res) @@ -955,7 +964,7 @@ func TestSubroundEndRound_IsOutOfTimeShouldReturnTrue(t *testing.T) { return remainingTime }} container.SetRoundHandler(&roundHandler) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.RoundTimeStamp = time.Now().AddDate(0, 0, -1) @@ -978,7 +987,7 @@ func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerify } container.SetHeaderSigVerifier(headerSigVerifier) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsDta := &consensus.Message{} sr.Header = &block.Header{} isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) @@ -1000,7 +1009,7 @@ func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerify } container.SetHeaderSigVerifier(headerSigVerifier) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsDta := &consensus.Message{} sr.Header = &block.Header{} isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) @@ -1022,7 +1031,7 @@ func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnTrue(t *testing } container.SetHeaderSigVerifier(headerSigVerifier) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsDta := &consensus.Message{} sr.Header = &block.Header{} isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) @@ -1036,7 +1045,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) expectedErr := errors.New("exptected error") signingHandler := &consensusMocks.SigningHandlerStub{ @@ -1058,7 +1067,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { t.Parallel() 
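A note on the recurring "- sr := *initSubround..." / "+ sr := initSubround..." hunks in this patch: the exported test types in export_test.go change from defined pointer types, e.g. type SubroundEndRound *subroundEndRound, to type aliases, type SubroundEndRound = *subroundEndRound. A defined type whose underlying type is a pointer has an empty method set, so the old helpers had to be dereferenced at every call site, which also produced a copy of the subround struct; with the alias the helper already returns a plain *subroundEndRound whose methods are callable directly. A minimal, self-contained sketch of the difference, using illustrative names that do not exist in the repository:

    package main

    import "fmt"

    type widget struct{ n int }

    func (w *widget) Bump() { w.n++ }

    // defined type over a pointer: empty method set, Bump is not reachable
    type WidgetDefined *widget

    // type alias: identical to *widget, Bump is reachable directly
    type WidgetAlias = *widget

    func main() {
        var a WidgetAlias = &widget{}
        a.Bump() // compiles: a really is a *widget

        var d WidgetDefined = &widget{}
        w := *d  // old test pattern: dereference into a value copy first
        w.Bump() // works, but mutates the copy, not the original
        // d.Bump() would not compile: WidgetDefined has no methods

        fmt.Println(a.n, w.n)
    }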
container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) expectedErr := errors.New("exptected error") signingHandler := &consensusMocks.SigningHandlerStub{ @@ -1086,7 +1095,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) signingHandler := &consensusMocks.SigningHandlerStub{ SignatureShareCalled: func(index uint16) ([]byte, error) { return nil, nil @@ -1117,7 +1126,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.Header = &block.Header{} sr.SetThreshold(bls.SrEndRound, 2) @@ -1129,7 +1138,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) expectedErr := errors.New("exptected error") signingHandler := &consensusMocks.SigningHandlerStub{ @@ -1150,7 +1159,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) expectedErr := errors.New("exptected error") signingHandler := &consensusMocks.SigningHandlerStub{ @@ -1170,7 +1179,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.Header = &block.Header{} _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) @@ -1188,7 +1197,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeaderVerificationFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) verifySigShareNumCalls := 0 verifyFirstCall := true @@ -1235,7 +1244,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeaderVerificationFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) verifySigShareNumCalls := 0 verifyFirstCall := true @@ -1288,7 +1297,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) 
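The sr.SetLeader("A") lines added alongside sr.SetSelfPubKey("A") throughout these end-round tests exist because the leader is now set explicitly on the consensus state instead of being implied by the consensus group, so a test that needs the self node to act as leader must set both. A short sketch of the recurring setup, assuming (as the hunks above suggest) that IsSelfLeaderInCurrentRound compares the self public key with the value stored via SetLeader:

    sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{})
    sr.SetSelfPubKey("A") // this node's own key
    sr.SetLeader("A")     // mark the same key as the round's leader
    // without the SetLeader call the leader-only code paths under test
    // (DoEndRoundJob, CreateAndBroadcastInvalidSigners, ...) are skipped
    assert.True(t, sr.IsSelfLeaderInCurrentRound())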
sr.ConsensusState.Data = nil cnsData := consensus.Message{ @@ -1305,7 +1314,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), @@ -1321,8 +1330,9 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") + sr.SetLeader("A") cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), @@ -1385,7 +1395,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsData := consensus.Message{ BlockHeaderHash: []byte("Y"), @@ -1401,7 +1411,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), @@ -1418,7 +1428,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), PubKey: []byte("A"), @@ -1442,7 +1452,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() container.SetMessageSigningHandler(messageSigningHandler) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), PubKey: []byte("A"), @@ -1458,7 +1468,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), @@ -1488,7 +1498,7 @@ func TestVerifyInvalidSigners(t *testing.T) { container.SetMessageSigningHandler(messageSigningHandler) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) err := sr.VerifyInvalidSigners([]byte{}) require.Equal(t, expectedErr, err) @@ -1517,7 +1527,7 @@ func TestVerifyInvalidSigners(t *testing.T) { container.SetMessageSigningHandler(messageSigningHandler) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, 
&statusHandler.AppStatusHandlerStub{}) err := sr.VerifyInvalidSigners(invalidSignersBytes) require.Equal(t, expectedErr, err) @@ -1559,7 +1569,7 @@ func TestVerifyInvalidSigners(t *testing.T) { container.SetSigningHandler(signingHandler) container.SetMessageSigningHandler(messageSigningHandler) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) err := sr.VerifyInvalidSigners(invalidSignersBytes) require.Nil(t, err) @@ -1587,7 +1597,7 @@ func TestVerifyInvalidSigners(t *testing.T) { messageSigningHandler := &mock.MessageSignerMock{} container.SetMessageSigningHandler(messageSigningHandler) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) err := sr.VerifyInvalidSigners(invalidSignersBytes) require.Nil(t, err) @@ -1619,7 +1629,7 @@ func TestSubroundEndRound_CreateAndBroadcastInvalidSigners(t *testing.T) { }, } container.SetBroadcastMessenger(messenger) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) }) @@ -1642,8 +1652,9 @@ func TestSubroundEndRound_CreateAndBroadcastInvalidSigners(t *testing.T) { }, } container.SetBroadcastMessenger(messenger) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") + sr.SetLeader("A") sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) @@ -1671,7 +1682,7 @@ func TestGetFullMessagesForInvalidSigners(t *testing.T) { container.SetMessageSigningHandler(messageSigningHandler) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) invalidSigners := []string{"B", "C"} invalidSignersBytes, err := sr.GetFullMessagesForInvalidSigners(invalidSigners) @@ -1696,7 +1707,7 @@ func TestGetFullMessagesForInvalidSigners(t *testing.T) { container.SetMessageSigningHandler(messageSigningHandler) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.AddMessageWithSignature("B", &p2pmocks.P2PMessageMock{}) sr.AddMessageWithSignature("C", &p2pmocks.P2PMessageMock{}) diff --git a/consensus/spos/bls/v1/subroundSignature_test.go b/consensus/spos/bls/v1/subroundSignature_test.go index a31bf841740..8f6c673978e 100644 --- a/consensus/spos/bls/v1/subroundSignature_test.go +++ b/consensus/spos/bls/v1/subroundSignature_test.go @@ -340,7 +340,7 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) sr.Header = &block.Header{} sr.Data = nil @@ -470,7 +470,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { func TestSubroundSignature_ReceivedSignature(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() signature := []byte("signature") cnsMsg := consensus.NewConsensusMessage( sr.Data, 
@@ -541,7 +541,7 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { container := consensusMocks.InitConsensusCore() container.SetSigningHandler(signingHandler) - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) sr.Header = &block.Header{} signature := []byte("signature") @@ -600,7 +600,7 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { func TestSubroundSignature_SignaturesCollected(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() for i := 0; i < len(sr.ConsensusGroup()); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, false) @@ -629,7 +629,7 @@ func TestSubroundSignature_SignaturesCollected(t *testing.T) { func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() sr.RoundCanceled = true assert.False(t, sr.DoSignatureConsensusCheck()) } @@ -637,7 +637,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenRoundIs func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSubroundIsFinished(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() sr.SetStatus(bls.SrSignature, spos.SsFinished) assert.True(t, sr.DoSignatureConsensusCheck()) } @@ -645,7 +645,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSubround func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSignaturesCollectedReturnTrue(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() for i := 0; i < sr.Threshold(bls.SrSignature); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) @@ -657,7 +657,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSignatur func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenSignaturesCollectedReturnFalse(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() assert.False(t, sr.DoSignatureConsensusCheck()) } @@ -665,7 +665,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenNotAllS t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = false sr.SetSelfPubKey(sr.ConsensusGroup()[0]) @@ -681,7 +681,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenAllSigna t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = false sr.SetSelfPubKey(sr.ConsensusGroup()[0]) @@ -697,7 +697,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenEnoughBu t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = true sr.SetSelfPubKey(sr.ConsensusGroup()[0]) @@ -718,7 +718,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenFallbac return false }, }) - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = false sr.SetSelfPubKey(sr.ConsensusGroup()[0]) 
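A related pattern in this series: where older tests addressed the leader as sr.ConsensusGroup()[0] (still visible in the v1 hunks just above), the reworked tests obtain it through sr.GetLeader() and often assert on the returned error, so they stay correct even if the leader stops being the first member of the consensus group. The shape used by the block and signature tests, sketched here only for the leader-related lines (the remaining consensus message arguments are unchanged and omitted):

    leader, err := sr.GetLeader()
    assert.Nil(t, err)
    // the consensus message is then built with []byte(leader) where the
    // old code passed []byte(sr.ConsensusGroup()[0])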
@@ -739,7 +739,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenFallback return true }, }) - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = true sr.SetSelfPubKey(sr.ConsensusGroup()[0]) @@ -754,7 +754,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenFallback func TestSubroundSignature_ReceivedSignatureReturnFalseWhenConsensusDataIsNotEqual(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() cnsMsg := consensus.NewConsensusMessage( append(sr.Data, []byte("X")...), diff --git a/consensus/spos/bls/v1/subroundStartRound_test.go b/consensus/spos/bls/v1/subroundStartRound_test.go index 8910fffc3aa..7bb0fccbf7e 100644 --- a/consensus/spos/bls/v1/subroundStartRound_test.go +++ b/consensus/spos/bls/v1/subroundStartRound_test.go @@ -330,7 +330,7 @@ func TestSubroundStartRound_DoStartRoundShouldReturnTrue(t *testing.T) { sr, _ := defaultSubround(consensusState, ch, container) - srStartRound := *defaultWithoutErrorSubroundStartRoundFromSubround(sr) + srStartRound := defaultWithoutErrorSubroundStartRoundFromSubround(sr) r := srStartRound.DoStartRoundJob() assert.True(t, r) @@ -339,7 +339,7 @@ func TestSubroundStartRound_DoStartRoundShouldReturnTrue(t *testing.T) { func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { t.Parallel() - sr := *initSubroundStartRound() + sr := initSubroundStartRound() sr.RoundCanceled = true @@ -350,7 +350,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenRound func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenRoundIsFinished(t *testing.T) { t.Parallel() - sr := *initSubroundStartRound() + sr := initSubroundStartRound() sr.SetStatus(bls.SrStartRound, spos.SsFinished) @@ -368,7 +368,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenInitCu container := consensusMocks.InitConsensusCore() container.SetBootStrapper(bootstrapperMock) - sr := *initSubroundStartRoundWithContainer(container) + sr := initSubroundStartRoundWithContainer(container) sentTrackerInterface := sr.GetSentSignatureTracker() sentTracker := sentTrackerInterface.(*testscommon.SentSignatureTrackerStub) startRoundCalled := false @@ -392,7 +392,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenInitC container.SetBootStrapper(bootstrapperMock) container.SetRoundHandler(initRoundHandlerMock()) - sr := *initSubroundStartRoundWithContainer(container) + sr := initSubroundStartRoundWithContainer(container) ok := sr.DoStartRoundConsensusCheck() assert.False(t, ok) @@ -409,7 +409,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetNodeStateNot container := consensusMocks.InitConsensusCore() container.SetBootStrapper(bootstrapperMock) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.False(t, r) @@ -426,7 +426,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextCon container := consensusMocks.InitConsensusCore() container.SetValidatorGroupSelector(validatorGroupSelector) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.False(t, r) @@ -443,7 +443,7 @@ func 
TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenMainMachineIsAct container := consensusMocks.InitConsensusCore() container.SetNodeRedundancyHandler(nodeRedundancyMock) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.True(t, r) @@ -453,19 +453,24 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetLeaderErr(t t.Parallel() validatorGroupSelector := &shardingMocks.NodesCoordinatorMock{} + leader := &shardingMocks.ValidatorMock{PubKeyCalled: func() []byte { + return []byte("leader") + }} + validatorGroupSelector.ComputeValidatorsGroupCalled = func( bytes []byte, round uint64, shardId uint32, epoch uint32, ) (nodesCoordinator.Validator, []nodesCoordinator.Validator, error) { - return nil, make([]nodesCoordinator.Validator, 0), nil + // will cause an error in GetLeader because of empty consensus group + return leader, []nodesCoordinator.Validator{}, nil } container := consensusMocks.InitConsensusCore() container.SetValidatorGroupSelector(validatorGroupSelector) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.False(t, r) @@ -481,7 +486,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenIsNotInTheConsen sr, _ := defaultSubround(consensusState, ch, container) - srStartRound := *defaultWithoutErrorSubroundStartRoundFromSubround(sr) + srStartRound := defaultWithoutErrorSubroundStartRoundFromSubround(sr) r := srStartRound.InitCurrentRound() assert.True(t, r) @@ -499,7 +504,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenTimeIsOut(t *te container := consensusMocks.InitConsensusCore() container.SetRoundHandler(roundHandlerMock) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.False(t, r) @@ -517,7 +522,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrue(t *testing.T) { container := consensusMocks.InitConsensusCore() container.SetBootStrapper(bootstrapperMock) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.True(t, r) @@ -714,6 +719,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { consensusState := initConsensusStateWithKeysHandler(keysHandler) leader, _ := consensusState.GetLeader() consensusState.SetSelfPubKey(leader) + sr, _ := spos.NewSubround( -1, bls.SrStartRound, @@ -830,7 +836,7 @@ func TestSubroundStartRound_GenerateNextConsensusGroupShouldReturnErr(t *testing container := consensusMocks.InitConsensusCore() container.SetValidatorGroupSelector(validatorGroupSelector) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) err2 := srStartRound.GenerateNextConsensusGroup(0) diff --git a/consensus/spos/bls/v2/export_test.go b/consensus/spos/bls/v2/export_test.go index 33f0ddadb3a..e3c1d9caff1 100644 --- a/consensus/spos/bls/v2/export_test.go +++ b/consensus/spos/bls/v2/export_test.go @@ -130,8 +130,8 @@ func (fct *factory) Outport() outport.OutportHandler { // subroundStartRound -// SubroundStartRound defines a type for the subroundStartRound structure -type SubroundStartRound *subroundStartRound +// 
SubroundStartRound defines an alias for the subroundStartRound structure +type SubroundStartRound = *subroundStartRound // DoStartRoundJob method does the job of the subround StartRound func (sr *subroundStartRound) DoStartRoundJob() bool { @@ -160,8 +160,8 @@ func (sr *subroundStartRound) GetSentSignatureTracker() spos.SentSignaturesTrack // subroundBlock -// SubroundBlock defines a type for the subroundBlock structure -type SubroundBlock *subroundBlock +// SubroundBlock defines an alias for the subroundBlock structure +type SubroundBlock = *subroundBlock // Blockchain gets the ChainHandler stored in the ConsensusCore func (sr *subroundBlock) BlockChain() data.ChainHandler { @@ -235,8 +235,8 @@ func (sr *subroundBlock) ReceivedBlockBodyAndHeader(cnsDta *consensus.Message) b // subroundSignature -// SubroundSignature defines a type for the subroundSignature structure -type SubroundSignature *subroundSignature +// SubroundSignature defines an alias to the subroundSignature structure +type SubroundSignature = *subroundSignature // DoSignatureJob method does the job of the subround Signature func (sr *subroundSignature) DoSignatureJob() bool { diff --git a/consensus/spos/bls/v2/subroundBlock_test.go b/consensus/spos/bls/v2/subroundBlock_test.go index 209d10d15bb..b1163137262 100644 --- a/consensus/spos/bls/v2/subroundBlock_test.go +++ b/consensus/spos/bls/v2/subroundBlock_test.go @@ -336,14 +336,14 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { t.Run("not leader should return false", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) r := sr.DoBlockJob() assert.False(t, r) }) t.Run("round index lower than last committed block should return false", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) leader, err := sr.GetLeader() assert.Nil(t, err) @@ -355,7 +355,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { t.Run("leader job done should return false", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) container.SetRoundHandler(&testscommon.RoundHandlerMock{ IndexCalled: func() int64 { @@ -372,7 +372,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { t.Run("subround finished should return false", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) container.SetRoundHandler(&testscommon.RoundHandlerMock{ IndexCalled: func() int64 { @@ -390,7 +390,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { t.Run("create header error should return false", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) container.SetRoundHandler(&testscommon.RoundHandlerMock{ IndexCalled: func() int64 { @@ -413,7 +413,7 @@ func 
TestSubroundBlock_DoBlockJob(t *testing.T) { t.Run("create block error should return false", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) container.SetRoundHandler(&testscommon.RoundHandlerMock{ IndexCalled: func() int64 { @@ -438,7 +438,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { t.Run("send block error should return false", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) container.SetRoundHandler(&testscommon.RoundHandlerMock{ IndexCalled: func() int64 { @@ -557,7 +557,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { t.Run("should work, equivalent messages flag not enabled", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) container.SetRoundHandler(&testscommon.RoundHandlerMock{ IndexCalled: func() int64 { @@ -590,7 +590,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderDataAlreadySet(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} @@ -608,7 +608,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderNodeNotLeaderInCurrentRound(t * t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} @@ -624,7 +624,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderCannotProcessJobDone(t *testing t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} @@ -651,7 +651,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderErrorDecoding(t *testing.T) { } container.SetBlockProcessor(blProc) - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} @@ -670,7 +670,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderBodyAlreadyReceived(t *testing. 
t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} @@ -690,7 +690,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderHeaderAlreadyReceived(t *testin t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} @@ -712,7 +712,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := createDefaultHeader() blkBody := &block.Body{} leader, err := sr.GetLeader() @@ -726,7 +726,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{ Nonce: 1, } @@ -758,7 +758,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { return &block.HeaderV2{} }, } - sr := *initSubroundBlock(chainHandler, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(chainHandler, container, &statusHandler.AppStatusHandlerStub{}) blkBody := &block.Body{} hdr := &block.HeaderV2{ Header: createDefaultHeader(), @@ -810,7 +810,7 @@ func createConsensusMessage(header data.HeaderHandler, body *block.Body, leader func TestSubroundBlock_ReceivedBlock(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) blockProcessorMock := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) blkBody := &block.Body{} blkBodyStr, _ := mock.MarshalizerMock{}.Marshal(blkBody) @@ -906,7 +906,7 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenBodyAndHeaderAreNotSet(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) leader, _ := sr.GetLeader() cnsMsg := consensus.NewConsensusMessage( nil, @@ -930,7 +930,7 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenBodyAndHeaderAre func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFails(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) blProcMock := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) err := errors.New("error process block") blProcMock.ProcessBlockCalled = func(data.HeaderHandler, data.BodyHandler, func() time.Duration) error { @@ -965,7 +965,7 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFail func 
TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockReturnsInNextRound(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{} blkBody := &block.Body{} blkBodyStr, _ := mock.MarshalizerMock{}.Marshal(blkBody) @@ -1002,7 +1002,7 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnTrue(t *testing.T) { consensusContainers := createConsensusContainers() for _, container := range consensusContainers { - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr, _ := container.BlockProcessor().CreateNewHeader(1, 1) hdr, blkBody, _ := container.BlockProcessor().CreateBlock(hdr, func() bool { return true }) @@ -1036,7 +1036,7 @@ func TestSubroundBlock_RemainingTimeShouldReturnNegativeValue(t *testing.T) { roundHandlerMock := initRoundHandlerMock() container.SetRoundHandler(roundHandlerMock) - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) remainingTimeInThisRound := func() time.Duration { roundStartTime := sr.RoundHandler().TimeStamp() currentTime := sr.SyncTimer().CurrentTime() @@ -1067,7 +1067,7 @@ func TestSubroundBlock_RemainingTimeShouldReturnNegativeValue(t *testing.T) { func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) sr.RoundCanceled = true assert.False(t, sr.DoBlockConsensusCheck()) } @@ -1075,7 +1075,7 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenRoundIsCanceled func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenSubroundIsFinished(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) sr.SetStatus(bls.SrBlock, spos.SsFinished) assert.True(t, sr.DoBlockConsensusCheck()) } @@ -1083,7 +1083,7 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenSubroundIsFinish func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenBlockIsReceivedReturnTrue(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) for i := 0; i < sr.Threshold(bls.SrBlock); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, true) } @@ -1093,14 +1093,14 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenBlockIsReceivedR func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenBlockIsReceivedReturnFalse(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) assert.False(t, sr.DoBlockConsensusCheck()) } func TestSubroundBlock_IsBlockReceived(t *testing.T) { t.Parallel() container := 
consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) for i := 0; i < len(sr.ConsensusGroup()); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, false) _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, false) @@ -1122,7 +1122,7 @@ func TestSubroundBlock_IsBlockReceived(t *testing.T) { func TestSubroundBlock_HaveTimeInCurrentSubroundShouldReturnTrue(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) haveTimeInCurrentSubound := func() bool { roundStartTime := sr.RoundHandler().TimeStamp() currentTime := sr.SyncTimer().CurrentTime() @@ -1152,7 +1152,7 @@ func TestSubroundBlock_HaveTimeInCurrentSubroundShouldReturnTrue(t *testing.T) { func TestSubroundBlock_HaveTimeInCurrentSuboundShouldReturnFalse(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) haveTimeInCurrentSubound := func() bool { roundStartTime := sr.RoundHandler().TimeStamp() currentTime := sr.SyncTimer().CurrentTime() @@ -1198,7 +1198,7 @@ func TestSubroundBlock_CreateHeaderNilCurrentHeader(t *testing.T) { consensusContainers := createConsensusContainers() for _, container := range consensusContainers { - sr := *initSubroundBlock(blockChain, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(blockChain, container, &statusHandler.AppStatusHandlerStub{}) _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(nil, nil) header, _ := sr.CreateHeader() header, body, _ := sr.CreateBlock(header) @@ -1229,7 +1229,7 @@ func TestSubroundBlock_CreateHeaderNilCurrentHeader(t *testing.T) { func TestSubroundBlock_CreateHeaderNotNilCurrentHeader(t *testing.T) { consensusContainers := createConsensusContainers() for _, container := range consensusContainers { - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(&block.Header{ Nonce: 1, }, []byte("root hash")) @@ -1282,7 +1282,7 @@ func TestSubroundBlock_CreateHeaderMultipleMiniBlocks(t *testing.T) { return shardHeader, &block.Body{}, nil } - sr := *initSubroundBlockWithBlockProcessor(bp, container) + sr := initSubroundBlockWithBlockProcessor(bp, container) container.SetBlockchain(&blockChainMock) header, _ := sr.CreateHeader() @@ -1313,7 +1313,7 @@ func TestSubroundBlock_CreateHeaderNilMiniBlocks(t *testing.T) { bp.CreateBlockCalled = func(header data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { return nil, nil, expectedErr } - sr := *initSubroundBlockWithBlockProcessor(bp, container) + sr := initSubroundBlockWithBlockProcessor(bp, container) _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(&block.Header{ Nonce: 1, }, []byte("root hash")) @@ -1373,7 +1373,7 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDuration(t *testing.T) { return nil }, }) - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{ + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{ SetUInt64ValueHandler: func(key 
string, value uint64) { receivedValue = value }}) @@ -1427,7 +1427,7 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDurationWithZeroDurationShould ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) - srBlock := *defaultSubroundBlockWithoutErrorFromSubround(sr) + srBlock := defaultSubroundBlockWithoutErrorFromSubround(sr) srBlock.ComputeSubroundProcessingMetric(time.Now(), "dummy") } @@ -1436,7 +1436,7 @@ func TestSubroundBlock_ReceivedBlockHeader(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) // nil header sr.ReceivedBlockHeader(nil) diff --git a/consensus/spos/bls/v2/subroundSignature_test.go b/consensus/spos/bls/v2/subroundSignature_test.go index f0c8dc00644..b0f0dc060b0 100644 --- a/consensus/spos/bls/v2/subroundSignature_test.go +++ b/consensus/spos/bls/v2/subroundSignature_test.go @@ -415,7 +415,7 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) sr.Header = &block.Header{} sr.Data = nil @@ -489,7 +489,7 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { }, } container.SetEnableEpochsHandler(enableEpochsHandler) - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) sr.Header = &block.Header{} leader, err := sr.GetLeader() @@ -1087,7 +1087,7 @@ func TestSubroundSignature_ReceivedSignature(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) signature := []byte("signature") cnsMsg := consensus.NewConsensusMessage( sr.Data, @@ -1169,7 +1169,7 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { container := consensusMocks.InitConsensusCore() container.SetSigningHandler(signingHandler) - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) sr.Header = &block.Header{} signature := []byte("signature") @@ -1230,7 +1230,7 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { func TestSubroundSignature_SignaturesCollected(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() for i := 0; i < len(sr.ConsensusGroup()); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, false) @@ -1259,7 +1259,7 @@ func TestSubroundSignature_SignaturesCollected(t *testing.T) { func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() sr.RoundCanceled = true assert.False(t, sr.DoSignatureConsensusCheck()) } @@ -1267,7 +1267,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenRoundIs func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSubroundIsFinished(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() sr.SetStatus(bls.SrSignature, spos.SsFinished) assert.True(t, sr.DoSignatureConsensusCheck()) } @@ -1275,7 +1275,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSubround func 
TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSignaturesCollectedReturnTrue(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() for i := 0; i < sr.Threshold(bls.SrSignature); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) @@ -1288,7 +1288,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSignatur func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenSignaturesCollectedReturnFalse(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() sr.Header = &block.HeaderV2{Header: createDefaultHeader()} assert.False(t, sr.DoSignatureConsensusCheck()) } @@ -1363,7 +1363,7 @@ func testSubroundSignatureDoSignatureConsensusCheck(args argTestSubroundSignatur return false }, }) - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = args.waitingAllSignaturesTimeOut if !args.flagActive { @@ -1394,7 +1394,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenFallbac return false }, }) - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = false leader, err := sr.GetLeader() @@ -1417,7 +1417,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenFallback return true }, }) - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = true leader, err := sr.GetLeader() @@ -1435,7 +1435,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenFallback func TestSubroundSignature_ReceivedSignatureReturnFalseWhenConsensusDataIsNotEqual(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() leader, err := sr.GetLeader() require.Nil(t, err) diff --git a/consensus/spos/bls/v2/subroundStartRound_test.go b/consensus/spos/bls/v2/subroundStartRound_test.go index ba042643986..7f9c03de7dd 100644 --- a/consensus/spos/bls/v2/subroundStartRound_test.go +++ b/consensus/spos/bls/v2/subroundStartRound_test.go @@ -294,7 +294,7 @@ func TestSubroundStartRound_DoStartRoundShouldReturnTrue(t *testing.T) { sr, _ := defaultSubround(consensusState, ch, container) - srStartRound := *defaultWithoutErrorSubroundStartRoundFromSubround(sr) + srStartRound := defaultWithoutErrorSubroundStartRoundFromSubround(sr) r := srStartRound.DoStartRoundJob() assert.True(t, r) @@ -303,7 +303,7 @@ func TestSubroundStartRound_DoStartRoundShouldReturnTrue(t *testing.T) { func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { t.Parallel() - sr := *initSubroundStartRound() + sr := initSubroundStartRound() sr.RoundCanceled = true @@ -314,7 +314,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenRound func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenRoundIsFinished(t *testing.T) { t.Parallel() - sr := *initSubroundStartRound() + sr := initSubroundStartRound() sr.SetStatus(bls.SrStartRound, spos.SsFinished) @@ -332,7 +332,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenInitCu container := consensus.InitConsensusCore() container.SetBootStrapper(bootstrapperMock) - sr := *initSubroundStartRoundWithContainer(container) + sr := initSubroundStartRoundWithContainer(container) sentTrackerInterface := 
sr.GetSentSignatureTracker() sentTracker := sentTrackerInterface.(*testscommon.SentSignatureTrackerStub) startRoundCalled := false @@ -356,7 +356,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenInitC container.SetBootStrapper(bootstrapperMock) container.SetRoundHandler(initRoundHandlerMock()) - sr := *initSubroundStartRoundWithContainer(container) + sr := initSubroundStartRoundWithContainer(container) ok := sr.DoStartRoundConsensusCheck() assert.False(t, ok) @@ -373,7 +373,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetNodeStateNot container := consensus.InitConsensusCore() container.SetBootStrapper(bootstrapperMock) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.False(t, r) @@ -391,7 +391,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextCon container.SetValidatorGroupSelector(validatorGroupSelector) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.False(t, r) @@ -408,7 +408,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenMainMachineIsAct container := consensus.InitConsensusCore() container.SetNodeRedundancyHandler(nodeRedundancyMock) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.True(t, r) @@ -435,7 +435,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetLeaderErr(t container := consensus.InitConsensusCore() container.SetValidatorGroupSelector(validatorGroupSelector) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.False(t, r) @@ -451,7 +451,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenIsNotInTheConsen sr, _ := defaultSubround(consensusState, ch, container) - srStartRound := *defaultWithoutErrorSubroundStartRoundFromSubround(sr) + srStartRound := defaultWithoutErrorSubroundStartRoundFromSubround(sr) r := srStartRound.InitCurrentRound() assert.True(t, r) @@ -469,7 +469,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenTimeIsOut(t *te container := consensus.InitConsensusCore() container.SetRoundHandler(roundHandlerMock) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.False(t, r) @@ -487,7 +487,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrue(t *testing.T) { container := consensus.InitConsensusCore() container.SetBootStrapper(bootstrapperMock) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.True(t, r) @@ -1106,7 +1106,7 @@ func TestSubroundStartRound_GenerateNextConsensusGroupShouldReturnErr(t *testing container := consensus.InitConsensusCore() container.SetValidatorGroupSelector(validatorGroupSelector) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) err2 := srStartRound.GenerateNextConsensusGroup(0) diff --git a/testscommon/consensus/mockTestInitializer.go 
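Note on the pattern applied throughout the v2 test changes above: the helper results are no longer dereferenced (sr := *initSubround... becomes sr := initSubround...), so the tests keep working on the pointer returned by the helper instead of on a detached copy of the subround. The motivation is not spelled out in the patch; presumably it avoids copying the subround value together with any state or locks it embeds. A minimal, self-contained illustration with a placeholder type, not the real subround:

package main

import "fmt"

// subround is an illustrative stand-in for the real subround struct used in the tests.
type subround struct {
	roundCanceled bool
}

func newSubround() *subround {
	return &subround{}
}

func main() {
	original := newSubround()

	copySr := *original // old test style: work on a detached copy
	copySr.roundCanceled = true
	fmt.Println(original.roundCanceled) // false: the copy's change never reaches original

	ptrSr := original // new test style: keep the shared instance
	ptrSr.roundCanceled = true
	fmt.Println(original.roundCanceled) // true: the mutation is shared
}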
b/testscommon/consensus/mockTestInitializer.go index b9d74889e39..4cdd7174618 100644 --- a/testscommon/consensus/mockTestInitializer.go +++ b/testscommon/consensus/mockTestInitializer.go @@ -167,7 +167,9 @@ func InitConsensusCore() *ConsensusCoreMock { func InitConsensusCoreWithMultiSigner(multiSigner crypto.MultiSigner) *ConsensusCoreMock { blockChain := &testscommon.ChainHandlerStub{ GetGenesisHeaderCalled: func() data.HeaderHandler { - return &block.Header{} + return &block.Header{ + RandSeed: []byte("randSeed"), + } }, } marshalizerMock := mock.MarshalizerMock{} From 4246b293f68e21064b944dd7bc40c367ec193c5c Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Mon, 23 Sep 2024 12:31:04 +0300 Subject: [PATCH 08/30] fix v1 consensus package unit tests --- consensus/spos/bls/v1/blsWorker_test.go | 4 +- .../spos/bls/v1/subroundSignature_test.go | 39 ++++++++++++++----- consensus/spos/bls/v1/subroundStartRound.go | 3 ++ .../spos/bls/v1/subroundStartRound_test.go | 17 ++++---- .../spos/bls/v2/subroundStartRound_test.go | 16 ++++---- 5 files changed, 52 insertions(+), 27 deletions(-) diff --git a/consensus/spos/bls/v1/blsWorker_test.go b/consensus/spos/bls/v1/blsWorker_test.go index f25e0d91615..c8fd86162c0 100644 --- a/consensus/spos/bls/v1/blsWorker_test.go +++ b/consensus/spos/bls/v1/blsWorker_test.go @@ -34,11 +34,11 @@ func initConsensusStateWithKeysHandler(keysHandler consensus.KeysHandler) *spos. eligibleNodesPubKeys[key] = struct{}{} } - indexLeader := 1 + indexLeader := 0 rcns, _ := spos.NewRoundConsensus( eligibleNodesPubKeys, consensusGroupSize, - eligibleList[indexLeader], + eligibleList[1], keysHandler, ) diff --git a/consensus/spos/bls/v1/subroundSignature_test.go b/consensus/spos/bls/v1/subroundSignature_test.go index 8f6c673978e..e13bb1ccfac 100644 --- a/consensus/spos/bls/v1/subroundSignature_test.go +++ b/consensus/spos/bls/v1/subroundSignature_test.go @@ -372,7 +372,10 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrSignature, false) sr.RoundCanceled = false - sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + leader, err := sr.GetLeader() + assert.Nil(t, err) + + sr.SetSelfPubKey(leader) r = sr.DoSignatureJob() assert.True(t, r) assert.False(t, sr.RoundCanceled) @@ -449,7 +452,11 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrSignature, false) sr.RoundCanceled = false - sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + + leader, err := sr.GetLeader() + assert.Nil(t, err) + + sr.SetSelfPubKey(leader) r = srSignature.DoSignatureJob() assert.True(t, r) assert.False(t, sr.RoundCanceled) @@ -501,8 +508,10 @@ func TestSubroundSignature_ReceivedSignature(t *testing.T) { sr.Data = []byte("X") r = sr.ReceivedSignature(cnsMsg) assert.False(t, r) + leader, err := sr.GetLeader() + assert.Nil(t, err) - sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + sr.SetSelfPubKey(leader) cnsMsg.PubKey = []byte("X") r = sr.ReceivedSignature(cnsMsg) @@ -574,7 +583,9 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { r = sr.ReceivedSignature(cnsMsg) assert.False(t, r) - sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + leader, err := sr.GetLeader() + assert.Nil(t, err) + sr.SetSelfPubKey(leader) cnsMsg.PubKey = []byte("X") r = sr.ReceivedSignature(cnsMsg) @@ -668,7 +679,9 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenNotAllS sr := initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = false - 
sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + leader, err := sr.GetLeader() + assert.Nil(t, err) + sr.SetSelfPubKey(leader) for i := 0; i < sr.Threshold(bls.SrSignature); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) @@ -684,7 +697,9 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenAllSigna sr := initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = false - sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + leader, err := sr.GetLeader() + assert.Nil(t, err) + sr.SetSelfPubKey(leader) for i := 0; i < sr.ConsensusGroupSize(); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) @@ -700,7 +715,9 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenEnoughBu sr := initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = true - sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + leader, err := sr.GetLeader() + assert.Nil(t, err) + sr.SetSelfPubKey(leader) for i := 0; i < sr.Threshold(bls.SrSignature); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) @@ -742,7 +759,9 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenFallback sr := initSubroundSignatureWithContainer(container) sr.WaitingAllSignaturesTimeOut = true - sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + leader, err := sr.GetLeader() + assert.Nil(t, err) + sr.SetSelfPubKey(leader) for i := 0; i < sr.FallbackThreshold(bls.SrSignature); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) @@ -756,12 +775,14 @@ func TestSubroundSignature_ReceivedSignatureReturnFalseWhenConsensusDataIsNotEqu sr := initSubroundSignature() + leader, err := sr.GetLeader() + assert.Nil(t, err) cnsMsg := consensus.NewConsensusMessage( append(sr.Data, []byte("X")...), []byte("signature"), nil, nil, - []byte(sr.ConsensusGroup()[0]), + []byte(leader), []byte("sig"), int(bls.MtSignature), 0, diff --git a/consensus/spos/bls/v1/subroundStartRound.go b/consensus/spos/bls/v1/subroundStartRound.go index 2de413aa9cb..f654fa2036d 100644 --- a/consensus/spos/bls/v1/subroundStartRound.go +++ b/consensus/spos/bls/v1/subroundStartRound.go @@ -344,6 +344,9 @@ func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error sr.SetConsensusGroup(nextConsensusGroup) sr.SetLeader(leader) + consensusGroupSizeForEpoch := sr.NodesCoordinator().ConsensusGroupSizeForShardAndEpoch(shardId, currentHeader.GetEpoch()) + sr.SetConsensusGroupSize(consensusGroupSizeForEpoch) + return nil } diff --git a/consensus/spos/bls/v1/subroundStartRound_test.go b/consensus/spos/bls/v1/subroundStartRound_test.go index 7bb0fccbf7e..2afeafcbdd8 100644 --- a/consensus/spos/bls/v1/subroundStartRound_test.go +++ b/consensus/spos/bls/v1/subroundStartRound_test.go @@ -541,7 +541,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { SetStringValueHandler: func(key string, value string) { if key == common.MetricConsensusState { wasCalled = true - assert.Equal(t, value, "not in consensus group") + assert.Equal(t, "not in consensus group", value) } }, } @@ -641,7 +641,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { SetStringValueHandler: func(key string, value string) { if key == common.MetricConsensusState { wasCalled = true - assert.Equal(t, value, "participant") + assert.Equal(t, "participant", value) } }, IncrementHandler: func(key string) { @@ -652,6 +652,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { } ch := make(chan bool, 1) 
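The recurring test fix above replaces sr.SetSelfPubKey(sr.ConsensusGroup()[0]) with an explicit lookup through sr.GetLeader(), so the tests fetch the leader from the dedicated getter instead of assuming that the first entry of the consensus group is the leader. A small self-contained sketch of that pattern; the type below is an illustrative placeholder (its leader deliberately sits at a non-zero index to show what the getter protects against), not the repository's ConsensusState:

package main

import (
	"errors"
	"fmt"
)

// roundConsensus is a stripped-down placeholder for the consensus state used by the tests.
type roundConsensus struct {
	consensusGroup []string
	leader         string
	selfPubKey     string
}

// GetLeader returns the explicitly stored leader and an error when none was set,
// mirroring the error check added next to every sr.GetLeader() call above.
func (rc *roundConsensus) GetLeader() (string, error) {
	if rc.leader == "" {
		return "", errors.New("leader not set")
	}
	return rc.leader, nil
}

// SetSelfPubKey marks which public key the test should act as.
func (rc *roundConsensus) SetSelfPubKey(pk string) {
	rc.selfPubKey = pk
}

func main() {
	rc := &roundConsensus{
		consensusGroup: []string{"A", "B", "C"},
		leader:         "B", // illustrative: the leader need not be the first group member
	}

	leader, err := rc.GetLeader()
	if err != nil {
		panic(err)
	}
	rc.SetSelfPubKey(leader) // mirrors sr.SetSelfPubKey(leader) in the updated tests

	fmt.Println(rc.selfPubKey)                         // B
	fmt.Println(rc.selfPubKey == rc.consensusGroup[0]) // false in this setup
}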
consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState.SetSelfPubKey("B") keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { return string(pkBytes) == consensusState.SelfPubKey() } @@ -695,15 +696,15 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { SetStringValueHandler: func(key string, value string) { if key == common.MetricConsensusState { wasMetricConsensusStateCalled = true - assert.Equal(t, value, "proposer") + assert.Equal(t, "proposer", value) } if key == common.MetricConsensusRoundState { cntMetricConsensusRoundStateCalled++ switch cntMetricConsensusRoundStateCalled { case 1: - assert.Equal(t, value, "") + assert.Equal(t, "", value) case 2: - assert.Equal(t, value, "proposed") + assert.Equal(t, "proposed", value) default: assert.Fail(t, "should have been called only twice") } @@ -761,15 +762,15 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { SetStringValueHandler: func(key string, value string) { if key == common.MetricConsensusState { wasMetricConsensusStateCalled = true - assert.Equal(t, value, "proposer") + assert.Equal(t, "proposer", value) } if key == common.MetricConsensusRoundState { cntMetricConsensusRoundStateCalled++ switch cntMetricConsensusRoundStateCalled { case 1: - assert.Equal(t, value, "") + assert.Equal(t, "", value) case 2: - assert.Equal(t, value, "proposed") + assert.Equal(t, "proposed", value) default: assert.Fail(t, "should have been called only twice") } diff --git a/consensus/spos/bls/v2/subroundStartRound_test.go b/consensus/spos/bls/v2/subroundStartRound_test.go index 7f9c03de7dd..d288e67b14b 100644 --- a/consensus/spos/bls/v2/subroundStartRound_test.go +++ b/consensus/spos/bls/v2/subroundStartRound_test.go @@ -506,7 +506,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { SetStringValueHandler: func(key string, value string) { if key == common.MetricConsensusState { wasCalled = true - assert.Equal(t, value, "not in consensus group") + assert.Equal(t, "not in consensus group", value) } }, } @@ -602,7 +602,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { SetStringValueHandler: func(key string, value string) { if key == common.MetricConsensusState { wasCalled = true - assert.Equal(t, value, "participant") + assert.Equal(t, "participant", value) } }, IncrementHandler: func(key string) { @@ -654,15 +654,15 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { SetStringValueHandler: func(key string, value string) { if key == common.MetricConsensusState { wasMetricConsensusStateCalled = true - assert.Equal(t, value, "proposer") + assert.Equal(t, "proposer", value) } if key == common.MetricConsensusRoundState { cntMetricConsensusRoundStateCalled++ switch cntMetricConsensusRoundStateCalled { case 1: - assert.Equal(t, value, "") + assert.Equal(t, "", value) case 2: - assert.Equal(t, value, "proposed") + assert.Equal(t, "proposed", value) default: assert.Fail(t, "should have been called only twice") } @@ -717,15 +717,15 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { SetStringValueHandler: func(key string, value string) { if key == common.MetricConsensusState { wasMetricConsensusStateCalled = true - assert.Equal(t, value, "proposer") + assert.Equal(t, "proposer", value) } if key == common.MetricConsensusRoundState { cntMetricConsensusRoundStateCalled++ switch cntMetricConsensusRoundStateCalled { case 1: - assert.Equal(t, value, "") + assert.Equal(t, 
"", value) case 2: - assert.Equal(t, value, "proposed") + assert.Equal(t, "proposed", value) default: assert.Fail(t, "should have been called only twice") } From b8ca2283bb06f67986b5a0740bf56ab4baaa4991 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Mon, 23 Sep 2024 13:44:15 +0300 Subject: [PATCH 09/30] extract common worker for v1 and v2 --- consensus/spos/bls/{v1 => }/blsWorker.go | 93 ++-- consensus/spos/bls/{v1 => }/blsWorker_test.go | 147 ++--- .../spos/bls/v1/blsSubroundsFactory_test.go | 35 +- consensus/spos/bls/v1/export_test.go | 2 - consensus/spos/bls/v1/subroundBlock_test.go | 27 +- .../spos/bls/v1/subroundEndRound_test.go | 23 +- .../spos/bls/v1/subroundSignature_test.go | 19 +- .../spos/bls/v1/subroundStartRound_test.go | 35 +- consensus/spos/bls/v2/benchmark_test.go | 5 +- .../v2/benchmark_verify_signatures_test.go | 3 +- .../spos/bls/v2/blsSubroundsFactory_test.go | 37 +- consensus/spos/bls/v2/blsWorker.go | 164 ------ consensus/spos/bls/v2/blsWorker_test.go | 505 ------------------ consensus/spos/bls/v2/export_test.go | 2 - consensus/spos/bls/v2/subroundBlock_test.go | 31 +- .../spos/bls/v2/subroundEndRound_test.go | 33 +- .../spos/bls/v2/subroundSignature_test.go | 33 +- .../spos/bls/v2/subroundStartRound_test.go | 37 +- consensus/spos/sposFactory/sposFactory.go | 2 +- consensus/spos/worker_test.go | 3 +- 20 files changed, 258 insertions(+), 978 deletions(-) rename consensus/spos/bls/{v1 => }/blsWorker.go (67%) rename consensus/spos/bls/{v1 => }/blsWorker_test.go (69%) delete mode 100644 consensus/spos/bls/v2/blsWorker.go delete mode 100644 consensus/spos/bls/v2/blsWorker_test.go diff --git a/consensus/spos/bls/v1/blsWorker.go b/consensus/spos/bls/blsWorker.go similarity index 67% rename from consensus/spos/bls/v1/blsWorker.go rename to consensus/spos/bls/blsWorker.go index b6e168d61c0..b8ceffe9122 100644 --- a/consensus/spos/bls/v1/blsWorker.go +++ b/consensus/spos/bls/blsWorker.go @@ -1,12 +1,11 @@ -package v1 +package bls import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" - "github.com/multiversx/mx-chain-go/consensus/spos/bls" ) -// peerMaxMessagesPerSec defines how many messages can be propagated by a pid in a round. The value was chosen by +// PeerMaxMessagesPerSec defines how many messages can be propagated by a pid in a round. The value was chosen by // following the next premises: // 1. a leader can propagate as maximum as 3 messages per round: proposed header block + proposed body + final info; // 2. 
due to the fact that a delayed signature of the proposer (from previous round) can be received in the current round @@ -17,15 +16,15 @@ import ( // // Validators only send one signature message in a round, treating the edge case of a delayed message, will need at most // 2 messages per round (which is ok as it is below the set value of 5) -const peerMaxMessagesPerSec = uint32(6) +const PeerMaxMessagesPerSec = uint32(6) -// defaultMaxNumOfMessageTypeAccepted represents the maximum number of the same message type accepted in one round to be +// DefaultMaxNumOfMessageTypeAccepted represents the maximum number of the same message type accepted in one round to be // received from the same public key for the default message types -const defaultMaxNumOfMessageTypeAccepted = uint32(1) +const DefaultMaxNumOfMessageTypeAccepted = uint32(1) -// maxNumOfMessageTypeSignatureAccepted represents the maximum number of the signature message type accepted in one round to be +// MaxNumOfMessageTypeSignatureAccepted represents the maximum number of the signature message type accepted in one round to be // received from the same public key -const maxNumOfMessageTypeSignatureAccepted = uint32(2) +const MaxNumOfMessageTypeSignatureAccepted = uint32(2) // worker defines the data needed by spos to communicate between nodes which are in the validators group type worker struct { @@ -41,88 +40,88 @@ func NewConsensusService() (*worker, error) { // InitReceivedMessages initializes the MessagesType map for all messages for the current ConsensusService func (wrk *worker) InitReceivedMessages() map[consensus.MessageType][]*consensus.Message { receivedMessages := make(map[consensus.MessageType][]*consensus.Message) - receivedMessages[bls.MtBlockBodyAndHeader] = make([]*consensus.Message, 0) - receivedMessages[bls.MtBlockBody] = make([]*consensus.Message, 0) - receivedMessages[bls.MtBlockHeader] = make([]*consensus.Message, 0) - receivedMessages[bls.MtSignature] = make([]*consensus.Message, 0) - receivedMessages[bls.MtBlockHeaderFinalInfo] = make([]*consensus.Message, 0) - receivedMessages[bls.MtInvalidSigners] = make([]*consensus.Message, 0) + receivedMessages[MtBlockBodyAndHeader] = make([]*consensus.Message, 0) + receivedMessages[MtBlockBody] = make([]*consensus.Message, 0) + receivedMessages[MtBlockHeader] = make([]*consensus.Message, 0) + receivedMessages[MtSignature] = make([]*consensus.Message, 0) + receivedMessages[MtBlockHeaderFinalInfo] = make([]*consensus.Message, 0) + receivedMessages[MtInvalidSigners] = make([]*consensus.Message, 0) return receivedMessages } // GetMaxMessagesInARoundPerPeer returns the maximum number of messages a peer can send per round for BLS func (wrk *worker) GetMaxMessagesInARoundPerPeer() uint32 { - return peerMaxMessagesPerSec + return PeerMaxMessagesPerSec } // GetStringValue gets the name of the messageType func (wrk *worker) GetStringValue(messageType consensus.MessageType) string { - return bls.GetStringValue(messageType) + return GetStringValue(messageType) } // GetSubroundName gets the subround name for the subround id provided func (wrk *worker) GetSubroundName(subroundId int) string { - return bls.GetSubroundName(subroundId) + return GetSubroundName(subroundId) } // IsMessageWithBlockBodyAndHeader returns if the current messageType is about block body and header func (wrk *worker) IsMessageWithBlockBodyAndHeader(msgType consensus.MessageType) bool { - return msgType == bls.MtBlockBodyAndHeader + return msgType == MtBlockBodyAndHeader } // IsMessageWithBlockBody returns if the 
current messageType is about block body func (wrk *worker) IsMessageWithBlockBody(msgType consensus.MessageType) bool { - return msgType == bls.MtBlockBody + return msgType == MtBlockBody } // IsMessageWithBlockHeader returns if the current messageType is about block header func (wrk *worker) IsMessageWithBlockHeader(msgType consensus.MessageType) bool { - return msgType == bls.MtBlockHeader + return msgType == MtBlockHeader } // IsMessageWithSignature returns if the current messageType is about signature func (wrk *worker) IsMessageWithSignature(msgType consensus.MessageType) bool { - return msgType == bls.MtSignature + return msgType == MtSignature } // IsMessageWithFinalInfo returns if the current messageType is about header final info func (wrk *worker) IsMessageWithFinalInfo(msgType consensus.MessageType) bool { - return msgType == bls.MtBlockHeaderFinalInfo + return msgType == MtBlockHeaderFinalInfo } // IsMessageWithInvalidSigners returns if the current messageType is about invalid signers func (wrk *worker) IsMessageWithInvalidSigners(msgType consensus.MessageType) bool { - return msgType == bls.MtInvalidSigners + return msgType == MtInvalidSigners } // IsMessageTypeValid returns if the current messageType is valid func (wrk *worker) IsMessageTypeValid(msgType consensus.MessageType) bool { - isMessageTypeValid := msgType == bls.MtBlockBodyAndHeader || - msgType == bls.MtBlockBody || - msgType == bls.MtBlockHeader || - msgType == bls.MtSignature || - msgType == bls.MtBlockHeaderFinalInfo || - msgType == bls.MtInvalidSigners + isMessageTypeValid := msgType == MtBlockBodyAndHeader || + msgType == MtBlockBody || + msgType == MtBlockHeader || + msgType == MtSignature || + msgType == MtBlockHeaderFinalInfo || + msgType == MtInvalidSigners return isMessageTypeValid } // IsSubroundSignature returns if the current subround is about signature func (wrk *worker) IsSubroundSignature(subroundId int) bool { - return subroundId == bls.SrSignature + return subroundId == SrSignature } // IsSubroundStartRound returns if the current subround is about start round func (wrk *worker) IsSubroundStartRound(subroundId int) bool { - return subroundId == bls.SrStartRound + return subroundId == SrStartRound } // GetMessageRange provides the MessageType range used in checks by the consensus func (wrk *worker) GetMessageRange() []consensus.MessageType { var v []consensus.MessageType - for i := bls.MtBlockBodyAndHeader; i <= bls.MtInvalidSigners; i++ { + for i := MtBlockBodyAndHeader; i <= MtInvalidSigners; i++ { v = append(v, i) } @@ -132,18 +131,18 @@ func (wrk *worker) GetMessageRange() []consensus.MessageType { // CanProceed returns if the current messageType can proceed further if previous subrounds finished func (wrk *worker) CanProceed(consensusState *spos.ConsensusState, msgType consensus.MessageType) bool { switch msgType { - case bls.MtBlockBodyAndHeader: - return consensusState.Status(bls.SrStartRound) == spos.SsFinished - case bls.MtBlockBody: - return consensusState.Status(bls.SrStartRound) == spos.SsFinished - case bls.MtBlockHeader: - return consensusState.Status(bls.SrStartRound) == spos.SsFinished - case bls.MtSignature: - return consensusState.Status(bls.SrBlock) == spos.SsFinished - case bls.MtBlockHeaderFinalInfo: - return consensusState.Status(bls.SrSignature) == spos.SsFinished - case bls.MtInvalidSigners: - return consensusState.Status(bls.SrSignature) == spos.SsFinished + case MtBlockBodyAndHeader: + return consensusState.Status(SrStartRound) == spos.SsFinished + case MtBlockBody: + 
return consensusState.Status(SrStartRound) == spos.SsFinished + case MtBlockHeader: + return consensusState.Status(SrStartRound) == spos.SsFinished + case MtSignature: + return consensusState.Status(SrBlock) == spos.SsFinished + case MtBlockHeaderFinalInfo: + return consensusState.Status(SrSignature) == spos.SsFinished + case MtInvalidSigners: + return consensusState.Status(SrSignature) == spos.SsFinished } return false @@ -151,11 +150,11 @@ func (wrk *worker) CanProceed(consensusState *spos.ConsensusState, msgType conse // GetMaxNumOfMessageTypeAccepted returns the maximum number of accepted consensus message types per round, per public key func (wrk *worker) GetMaxNumOfMessageTypeAccepted(msgType consensus.MessageType) uint32 { - if msgType == bls.MtSignature { - return maxNumOfMessageTypeSignatureAccepted + if msgType == MtSignature { + return MaxNumOfMessageTypeSignatureAccepted } - return defaultMaxNumOfMessageTypeAccepted + return DefaultMaxNumOfMessageTypeAccepted } // IsInterfaceNil returns true if there is no value under the interface diff --git a/consensus/spos/bls/v1/blsWorker_test.go b/consensus/spos/bls/blsWorker_test.go similarity index 69% rename from consensus/spos/bls/v1/blsWorker_test.go rename to consensus/spos/bls/blsWorker_test.go index c8fd86162c0..8d39b02e5f1 100644 --- a/consensus/spos/bls/v1/blsWorker_test.go +++ b/consensus/spos/bls/blsWorker_test.go @@ -1,4 +1,4 @@ -package v1_test +package bls_test import ( "testing" @@ -9,70 +9,13 @@ import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" - v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" - "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" ) -func createEligibleList(size int) []string { - eligibleList := make([]string, 0) - for i := 0; i < size; i++ { - eligibleList = append(eligibleList, string([]byte{byte(i + 65)})) - } - return eligibleList -} - -func initConsensusState() *spos.ConsensusState { - return initConsensusStateWithKeysHandler(&testscommon.KeysHandlerStub{}) -} - -func initConsensusStateWithKeysHandler(keysHandler consensus.KeysHandler) *spos.ConsensusState { - consensusGroupSize := 9 - eligibleList := createEligibleList(consensusGroupSize) - - eligibleNodesPubKeys := make(map[string]struct{}) - for _, key := range eligibleList { - eligibleNodesPubKeys[key] = struct{}{} - } - - indexLeader := 0 - rcns, _ := spos.NewRoundConsensus( - eligibleNodesPubKeys, - consensusGroupSize, - eligibleList[1], - keysHandler, - ) - - rcns.SetConsensusGroup(eligibleList) - rcns.SetLeader(eligibleList[indexLeader]) - rcns.ResetRoundState() - - pBFTThreshold := consensusGroupSize*2/3 + 1 - pBFTFallbackThreshold := consensusGroupSize*1/2 + 1 - - rthr := spos.NewRoundThreshold() - rthr.SetThreshold(1, 1) - rthr.SetThreshold(2, pBFTThreshold) - rthr.SetFallbackThreshold(1, 1) - rthr.SetFallbackThreshold(2, pBFTFallbackThreshold) - - rstatus := spos.NewRoundStatus() - rstatus.ResetRoundStatus() - - cns := spos.NewConsensusState( - rcns, - rthr, - rstatus, - ) - - cns.Data = []byte("X") - cns.RoundIndex = 0 - return cns -} - func TestWorker_NewConsensusServiceShouldWork(t *testing.T) { t.Parallel() - service, err := v1.NewConsensusService() + service, err := bls.NewConsensusService() assert.Nil(t, err) assert.False(t, check.IfNil(service)) } @@ -80,7 +23,7 @@ func TestWorker_NewConsensusServiceShouldWork(t *testing.T) { func 
TestWorker_InitReceivedMessagesShouldWork(t *testing.T) { t.Parallel() - bnService, _ := v1.NewConsensusService() + bnService, _ := bls.NewConsensusService() messages := bnService.InitReceivedMessages() receivedMessages := make(map[consensus.MessageType][]*consensus.Message) @@ -104,7 +47,7 @@ func TestWorker_GetMessageRangeShouldWork(t *testing.T) { t.Parallel() v := make([]consensus.MessageType, 0) - blsService, _ := v1.NewConsensusService() + blsService, _ := bls.NewConsensusService() messagesRange := blsService.GetMessageRange() assert.NotNil(t, messagesRange) @@ -122,9 +65,9 @@ func TestWorker_GetMessageRangeShouldWork(t *testing.T) { func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyAndHeaderShouldWork(t *testing.T) { t.Parallel() - blsService, _ := v1.NewConsensusService() + blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockBodyAndHeader) @@ -134,9 +77,9 @@ func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyAndHeaderShouldW func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyAndHeaderShouldNotWork(t *testing.T) { t.Parallel() - blsService, _ := v1.NewConsensusService() + blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockBodyAndHeader) @@ -146,9 +89,9 @@ func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyAndHeaderShou func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyShouldWork(t *testing.T) { t.Parallel() - blsService, _ := v1.NewConsensusService() + blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockBody) @@ -158,9 +101,9 @@ func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyShouldWork(t *te func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyShouldNotWork(t *testing.T) { t.Parallel() - blsService, _ := v1.NewConsensusService() + blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockBody) @@ -170,9 +113,9 @@ func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyShouldNotWork func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockHeaderShouldWork(t *testing.T) { t.Parallel() - blsService, _ := v1.NewConsensusService() + blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeader) @@ -182,9 +125,9 @@ func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockHeaderShouldWork(t * func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockHeaderShouldNotWork(t *testing.T) { t.Parallel() - blsService, _ := v1.NewConsensusService() + blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState 
:= initializers.InitConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeader) @@ -194,9 +137,9 @@ func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockHeaderShouldNotWo func TestWorker_CanProceedWithSrBlockFinishedForMtBlockHeaderShouldWork(t *testing.T) { t.Parallel() - blsService, _ := v1.NewConsensusService() + blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrBlock, spos.SsFinished) canProceed := blsService.CanProceed(consensusState, bls.MtSignature) @@ -206,9 +149,9 @@ func TestWorker_CanProceedWithSrBlockFinishedForMtBlockHeaderShouldWork(t *testi func TestWorker_CanProceedWithSrBlockRoundNotFinishedForMtBlockHeaderShouldNotWork(t *testing.T) { t.Parallel() - blsService, _ := v1.NewConsensusService() + blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrBlock, spos.SsNotFinished) canProceed := blsService.CanProceed(consensusState, bls.MtSignature) @@ -218,9 +161,9 @@ func TestWorker_CanProceedWithSrBlockRoundNotFinishedForMtBlockHeaderShouldNotWo func TestWorker_CanProceedWithSrSignatureFinishedForMtBlockHeaderFinalInfoShouldWork(t *testing.T) { t.Parallel() - blsService, _ := v1.NewConsensusService() + blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrSignature, spos.SsFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeaderFinalInfo) @@ -230,9 +173,9 @@ func TestWorker_CanProceedWithSrSignatureFinishedForMtBlockHeaderFinalInfoShould func TestWorker_CanProceedWithSrSignatureRoundNotFinishedForMtBlockHeaderFinalInfoShouldNotWork(t *testing.T) { t.Parallel() - blsService, _ := v1.NewConsensusService() + blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrSignature, spos.SsNotFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeaderFinalInfo) @@ -242,8 +185,8 @@ func TestWorker_CanProceedWithSrSignatureRoundNotFinishedForMtBlockHeaderFinalIn func TestWorker_CanProceedWitUnkownMessageTypeShouldNotWork(t *testing.T) { t.Parallel() - blsService, _ := v1.NewConsensusService() - consensusState := initConsensusState() + blsService, _ := bls.NewConsensusService() + consensusState := initializers.InitConsensusState() canProceed := blsService.CanProceed(consensusState, -1) assert.False(t, canProceed) @@ -252,7 +195,7 @@ func TestWorker_CanProceedWitUnkownMessageTypeShouldNotWork(t *testing.T) { func TestWorker_GetSubroundName(t *testing.T) { t.Parallel() - service, _ := v1.NewConsensusService() + service, _ := bls.NewConsensusService() r := service.GetSubroundName(bls.SrStartRound) assert.Equal(t, "(START_ROUND)", r) @@ -269,7 +212,7 @@ func TestWorker_GetSubroundName(t *testing.T) { func TestWorker_GetStringValue(t *testing.T) { t.Parallel() - service, _ := v1.NewConsensusService() + service, _ := bls.NewConsensusService() r := service.GetStringValue(bls.MtBlockBodyAndHeader) assert.Equal(t, bls.BlockBodyAndHeaderStringValue, r) @@ -290,7 +233,7 @@ func TestWorker_GetStringValue(t *testing.T) { func TestWorker_IsMessageWithBlockBodyAndHeader(t *testing.T) { t.Parallel() - service, _ := 
v1.NewConsensusService() + service, _ := bls.NewConsensusService() ret := service.IsMessageWithBlockBodyAndHeader(bls.MtBlockBody) assert.False(t, ret) @@ -305,7 +248,7 @@ func TestWorker_IsMessageWithBlockBodyAndHeader(t *testing.T) { func TestWorker_IsMessageWithBlockBody(t *testing.T) { t.Parallel() - service, _ := v1.NewConsensusService() + service, _ := bls.NewConsensusService() ret := service.IsMessageWithBlockBody(bls.MtBlockHeader) assert.False(t, ret) @@ -317,7 +260,7 @@ func TestWorker_IsMessageWithBlockBody(t *testing.T) { func TestWorker_IsMessageWithBlockHeader(t *testing.T) { t.Parallel() - service, _ := v1.NewConsensusService() + service, _ := bls.NewConsensusService() ret := service.IsMessageWithBlockHeader(bls.MtBlockBody) assert.False(t, ret) @@ -329,7 +272,7 @@ func TestWorker_IsMessageWithBlockHeader(t *testing.T) { func TestWorker_IsMessageWithSignature(t *testing.T) { t.Parallel() - service, _ := v1.NewConsensusService() + service, _ := bls.NewConsensusService() ret := service.IsMessageWithSignature(bls.MtBlockBodyAndHeader) assert.False(t, ret) @@ -341,7 +284,7 @@ func TestWorker_IsMessageWithSignature(t *testing.T) { func TestWorker_IsMessageWithFinalInfo(t *testing.T) { t.Parallel() - service, _ := v1.NewConsensusService() + service, _ := bls.NewConsensusService() ret := service.IsMessageWithFinalInfo(bls.MtSignature) assert.False(t, ret) @@ -353,7 +296,7 @@ func TestWorker_IsMessageWithFinalInfo(t *testing.T) { func TestWorker_IsMessageWithInvalidSigners(t *testing.T) { t.Parallel() - service, _ := v1.NewConsensusService() + service, _ := bls.NewConsensusService() ret := service.IsMessageWithInvalidSigners(bls.MtBlockHeaderFinalInfo) assert.False(t, ret) @@ -365,7 +308,7 @@ func TestWorker_IsMessageWithInvalidSigners(t *testing.T) { func TestWorker_IsSubroundSignature(t *testing.T) { t.Parallel() - service, _ := v1.NewConsensusService() + service, _ := bls.NewConsensusService() ret := service.IsSubroundSignature(bls.SrEndRound) assert.False(t, ret) @@ -377,7 +320,7 @@ func TestWorker_IsSubroundSignature(t *testing.T) { func TestWorker_IsSubroundStartRound(t *testing.T) { t.Parallel() - service, _ := v1.NewConsensusService() + service, _ := bls.NewConsensusService() ret := service.IsSubroundStartRound(bls.SrSignature) assert.False(t, ret) @@ -389,7 +332,7 @@ func TestWorker_IsSubroundStartRound(t *testing.T) { func TestWorker_IsMessageTypeValid(t *testing.T) { t.Parallel() - service, _ := v1.NewConsensusService() + service, _ := bls.NewConsensusService() ret := service.IsMessageTypeValid(bls.MtBlockBody) assert.True(t, ret) @@ -401,19 +344,19 @@ func TestWorker_IsMessageTypeValid(t *testing.T) { func TestWorker_GetMaxNumOfMessageTypeAccepted(t *testing.T) { t.Parallel() - service, _ := v1.NewConsensusService() + service, _ := bls.NewConsensusService() t.Run("message type signature", func(t *testing.T) { t.Parallel() - assert.Equal(t, v1.MaxNumOfMessageTypeSignatureAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtSignature)) + assert.Equal(t, bls.MaxNumOfMessageTypeSignatureAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtSignature)) }) t.Run("other message types", func(t *testing.T) { t.Parallel() - assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtUnknown)) - assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockBody)) - assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockHeader)) - assert.Equal(t, 
v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockBodyAndHeader)) - assert.Equal(t, v1.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockHeaderFinalInfo)) + assert.Equal(t, bls.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtUnknown)) + assert.Equal(t, bls.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockBody)) + assert.Equal(t, bls.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockHeader)) + assert.Equal(t, bls.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockBodyAndHeader)) + assert.Equal(t, bls.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockHeaderFinalInfo)) }) } diff --git a/consensus/spos/bls/v1/blsSubroundsFactory_test.go b/consensus/spos/bls/v1/blsSubroundsFactory_test.go index 3024eb79de0..b5c9e6c4d03 100644 --- a/consensus/spos/bls/v1/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/v1/blsSubroundsFactory_test.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/testscommon" consensusMock "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" testscommonOutport "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) @@ -71,7 +72,7 @@ func initWorker() spos.WorkerHandler { func initFactoryWithContainer(container *consensusMock.ConsensusCoreMock) v1.Factory { worker := initWorker() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() fct, _ := v1.NewSubroundsFactory( container, @@ -119,7 +120,7 @@ func TestFactory_GetMessageTypeName(t *testing.T) { func TestFactory_NewFactoryNilContainerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() worker := initWorker() fct, err := v1.NewSubroundsFactory( @@ -159,7 +160,7 @@ func TestFactory_NewFactoryNilConsensusStateShouldFail(t *testing.T) { func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMock.InitConsensusCore() worker := initWorker() container.SetBlockchain(nil) @@ -181,7 +182,7 @@ func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) { func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMock.InitConsensusCore() worker := initWorker() container.SetBlockProcessor(nil) @@ -203,7 +204,7 @@ func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) { func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMock.InitConsensusCore() worker := initWorker() container.SetBootStrapper(nil) @@ -225,7 +226,7 @@ func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) { func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMock.InitConsensusCore() worker := initWorker() 
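The changes above, together with the blsWorker_test.go rename earlier in this patch, drop the per-package createEligibleList / initConsensusState helpers in favour of the shared testscommon/consensus/initializers package, so the v1 and v2 test suites build their consensus-state fixture the same way. A simplified, stand-alone sketch of what such a shared initializer centralizes; the struct below is a placeholder rather than the real spos.ConsensusState, while the values mirror the removed helper (group size 9, single-letter keys, pBFT thresholds):

package initializers // illustrative package, mirroring testscommon/consensus/initializers

// consensusStateFixture is a placeholder for the *spos.ConsensusState the real helper returns.
type consensusStateFixture struct {
	eligibleList       []string
	leader             string
	selfPubKey         string
	consensusGroupSize int
	threshold          int
	fallbackThreshold  int
	data               []byte
}

// createEligibleList mirrors the helper removed from the per-package tests: it generates
// deterministic single-letter public keys "A", "B", "C", ...
func createEligibleList(size int) []string {
	eligibleList := make([]string, 0, size)
	for i := 0; i < size; i++ {
		eligibleList = append(eligibleList, string([]byte{byte(i + 65)}))
	}
	return eligibleList
}

// InitConsensusState builds one shared fixture so every consensus test package stops
// keeping its own copy of this setup.
func InitConsensusState() *consensusStateFixture {
	consensusGroupSize := 9
	eligibleList := createEligibleList(consensusGroupSize)

	return &consensusStateFixture{
		eligibleList:       eligibleList,
		leader:             eligibleList[0], // leader, as in the updated helper
		selfPubKey:         eligibleList[1],
		consensusGroupSize: consensusGroupSize,
		threshold:          consensusGroupSize*2/3 + 1, // pBFT threshold
		fallbackThreshold:  consensusGroupSize/2 + 1,   // pBFT fallback threshold
		data:               []byte("X"),
	}
}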
container.SetChronology(nil) @@ -247,7 +248,7 @@ func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMock.InitConsensusCore() worker := initWorker() container.SetHasher(nil) @@ -269,7 +270,7 @@ func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) { func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMock.InitConsensusCore() worker := initWorker() container.SetMarshalizer(nil) @@ -291,7 +292,7 @@ func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMock.InitConsensusCore() worker := initWorker() container.SetMultiSignerContainer(nil) @@ -313,7 +314,7 @@ func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMock.InitConsensusCore() worker := initWorker() container.SetRoundHandler(nil) @@ -335,7 +336,7 @@ func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMock.InitConsensusCore() worker := initWorker() container.SetShardCoordinator(nil) @@ -357,7 +358,7 @@ func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) { func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMock.InitConsensusCore() worker := initWorker() container.SetSyncTimer(nil) @@ -379,7 +380,7 @@ func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMock.InitConsensusCore() worker := initWorker() container.SetValidatorGroupSelector(nil) @@ -401,7 +402,7 @@ func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) { func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMock.InitConsensusCore() fct, err := v1.NewSubroundsFactory( @@ -421,7 +422,7 @@ func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMock.InitConsensusCore() worker := initWorker() @@ -442,7 +443,7 @@ func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState 
:= initializers.InitConsensusState() container := consensusMock.InitConsensusCore() worker := initWorker() @@ -471,7 +472,7 @@ func TestFactory_NewFactoryShouldWork(t *testing.T) { func TestFactory_NewFactoryEmptyChainIDShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMock.InitConsensusCore() worker := initWorker() diff --git a/consensus/spos/bls/v1/export_test.go b/consensus/spos/bls/v1/export_test.go index 452f9bb0d04..6cb39895b7e 100644 --- a/consensus/spos/bls/v1/export_test.go +++ b/consensus/spos/bls/v1/export_test.go @@ -20,8 +20,6 @@ import ( ) const ProcessingThresholdPercent = processingThresholdPercent -const DefaultMaxNumOfMessageTypeAccepted = defaultMaxNumOfMessageTypeAccepted -const MaxNumOfMessageTypeSignatureAccepted = maxNumOfMessageTypeSignatureAccepted // factory diff --git a/consensus/spos/bls/v1/subroundBlock_test.go b/consensus/spos/bls/v1/subroundBlock_test.go index 44bd8ad813b..16dbc95aebb 100644 --- a/consensus/spos/bls/v1/subroundBlock_test.go +++ b/consensus/spos/bls/v1/subroundBlock_test.go @@ -19,6 +19,7 @@ import ( v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" "github.com/multiversx/mx-chain-go/testscommon" consensusMock "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" @@ -101,7 +102,7 @@ func initSubroundBlock( } } - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) container.SetBlockchain(blockChain) @@ -139,7 +140,7 @@ func initSubroundBlockWithBlockProcessor( container.SetBlockchain(blockChain) container.SetBlockProcessor(blockProcessorMock) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -163,7 +164,7 @@ func TestSubroundBlock_NewSubroundBlockNilBlockchainShouldFail(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -179,7 +180,7 @@ func TestSubroundBlock_NewSubroundBlockNilBlockProcessorShouldFail(t *testing.T) t.Parallel() container := consensusMock.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -194,7 +195,7 @@ func TestSubroundBlock_NewSubroundBlockNilBlockProcessorShouldFail(t *testing.T) func TestSubroundBlock_NewSubroundBlockNilConsensusStateShouldFail(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -209,7 +210,7 @@ func TestSubroundBlock_NewSubroundBlockNilHasherShouldFail(t *testing.T) { t.Parallel() container := 
consensusMock.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -224,7 +225,7 @@ func TestSubroundBlock_NewSubroundBlockNilMarshalizerShouldFail(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -239,7 +240,7 @@ func TestSubroundBlock_NewSubroundBlockNilMultiSignerContainerShouldFail(t *test t.Parallel() container := consensusMock.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -254,7 +255,7 @@ func TestSubroundBlock_NewSubroundBlockNilRoundHandlerShouldFail(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -269,7 +270,7 @@ func TestSubroundBlock_NewSubroundBlockNilShardCoordinatorShouldFail(t *testing. t.Parallel() container := consensusMock.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -284,7 +285,7 @@ func TestSubroundBlock_NewSubroundBlockNilSyncTimerShouldFail(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -299,7 +300,7 @@ func TestSubroundBlock_NewSubroundBlockShouldWork(t *testing.T) { t.Parallel() container := consensusMock.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) srBlock, err := defaultSubroundBlockFromSubround(sr) @@ -1130,7 +1131,7 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDurationWithZeroDurationShould container := consensusMock.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) diff --git a/consensus/spos/bls/v1/subroundEndRound_test.go b/consensus/spos/bls/v1/subroundEndRound_test.go index 6cc7cbc75ff..d1d2e920fdc 100644 --- a/consensus/spos/bls/v1/subroundEndRound_test.go +++ b/consensus/spos/bls/v1/subroundEndRound_test.go @@ -25,6 +25,7 @@ import ( "github.com/multiversx/mx-chain-go/p2p/factory" "github.com/multiversx/mx-chain-go/testscommon" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" 
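Several of the v1 tests below build their state through initializers.InitConsensusStateWithKeysHandler, injecting a KeysHandler stub whose IsKeyManagedByCurrentNode callback decides which public keys the node owns (single-key versus multikey scenarios). A minimal stand-alone sketch of that stub pattern; the interface and stub here are illustrative placeholders, not the repository's testscommon types:

package main

import "fmt"

type keysHandler interface {
	IsKeyManagedByCurrentNode(pkBytes []byte) bool
}

// keysHandlerStub mirrors the testscommon-style stub: the behaviour is injected per test.
type keysHandlerStub struct {
	IsKeyManagedByCurrentNodeCalled func(pkBytes []byte) bool
}

func (stub *keysHandlerStub) IsKeyManagedByCurrentNode(pkBytes []byte) bool {
	if stub.IsKeyManagedByCurrentNodeCalled != nil {
		return stub.IsKeyManagedByCurrentNodeCalled(pkBytes)
	}
	return false
}

func main() {
	selfPubKey := "B"
	stub := &keysHandlerStub{
		IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool {
			return string(pkBytes) == selfPubKey // manage only the node's own key
		},
	}

	var kh keysHandler = stub
	fmt.Println(kh.IsKeyManagedByCurrentNode([]byte("B"))) // true
	fmt.Println(kh.IsKeyManagedByCurrentNode([]byte("C"))) // false
}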
"github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) @@ -34,7 +35,7 @@ func initSubroundEndRoundWithContainer( appStatusHandler core.AppStatusHandler, ) v1.SubroundEndRound { ch := make(chan bool, 1) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -72,7 +73,7 @@ func TestNewSubroundEndRound(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( bls.SrSignature, @@ -156,7 +157,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing. t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -192,7 +193,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *test t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -228,7 +229,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *test t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -265,7 +266,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -301,7 +302,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testin t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -337,7 +338,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -373,7 +374,7 @@ func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -1353,7 +1354,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -1725,7 +1726,7 @@ func TestSubroundEndRound_getMinConsensusGroupIndexOfManagedKeys(t *testing.T) { container := consensusMocks.InitConsensusCore() keysHandler := &testscommon.KeysHandlerStub{} ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := 
initializers.InitConsensusStateWithKeysHandler(keysHandler) sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, diff --git a/consensus/spos/bls/v1/subroundSignature_test.go b/consensus/spos/bls/v1/subroundSignature_test.go index e13bb1ccfac..d9eb9260f46 100644 --- a/consensus/spos/bls/v1/subroundSignature_test.go +++ b/consensus/spos/bls/v1/subroundSignature_test.go @@ -15,11 +15,12 @@ import ( v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" "github.com/multiversx/mx-chain-go/testscommon" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) func initSubroundSignatureWithContainer(container *consensusMocks.ConsensusCoreMock) v1.SubroundSignature { - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -57,7 +58,7 @@ func TestNewSubroundSignature(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -134,7 +135,7 @@ func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *te t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -169,7 +170,7 @@ func TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -203,7 +204,7 @@ func TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -237,7 +238,7 @@ func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *test t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -272,7 +273,7 @@ func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -306,7 +307,7 @@ func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -385,7 +386,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true diff --git a/consensus/spos/bls/v1/subroundStartRound_test.go 
b/consensus/spos/bls/v1/subroundStartRound_test.go index 2afeafcbdd8..d343cf75266 100644 --- a/consensus/spos/bls/v1/subroundStartRound_test.go +++ b/consensus/spos/bls/v1/subroundStartRound_test.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/bootstrapperStubs" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) @@ -70,7 +71,7 @@ func defaultSubround( } func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) v1.SubroundStartRound { - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) srStartRound, _ := v1.NewSubroundStartRound( @@ -94,7 +95,7 @@ func TestNewSubroundStartRound(t *testing.T) { t.Parallel() ch := make(chan bool, 1) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensusMocks.InitConsensusCore() sr, _ := spos.NewSubround( -1, @@ -197,7 +198,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilBlockChainShouldFail(t *test container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -213,7 +214,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilBootstrapperShouldFail(t *te container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -228,7 +229,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilConsensusStateShouldFail(t * t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -245,7 +246,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilMultiSignerContainerShouldFa container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -261,7 +262,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilRoundHandlerShouldFail(t *te container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -277,7 +278,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilSyncTimerShouldFail(t *testi container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -293,7 +294,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilValidatorGroupSelectorShould container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -309,7 
+310,7 @@ func TestSubroundStartRound_NewSubroundStartRoundShouldWork(t *testing.T) { container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -325,7 +326,7 @@ func TestSubroundStartRound_DoStartRoundShouldReturnTrue(t *testing.T) { container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -480,7 +481,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenIsNotInTheConsen t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetSelfPubKey(consensusState.SelfPubKey() + "X") ch := make(chan bool, 1) @@ -546,7 +547,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) consensusState.SetSelfPubKey("not in consensus") sr, _ := spos.NewSubround( -1, @@ -600,7 +601,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) consensusState.SetSelfPubKey("B") sr, _ := spos.NewSubround( -1, @@ -651,7 +652,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) consensusState.SetSelfPubKey("B") keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { return string(pkBytes) == consensusState.SelfPubKey() @@ -717,7 +718,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) leader, _ := consensusState.GetLeader() consensusState.SetSelfPubKey(leader) @@ -783,7 +784,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) leader, _ := consensusState.GetLeader() consensusState.SetSelfPubKey(leader) keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { diff --git a/consensus/spos/bls/v2/benchmark_test.go b/consensus/spos/bls/v2/benchmark_test.go index 24edc6355a7..b48058eef56 100644 --- a/consensus/spos/bls/v2/benchmark_test.go +++ b/consensus/spos/bls/v2/benchmark_test.go @@ -23,6 +23,7 @@ import ( nodeMock "github.com/multiversx/mx-chain-go/node/mock" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" @@ -74,7 
+75,7 @@ func benchmarkSubroundSignatureDoSignatureJobForManagedKeys(b *testing.B, number } args := cryptoFactory.ArgsSigningHandler{ - PubKeys: createEligibleListFromMap(mapKeys), + PubKeys: initializers.CreateEligibleListFromMap(mapKeys), MultiSignerContainer: &cryptoMocks.MultiSignerContainerStub{ GetMultiSignerCalled: func(epoch uint32) (crypto.MultiSigner, error) { return multiSigHandler, nil @@ -87,7 +88,7 @@ func benchmarkSubroundSignatureDoSignatureJobForManagedKeys(b *testing.B, number require.Nil(b, err) container.SetSigningHandler(signingHandler) - consensusState := initConsensusStateWithArgs(keysHandlerMock, mapKeys) + consensusState := initializers.InitConsensusStateWithArgs(keysHandlerMock, mapKeys) ch := make(chan bool, 1) sr, _ := spos.NewSubround( diff --git a/consensus/spos/bls/v2/benchmark_verify_signatures_test.go b/consensus/spos/bls/v2/benchmark_verify_signatures_test.go index da27f6570e4..09a276dc3a3 100644 --- a/consensus/spos/bls/v2/benchmark_verify_signatures_test.go +++ b/consensus/spos/bls/v2/benchmark_verify_signatures_test.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/spos/bls" dataRetrieverMocks "github.com/multiversx/mx-chain-go/dataRetriever/mock" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/common" factoryCrypto "github.com/multiversx/mx-chain-go/factory/crypto" @@ -102,7 +103,7 @@ func BenchmarkSubroundEndRound_VerifyNodesOnAggSigFailTime(b *testing.B) { require.Nil(b, err) container.SetSigningHandler(signingHandler) - consensusState := initConsensusStateWithArgsVerifySignature(keysHandlerMock, keys) + consensusState := initializers.InitConsensusStateWithArgsVerifySignature(keysHandlerMock, keys) dataToBeSigned := []byte("message") consensusState.Data = dataToBeSigned diff --git a/consensus/spos/bls/v2/blsSubroundsFactory_test.go b/consensus/spos/bls/v2/blsSubroundsFactory_test.go index 577d72f070d..a3ac6ed432e 100644 --- a/consensus/spos/bls/v2/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/v2/blsSubroundsFactory_test.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/testscommon" testscommonConsensus "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" testscommonOutport "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) @@ -60,7 +61,7 @@ func initWorker() spos.WorkerHandler { func initFactoryWithContainer(container *testscommonConsensus.ConsensusCoreMock) v2.Factory { worker := initWorker() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() fct, _ := v2.NewSubroundsFactory( container, @@ -109,7 +110,7 @@ func TestFactory_GetMessageTypeName(t *testing.T) { func TestFactory_NewFactoryNilContainerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() worker := initWorker() fct, err := v2.NewSubroundsFactory( @@ -151,7 +152,7 @@ func TestFactory_NewFactoryNilConsensusStateShouldFail(t *testing.T) { func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetBlockchain(nil) @@ -174,7 +175,7 @@ func 
TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) { func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetBlockProcessor(nil) @@ -197,7 +198,7 @@ func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) { func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetBootStrapper(nil) @@ -220,7 +221,7 @@ func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) { func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetChronology(nil) @@ -243,7 +244,7 @@ func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetHasher(nil) @@ -266,7 +267,7 @@ func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) { func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetMarshalizer(nil) @@ -289,7 +290,7 @@ func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetMultiSignerContainer(nil) @@ -312,7 +313,7 @@ func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetRoundHandler(nil) @@ -335,7 +336,7 @@ func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetShardCoordinator(nil) @@ -358,7 +359,7 @@ func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) { func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetSyncTimer(nil) @@ -381,7 +382,7 @@ func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) { t.Parallel() - 
consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetValidatorGroupSelector(nil) @@ -404,7 +405,7 @@ func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) { func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() fct, err := v2.NewSubroundsFactory( @@ -425,7 +426,7 @@ func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() @@ -447,7 +448,7 @@ func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() @@ -469,7 +470,7 @@ func TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilThrottlerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() @@ -499,7 +500,7 @@ func TestFactory_NewFactoryShouldWork(t *testing.T) { func TestFactory_NewFactoryEmptyChainIDShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() diff --git a/consensus/spos/bls/v2/blsWorker.go b/consensus/spos/bls/v2/blsWorker.go deleted file mode 100644 index c627ff1af76..00000000000 --- a/consensus/spos/bls/v2/blsWorker.go +++ /dev/null @@ -1,164 +0,0 @@ -package v2 - -import ( - "github.com/multiversx/mx-chain-go/consensus" - "github.com/multiversx/mx-chain-go/consensus/spos" - "github.com/multiversx/mx-chain-go/consensus/spos/bls" -) - -// peerMaxMessagesPerSec defines how many messages can be propagated by a pid in a round. The value was chosen by -// following the next premises: -// 1. a leader can propagate as maximum as 3 messages per round: proposed header block + proposed body + final info; -// 2. due to the fact that a delayed signature of the proposer (from previous round) can be received in the current round -// adds an extra 1 to the total value, reaching value 4; -// 3. Because the leader might be selected in the next round and might have an empty data pool, it can send the newly -// empty proposed block at the very beginning of the next round. One extra message here, yielding to a total of 5. -// 4. If we consider the forks that can appear on the system wee need to add one more to the value. 
-// -// Validators only send one signature message in a round, treating the edge case of a delayed message, will need at most -// 2 messages per round (which is ok as it is below the set value of 5) -const peerMaxMessagesPerSec = uint32(6) - -// defaultMaxNumOfMessageTypeAccepted represents the maximum number of the same message type accepted in one round to be -// received from the same public key for the default message types -const defaultMaxNumOfMessageTypeAccepted = uint32(1) - -// maxNumOfMessageTypeSignatureAccepted represents the maximum number of the signature message type accepted in one round to be -// received from the same public key -const maxNumOfMessageTypeSignatureAccepted = uint32(2) - -// worker defines the data needed by spos to communicate between nodes which are in the validators group -type worker struct { -} - -// NewConsensusService creates a new worker object -func NewConsensusService() (*worker, error) { - wrk := worker{} - - return &wrk, nil -} - -// InitReceivedMessages initializes the MessagesType map for all messages for the current ConsensusService -func (wrk *worker) InitReceivedMessages() map[consensus.MessageType][]*consensus.Message { - receivedMessages := make(map[consensus.MessageType][]*consensus.Message) - receivedMessages[bls.MtBlockBodyAndHeader] = make([]*consensus.Message, 0) - receivedMessages[bls.MtBlockBody] = make([]*consensus.Message, 0) - receivedMessages[bls.MtBlockHeader] = make([]*consensus.Message, 0) - receivedMessages[bls.MtSignature] = make([]*consensus.Message, 0) - receivedMessages[bls.MtBlockHeaderFinalInfo] = make([]*consensus.Message, 0) - receivedMessages[bls.MtInvalidSigners] = make([]*consensus.Message, 0) - - return receivedMessages -} - -// GetMaxMessagesInARoundPerPeer returns the maximum number of messages a peer can send per round for BLS -func (wrk *worker) GetMaxMessagesInARoundPerPeer() uint32 { - return peerMaxMessagesPerSec -} - -// GetStringValue gets the name of the messageType -func (wrk *worker) GetStringValue(messageType consensus.MessageType) string { - return bls.GetStringValue(messageType) -} - -// GetSubroundName gets the subround name for the subround id provided -func (wrk *worker) GetSubroundName(subroundId int) string { - return bls.GetSubroundName(subroundId) -} - -// IsMessageWithBlockBodyAndHeader returns if the current messageType is about block body and header -func (wrk *worker) IsMessageWithBlockBodyAndHeader(msgType consensus.MessageType) bool { - return msgType == bls.MtBlockBodyAndHeader -} - -// IsMessageWithBlockBody returns if the current messageType is about block body -func (wrk *worker) IsMessageWithBlockBody(msgType consensus.MessageType) bool { - return msgType == bls.MtBlockBody -} - -// IsMessageWithBlockHeader returns if the current messageType is about block header -func (wrk *worker) IsMessageWithBlockHeader(msgType consensus.MessageType) bool { - return msgType == bls.MtBlockHeader -} - -// IsMessageWithSignature returns if the current messageType is about signature -func (wrk *worker) IsMessageWithSignature(msgType consensus.MessageType) bool { - return msgType == bls.MtSignature -} - -// IsMessageWithFinalInfo returns if the current messageType is about header final info -func (wrk *worker) IsMessageWithFinalInfo(msgType consensus.MessageType) bool { - return msgType == bls.MtBlockHeaderFinalInfo -} - -// IsMessageWithInvalidSigners returns if the current messageType is about invalid signers -func (wrk *worker) IsMessageWithInvalidSigners(msgType consensus.MessageType) bool { - 
return msgType == bls.MtInvalidSigners -} - -// IsMessageTypeValid returns if the current messageType is valid -func (wrk *worker) IsMessageTypeValid(msgType consensus.MessageType) bool { - isMessageTypeValid := msgType == bls.MtBlockBodyAndHeader || - msgType == bls.MtBlockBody || - msgType == bls.MtBlockHeader || - msgType == bls.MtSignature || - msgType == bls.MtBlockHeaderFinalInfo || - msgType == bls.MtInvalidSigners - - return isMessageTypeValid -} - -// IsSubroundSignature returns if the current subround is about signature -func (wrk *worker) IsSubroundSignature(subroundId int) bool { - return subroundId == bls.SrSignature -} - -// IsSubroundStartRound returns if the current subround is about start round -func (wrk *worker) IsSubroundStartRound(subroundId int) bool { - return subroundId == bls.SrStartRound -} - -// GetMessageRange provides the MessageType range used in checks by the consensus -func (wrk *worker) GetMessageRange() []consensus.MessageType { - var v []consensus.MessageType - - for i := bls.MtBlockBodyAndHeader; i <= bls.MtInvalidSigners; i++ { - v = append(v, i) - } - - return v -} - -// CanProceed returns if the current messageType can proceed further if previous subrounds finished -func (wrk *worker) CanProceed(consensusState *spos.ConsensusState, msgType consensus.MessageType) bool { - switch msgType { - case bls.MtBlockBodyAndHeader: - return consensusState.Status(bls.SrStartRound) == spos.SsFinished - case bls.MtBlockBody: - return consensusState.Status(bls.SrStartRound) == spos.SsFinished - case bls.MtBlockHeader: - return consensusState.Status(bls.SrStartRound) == spos.SsFinished - case bls.MtSignature: - return consensusState.Status(bls.SrBlock) == spos.SsFinished - case bls.MtBlockHeaderFinalInfo: - return consensusState.Status(bls.SrSignature) == spos.SsFinished - case bls.MtInvalidSigners: - return consensusState.Status(bls.SrSignature) == spos.SsFinished - } - - return false -} - -// GetMaxNumOfMessageTypeAccepted returns the maximum number of accepted consensus message types per round, per public key -func (wrk *worker) GetMaxNumOfMessageTypeAccepted(msgType consensus.MessageType) uint32 { - if msgType == bls.MtSignature { - return maxNumOfMessageTypeSignatureAccepted - } - - return defaultMaxNumOfMessageTypeAccepted -} - -// IsInterfaceNil returns true if there is no value under the interface -func (wrk *worker) IsInterfaceNil() bool { - return wrk == nil -} diff --git a/consensus/spos/bls/v2/blsWorker_test.go b/consensus/spos/bls/v2/blsWorker_test.go deleted file mode 100644 index 334ed6bbf43..00000000000 --- a/consensus/spos/bls/v2/blsWorker_test.go +++ /dev/null @@ -1,505 +0,0 @@ -package v2_test - -import ( - "testing" - - "github.com/multiversx/mx-chain-core-go/core/check" - crypto "github.com/multiversx/mx-chain-crypto-go" - "github.com/stretchr/testify/assert" - "golang.org/x/exp/slices" - - "github.com/multiversx/mx-chain-go/consensus" - "github.com/multiversx/mx-chain-go/consensus/spos" - "github.com/multiversx/mx-chain-go/consensus/spos/bls" - v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" - "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" - "github.com/multiversx/mx-chain-go/testscommon" -) - -func createEligibleList(size int) []string { - eligibleList := make([]string, 0) - for i := 0; i < size; i++ { - eligibleList = append(eligibleList, string([]byte{byte(i + 65)})) - } - return eligibleList -} - -func createEligibleListFromMap(mapKeys map[string]crypto.PrivateKey) []string { - eligibleList := make([]string, 0, 
len(mapKeys)) - for key := range mapKeys { - eligibleList = append(eligibleList, key) - } - slices.Sort(eligibleList) - return eligibleList -} - -func initConsensusStateWithNodesCoordinator(validatorsGroupSelector nodesCoordinator.NodesCoordinator) *spos.ConsensusState { - return initConsensusStateWithKeysHandlerAndNodesCoordinator(&testscommon.KeysHandlerStub{}, validatorsGroupSelector) -} - -func initConsensusState() *spos.ConsensusState { - return initConsensusStateWithKeysHandler(&testscommon.KeysHandlerStub{}) -} - -func initConsensusStateWithArgs(keysHandler consensus.KeysHandler, mapKeys map[string]crypto.PrivateKey) *spos.ConsensusState { - return initConsensusStateWithKeysHandlerWithGroupSizeWithRealKeys(keysHandler, mapKeys) -} - -func initConsensusStateWithKeysHandler(keysHandler consensus.KeysHandler) *spos.ConsensusState { - consensusGroupSize := 9 - return initConsensusStateWithKeysHandlerWithGroupSize(keysHandler, consensusGroupSize) -} - -func initConsensusStateWithKeysHandlerAndNodesCoordinator(keysHandler consensus.KeysHandler, validatorsGroupSelector nodesCoordinator.NodesCoordinator) *spos.ConsensusState { - leader, consensusValidators, _ := validatorsGroupSelector.GetConsensusValidatorsPublicKeys([]byte("randomness"), 0, 0, 0) - eligibleNodesPubKeys := make(map[string]struct{}) - for _, key := range consensusValidators { - eligibleNodesPubKeys[key] = struct{}{} - } - return createConsensusStateWithNodes(eligibleNodesPubKeys, consensusValidators, leader, keysHandler) -} - -func initConsensusStateWithArgsVerifySignature(keysHandler consensus.KeysHandler, keys []string) *spos.ConsensusState { - numberOfKeys := len(keys) - eligibleNodesPubKeys := make(map[string]struct{}, numberOfKeys) - for _, key := range keys { - eligibleNodesPubKeys[key] = struct{}{} - } - - indexLeader := 1 - rcns, _ := spos.NewRoundConsensus( - eligibleNodesPubKeys, - numberOfKeys, - keys[indexLeader], - keysHandler, - ) - rcns.SetConsensusGroup(keys) - rcns.ResetRoundState() - - pBFTThreshold := numberOfKeys*2/3 + 1 - pBFTFallbackThreshold := numberOfKeys*1/2 + 1 - rthr := spos.NewRoundThreshold() - rthr.SetThreshold(1, 1) - rthr.SetThreshold(2, pBFTThreshold) - rthr.SetFallbackThreshold(1, 1) - rthr.SetFallbackThreshold(2, pBFTFallbackThreshold) - - rstatus := spos.NewRoundStatus() - rstatus.ResetRoundStatus() - cns := spos.NewConsensusState( - rcns, - rthr, - rstatus, - ) - cns.Data = []byte("X") - cns.RoundIndex = 0 - - return cns -} - -func initConsensusStateWithKeysHandlerWithGroupSize(keysHandler consensus.KeysHandler, consensusGroupSize int) *spos.ConsensusState { - eligibleList := createEligibleList(consensusGroupSize) - - eligibleNodesPubKeys := make(map[string]struct{}) - for _, key := range eligibleList { - eligibleNodesPubKeys[key] = struct{}{} - } - - return createConsensusStateWithNodes(eligibleNodesPubKeys, eligibleList, eligibleList[0], keysHandler) -} - -func initConsensusStateWithKeysHandlerWithGroupSizeWithRealKeys(keysHandler consensus.KeysHandler, mapKeys map[string]crypto.PrivateKey) *spos.ConsensusState { - eligibleList := createEligibleListFromMap(mapKeys) - - eligibleNodesPubKeys := make(map[string]struct{}, len(eligibleList)) - for _, key := range eligibleList { - eligibleNodesPubKeys[key] = struct{}{} - } - - return createConsensusStateWithNodes(eligibleNodesPubKeys, eligibleList, eligibleList[0], keysHandler) -} - -func createConsensusStateWithNodes(eligibleNodesPubKeys map[string]struct{}, consensusValidators []string, leader string, keysHandler consensus.KeysHandler) 
*spos.ConsensusState { - consensusGroupSize := len(consensusValidators) - rcns, _ := spos.NewRoundConsensus( - eligibleNodesPubKeys, - consensusGroupSize, - consensusValidators[1], - keysHandler, - ) - - rcns.SetConsensusGroup(consensusValidators) - rcns.SetLeader(leader) - rcns.ResetRoundState() - - pBFTThreshold := consensusGroupSize*2/3 + 1 - pBFTFallbackThreshold := consensusGroupSize*1/2 + 1 - - rthr := spos.NewRoundThreshold() - rthr.SetThreshold(1, 1) - rthr.SetThreshold(2, pBFTThreshold) - rthr.SetFallbackThreshold(1, 1) - rthr.SetFallbackThreshold(2, pBFTFallbackThreshold) - - rstatus := spos.NewRoundStatus() - rstatus.ResetRoundStatus() - - cns := spos.NewConsensusState( - rcns, - rthr, - rstatus, - ) - - cns.Data = []byte("X") - cns.RoundIndex = 0 - return cns -} - -func TestWorker_NewConsensusServiceShouldWork(t *testing.T) { - t.Parallel() - - service, err := v2.NewConsensusService() - assert.Nil(t, err) - assert.False(t, check.IfNil(service)) -} - -func TestWorker_InitReceivedMessagesShouldWork(t *testing.T) { - t.Parallel() - - bnService, _ := v2.NewConsensusService() - messages := bnService.InitReceivedMessages() - - receivedMessages := make(map[consensus.MessageType][]*consensus.Message) - receivedMessages[bls.MtBlockBodyAndHeader] = make([]*consensus.Message, 0) - receivedMessages[bls.MtBlockBody] = make([]*consensus.Message, 0) - receivedMessages[bls.MtBlockHeader] = make([]*consensus.Message, 0) - receivedMessages[bls.MtSignature] = make([]*consensus.Message, 0) - receivedMessages[bls.MtBlockHeaderFinalInfo] = make([]*consensus.Message, 0) - receivedMessages[bls.MtInvalidSigners] = make([]*consensus.Message, 0) - - assert.Equal(t, len(receivedMessages), len(messages)) - assert.NotNil(t, messages[bls.MtBlockBodyAndHeader]) - assert.NotNil(t, messages[bls.MtBlockBody]) - assert.NotNil(t, messages[bls.MtBlockHeader]) - assert.NotNil(t, messages[bls.MtSignature]) - assert.NotNil(t, messages[bls.MtBlockHeaderFinalInfo]) - assert.NotNil(t, messages[bls.MtInvalidSigners]) -} - -func TestWorker_GetMessageRangeShouldWork(t *testing.T) { - t.Parallel() - - v := make([]consensus.MessageType, 0) - blsService, _ := v2.NewConsensusService() - - messagesRange := blsService.GetMessageRange() - assert.NotNil(t, messagesRange) - - for i := bls.MtBlockBodyAndHeader; i <= bls.MtInvalidSigners; i++ { - v = append(v, i) - } - assert.NotNil(t, v) - - for i, val := range messagesRange { - assert.Equal(t, v[i], val) - } -} - -func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyAndHeaderShouldWork(t *testing.T) { - t.Parallel() - - blsService, _ := v2.NewConsensusService() - - consensusState := initConsensusState() - consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) - - canProceed := blsService.CanProceed(consensusState, bls.MtBlockBodyAndHeader) - assert.True(t, canProceed) -} - -func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyAndHeaderShouldNotWork(t *testing.T) { - t.Parallel() - - blsService, _ := v2.NewConsensusService() - - consensusState := initConsensusState() - consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) - - canProceed := blsService.CanProceed(consensusState, bls.MtBlockBodyAndHeader) - assert.False(t, canProceed) -} - -func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyShouldWork(t *testing.T) { - t.Parallel() - - blsService, _ := v2.NewConsensusService() - - consensusState := initConsensusState() - consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) - - canProceed := blsService.CanProceed(consensusState, 
bls.MtBlockBody) - assert.True(t, canProceed) -} - -func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyShouldNotWork(t *testing.T) { - t.Parallel() - - blsService, _ := v2.NewConsensusService() - - consensusState := initConsensusState() - consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) - - canProceed := blsService.CanProceed(consensusState, bls.MtBlockBody) - assert.False(t, canProceed) -} - -func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockHeaderShouldWork(t *testing.T) { - t.Parallel() - - blsService, _ := v2.NewConsensusService() - - consensusState := initConsensusState() - consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) - - canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeader) - assert.True(t, canProceed) -} - -func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockHeaderShouldNotWork(t *testing.T) { - t.Parallel() - - blsService, _ := v2.NewConsensusService() - - consensusState := initConsensusState() - consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) - - canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeader) - assert.False(t, canProceed) -} - -func TestWorker_CanProceedWithSrBlockFinishedForMtBlockHeaderShouldWork(t *testing.T) { - t.Parallel() - - blsService, _ := v2.NewConsensusService() - - consensusState := initConsensusState() - consensusState.SetStatus(bls.SrBlock, spos.SsFinished) - - canProceed := blsService.CanProceed(consensusState, bls.MtSignature) - assert.True(t, canProceed) -} - -func TestWorker_CanProceedWithSrBlockRoundNotFinishedForMtBlockHeaderShouldNotWork(t *testing.T) { - t.Parallel() - - blsService, _ := v2.NewConsensusService() - - consensusState := initConsensusState() - consensusState.SetStatus(bls.SrBlock, spos.SsNotFinished) - - canProceed := blsService.CanProceed(consensusState, bls.MtSignature) - assert.False(t, canProceed) -} - -func TestWorker_CanProceedWithSrSignatureFinishedForMtBlockHeaderFinalInfoShouldWork(t *testing.T) { - t.Parallel() - - blsService, _ := v2.NewConsensusService() - - consensusState := initConsensusState() - consensusState.SetStatus(bls.SrSignature, spos.SsFinished) - - canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeaderFinalInfo) - assert.True(t, canProceed) -} - -func TestWorker_CanProceedWithSrSignatureRoundNotFinishedForMtBlockHeaderFinalInfoShouldNotWork(t *testing.T) { - t.Parallel() - - blsService, _ := v2.NewConsensusService() - - consensusState := initConsensusState() - consensusState.SetStatus(bls.SrSignature, spos.SsNotFinished) - - canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeaderFinalInfo) - assert.False(t, canProceed) -} - -func TestWorker_CanProceedWitUnkownMessageTypeShouldNotWork(t *testing.T) { - t.Parallel() - - blsService, _ := v2.NewConsensusService() - consensusState := initConsensusState() - - canProceed := blsService.CanProceed(consensusState, -1) - assert.False(t, canProceed) -} - -func TestWorker_GetSubroundName(t *testing.T) { - t.Parallel() - - service, _ := v2.NewConsensusService() - - r := service.GetSubroundName(bls.SrStartRound) - assert.Equal(t, "(START_ROUND)", r) - r = service.GetSubroundName(bls.SrBlock) - assert.Equal(t, "(BLOCK)", r) - r = service.GetSubroundName(bls.SrSignature) - assert.Equal(t, "(SIGNATURE)", r) - r = service.GetSubroundName(bls.SrEndRound) - assert.Equal(t, "(END_ROUND)", r) - r = service.GetSubroundName(-1) - assert.Equal(t, "Undefined subround", r) -} - -func TestWorker_GetStringValue(t *testing.T) { - t.Parallel() - - 
service, _ := v2.NewConsensusService() - - r := service.GetStringValue(bls.MtBlockBodyAndHeader) - assert.Equal(t, bls.BlockBodyAndHeaderStringValue, r) - r = service.GetStringValue(bls.MtBlockBody) - assert.Equal(t, bls.BlockBodyStringValue, r) - r = service.GetStringValue(bls.MtBlockHeader) - assert.Equal(t, bls.BlockHeaderStringValue, r) - r = service.GetStringValue(bls.MtSignature) - assert.Equal(t, bls.BlockSignatureStringValue, r) - r = service.GetStringValue(bls.MtBlockHeaderFinalInfo) - assert.Equal(t, bls.BlockHeaderFinalInfoStringValue, r) - r = service.GetStringValue(bls.MtUnknown) - assert.Equal(t, bls.BlockUnknownStringValue, r) - r = service.GetStringValue(-1) - assert.Equal(t, bls.BlockDefaultStringValue, r) -} - -func TestWorker_IsMessageWithBlockBodyAndHeader(t *testing.T) { - t.Parallel() - - service, _ := v2.NewConsensusService() - - ret := service.IsMessageWithBlockBodyAndHeader(bls.MtBlockBody) - assert.False(t, ret) - - ret = service.IsMessageWithBlockBodyAndHeader(bls.MtBlockHeader) - assert.False(t, ret) - - ret = service.IsMessageWithBlockBodyAndHeader(bls.MtBlockBodyAndHeader) - assert.True(t, ret) -} - -func TestWorker_IsMessageWithBlockBody(t *testing.T) { - t.Parallel() - - service, _ := v2.NewConsensusService() - - ret := service.IsMessageWithBlockBody(bls.MtBlockHeader) - assert.False(t, ret) - - ret = service.IsMessageWithBlockBody(bls.MtBlockBody) - assert.True(t, ret) -} - -func TestWorker_IsMessageWithBlockHeader(t *testing.T) { - t.Parallel() - - service, _ := v2.NewConsensusService() - - ret := service.IsMessageWithBlockHeader(bls.MtBlockBody) - assert.False(t, ret) - - ret = service.IsMessageWithBlockHeader(bls.MtBlockHeader) - assert.True(t, ret) -} - -func TestWorker_IsMessageWithSignature(t *testing.T) { - t.Parallel() - - service, _ := v2.NewConsensusService() - - ret := service.IsMessageWithSignature(bls.MtBlockBodyAndHeader) - assert.False(t, ret) - - ret = service.IsMessageWithSignature(bls.MtSignature) - assert.True(t, ret) -} - -func TestWorker_IsMessageWithFinalInfo(t *testing.T) { - t.Parallel() - - service, _ := v2.NewConsensusService() - - ret := service.IsMessageWithFinalInfo(bls.MtSignature) - assert.False(t, ret) - - ret = service.IsMessageWithFinalInfo(bls.MtBlockHeaderFinalInfo) - assert.True(t, ret) -} - -func TestWorker_IsMessageWithInvalidSigners(t *testing.T) { - t.Parallel() - - service, _ := v2.NewConsensusService() - - ret := service.IsMessageWithInvalidSigners(bls.MtBlockHeaderFinalInfo) - assert.False(t, ret) - - ret = service.IsMessageWithInvalidSigners(bls.MtInvalidSigners) - assert.True(t, ret) -} - -func TestWorker_IsSubroundSignature(t *testing.T) { - t.Parallel() - - service, _ := v2.NewConsensusService() - - ret := service.IsSubroundSignature(bls.SrEndRound) - assert.False(t, ret) - - ret = service.IsSubroundSignature(bls.SrSignature) - assert.True(t, ret) -} - -func TestWorker_IsSubroundStartRound(t *testing.T) { - t.Parallel() - - service, _ := v2.NewConsensusService() - - ret := service.IsSubroundStartRound(bls.SrSignature) - assert.False(t, ret) - - ret = service.IsSubroundStartRound(bls.SrStartRound) - assert.True(t, ret) -} - -func TestWorker_IsMessageTypeValid(t *testing.T) { - t.Parallel() - - service, _ := v2.NewConsensusService() - - ret := service.IsMessageTypeValid(bls.MtBlockBody) - assert.True(t, ret) - - ret = service.IsMessageTypeValid(666) - assert.False(t, ret) -} - -func TestWorker_GetMaxNumOfMessageTypeAccepted(t *testing.T) { - t.Parallel() - - service, _ := v2.NewConsensusService() - 
t.Run("message type signature", func(t *testing.T) { - t.Parallel() - - assert.Equal(t, v2.MaxNumOfMessageTypeSignatureAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtSignature)) - }) - t.Run("other message types", func(t *testing.T) { - t.Parallel() - - assert.Equal(t, v2.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtUnknown)) - assert.Equal(t, v2.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockBody)) - assert.Equal(t, v2.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockHeader)) - assert.Equal(t, v2.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockBodyAndHeader)) - assert.Equal(t, v2.DefaultMaxNumOfMessageTypeAccepted, service.GetMaxNumOfMessageTypeAccepted(bls.MtBlockHeaderFinalInfo)) - }) -} diff --git a/consensus/spos/bls/v2/export_test.go b/consensus/spos/bls/v2/export_test.go index e3c1d9caff1..72bdfb1790d 100644 --- a/consensus/spos/bls/v2/export_test.go +++ b/consensus/spos/bls/v2/export_test.go @@ -20,8 +20,6 @@ import ( ) const ProcessingThresholdPercent = processingThresholdPercent -const DefaultMaxNumOfMessageTypeAccepted = defaultMaxNumOfMessageTypeAccepted -const MaxNumOfMessageTypeSignatureAccepted = maxNumOfMessageTypeSignatureAccepted // factory diff --git a/consensus/spos/bls/v2/subroundBlock_test.go b/consensus/spos/bls/v2/subroundBlock_test.go index b1163137262..663a3ece1d7 100644 --- a/consensus/spos/bls/v2/subroundBlock_test.go +++ b/consensus/spos/bls/v2/subroundBlock_test.go @@ -21,6 +21,7 @@ import ( v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" "github.com/multiversx/mx-chain-go/testscommon" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -106,7 +107,7 @@ func initSubroundBlock( } } - consensusState := initConsensusStateWithNodesCoordinator(container.NodesCoordinator()) + consensusState := initializers.InitConsensusStateWithNodesCoordinator(container.NodesCoordinator()) ch := make(chan bool, 1) container.SetBlockchain(blockChain) @@ -144,7 +145,7 @@ func initSubroundBlockWithBlockProcessor( container.SetBlockchain(blockChain) container.SetBlockProcessor(blockProcessorMock) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -168,7 +169,7 @@ func TestSubroundBlock_NewSubroundBlockNilBlockchainShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -184,7 +185,7 @@ func TestSubroundBlock_NewSubroundBlockNilBlockProcessorShouldFail(t *testing.T) t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -199,7 +200,7 @@ func 
TestSubroundBlock_NewSubroundBlockNilBlockProcessorShouldFail(t *testing.T) func TestSubroundBlock_NewSubroundBlockNilConsensusStateShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -214,7 +215,7 @@ func TestSubroundBlock_NewSubroundBlockNilHasherShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -229,7 +230,7 @@ func TestSubroundBlock_NewSubroundBlockNilMarshalizerShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -244,7 +245,7 @@ func TestSubroundBlock_NewSubroundBlockNilMultiSignerContainerShouldFail(t *test t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -259,7 +260,7 @@ func TestSubroundBlock_NewSubroundBlockNilRoundHandlerShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -274,7 +275,7 @@ func TestSubroundBlock_NewSubroundBlockNilShardCoordinatorShouldFail(t *testing. 
t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -289,7 +290,7 @@ func TestSubroundBlock_NewSubroundBlockNilSyncTimerShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -304,7 +305,7 @@ func TestSubroundBlock_NewSubroundBlockNilWorkerShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -322,7 +323,7 @@ func TestSubroundBlock_NewSubroundBlockShouldWork(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) srBlock, err := defaultSubroundBlockFromSubround(sr) @@ -480,7 +481,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { } container.SetBlockchain(chainHandler) - consensusState := initConsensusStateWithNodesCoordinator(container.NodesCoordinator()) + consensusState := initializers.InitConsensusStateWithNodesCoordinator(container.NodesCoordinator()) ch := make(chan bool, 1) baseSr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -1423,7 +1424,7 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDurationWithZeroDurationShould container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) diff --git a/consensus/spos/bls/v2/subroundEndRound_test.go b/consensus/spos/bls/v2/subroundEndRound_test.go index 98edb65e825..68c12d31674 100644 --- a/consensus/spos/bls/v2/subroundEndRound_test.go +++ b/consensus/spos/bls/v2/subroundEndRound_test.go @@ -29,6 +29,7 @@ import ( "github.com/multiversx/mx-chain-go/p2p/factory" "github.com/multiversx/mx-chain-go/testscommon" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" @@ -40,7 +41,7 @@ func initSubroundEndRoundWithContainer( appStatusHandler core.AppStatusHandler, ) v2.SubroundEndRound { ch := make(chan bool, 1) - consensusState := initConsensusStateWithNodesCoordinator(container.NodesCoordinator()) + consensusState := initializers.InitConsensusStateWithNodesCoordinator(container.NodesCoordinator()) sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -123,7 +124,7 @@ func TestNewSubroundEndRound(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := 
initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( bls.SrSignature, @@ -207,7 +208,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing. t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -243,7 +244,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *test t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -279,7 +280,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *test t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -316,7 +317,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -352,7 +353,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testin t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -388,7 +389,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -424,7 +425,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilThrottlerShouldFail(t *testing.T t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -460,7 +461,7 @@ func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -1056,7 +1057,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { } ch := make(chan bool, 1) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -1199,7 +1200,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { }) ch := make(chan bool, 1) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -1560,7 +1561,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { }) ch := make(chan bool, 1) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -1731,7 +1732,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { }) ch := make(chan bool, 1) - 
consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -1855,7 +1856,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -2223,7 +2224,7 @@ func TestSubroundEndRound_getMinConsensusGroupIndexOfManagedKeys(t *testing.T) { container := consensusMocks.InitConsensusCore() keysHandler := &testscommon.KeysHandlerStub{} ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, diff --git a/consensus/spos/bls/v2/subroundSignature_test.go b/consensus/spos/bls/v2/subroundSignature_test.go index b0f0dc060b0..24289498d83 100644 --- a/consensus/spos/bls/v2/subroundSignature_test.go +++ b/consensus/spos/bls/v2/subroundSignature_test.go @@ -23,6 +23,7 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/testscommon" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) @@ -30,7 +31,7 @@ import ( const setThresholdJobsDone = "threshold" func initSubroundSignatureWithContainer(container *consensusMocks.ConsensusCoreMock) v2.SubroundSignature { - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -69,7 +70,7 @@ func TestNewSubroundSignature(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -165,7 +166,7 @@ func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *te t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -201,7 +202,7 @@ func TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -236,7 +237,7 @@ func TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -271,7 +272,7 @@ func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *test t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -307,7 +308,7 @@ func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing t.Parallel() container := 
consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -342,7 +343,7 @@ func TestSubroundSignature_NewSubroundSignatureNilAppStatusHandlerShouldFail(t * t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -377,7 +378,7 @@ func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -520,7 +521,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true @@ -627,7 +628,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { }, } container.SetSigningHandler(signingHandler) - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true @@ -734,7 +735,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { return make([]byte, 0), expErr }, }) - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true @@ -801,7 +802,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { return fmt.Errorf("error") }, }) - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true @@ -868,7 +869,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { return nil }, }) - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true @@ -936,7 +937,7 @@ func TestSubroundSignature_DoSignatureJobForManagedKeys(t *testing.T) { }, } container.SetSigningHandler(signingHandler) - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true @@ -1036,7 +1037,7 @@ func TestSubroundSignature_DoSignatureJobForManagedKeys(t *testing.T) { } container.SetEnableEpochsHandler(enableEpochsHandler) - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true diff --git a/consensus/spos/bls/v2/subroundStartRound_test.go b/consensus/spos/bls/v2/subroundStartRound_test.go index d288e67b14b..da1ee8c1b04 100644 --- a/consensus/spos/bls/v2/subroundStartRound_test.go +++ b/consensus/spos/bls/v2/subroundStartRound_test.go @@ -13,6 +13,7 @@ 
import ( processMock "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/testscommon/bootstrapperStubs" "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/stretchr/testify/assert" @@ -75,7 +76,7 @@ func defaultSubround( } func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) v2.SubroundStartRound { - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) srStartRound, _ := v2.NewSubroundStartRound( @@ -97,7 +98,7 @@ func TestNewSubroundStartRound(t *testing.T) { t.Parallel() ch := make(chan bool, 1) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := consensus.InitConsensusCore() sr, _ := spos.NewSubround( -1, @@ -161,7 +162,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilBlockChainShouldFail(t *test container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -177,7 +178,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilBootstrapperShouldFail(t *te container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -192,7 +193,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilConsensusStateShouldFail(t * t.Parallel() container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -209,7 +210,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilMultiSignerContainerShouldFa container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -225,7 +226,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilRoundHandlerShouldFail(t *te container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -241,7 +242,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilSyncTimerShouldFail(t *testi container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -257,7 +258,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilValidatorGroupSelectorShould container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -273,7 +274,7 @@ func TestSubroundStartRound_NewSubroundStartRoundShouldWork(t *testing.T) { container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, 
container) @@ -289,7 +290,7 @@ func TestSubroundStartRound_DoStartRoundShouldReturnTrue(t *testing.T) { container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -445,7 +446,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenIsNotInTheConsen t.Parallel() container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetSelfPubKey(consensusState.SelfPubKey() + "X") ch := make(chan bool, 1) @@ -511,7 +512,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) consensusState.SetSelfPubKey("not in consensus") sr, _ := spos.NewSubround( -1, @@ -563,7 +564,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) consensusState.SetSelfPubKey("B") sr, _ := spos.NewSubround( -1, @@ -612,7 +613,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { return string(pkBytes) == consensusState.SelfPubKey() } @@ -675,7 +676,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) leader, _ := consensusState.GetLeader() consensusState.SetSelfPubKey(leader) sr, _ := spos.NewSubround( @@ -738,7 +739,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) leader, _ := consensusState.GetLeader() consensusState.SetSelfPubKey(leader) keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { @@ -775,7 +776,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { func buildDefaultSubround(container spos.ConsensusCoreHandler) *spos.Subround { ch := make(chan bool, 1) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() sr, _ := spos.NewSubround( -1, bls.SrStartRound, diff --git a/consensus/spos/sposFactory/sposFactory.go b/consensus/spos/sposFactory/sposFactory.go index 0dd5e10011b..196ce66133f 100644 --- a/consensus/spos/sposFactory/sposFactory.go +++ b/consensus/spos/sposFactory/sposFactory.go @@ -89,7 +89,7 @@ func GetBroadcastMessenger( LeaderCacheSize: maxDelayCacheSize, ValidatorCacheSize: maxDelayCacheSize, AlarmScheduler: alarmScheduler, - Config: config, + Config: config, } delayedBroadcaster, err := broadcast.NewDelayedBlockBroadcaster(dbbArgs) diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 0b3b30c2091..5fa1355f9e0 100644 --- a/consensus/spos/worker_test.go +++ 
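A recurring pattern in the signature and start-round test hunks above is worth calling out once: the tests build a consensus state whose keys handler reports every public key as managed locally, which forces the multikey code paths. A condensed sketch of that setup follows; the package and helper names are hypothetical, while the stub field and the initializer come straight from the hunks.

package multikeysetup

import (
	"github.com/multiversx/mx-chain-go/consensus/spos"
	"github.com/multiversx/mx-chain-go/testscommon"
	"github.com/multiversx/mx-chain-go/testscommon/consensus/initializers"
)

// newMultikeyConsensusState returns a consensus state in which every
// validator key is reported as managed by the current node, so the multikey
// branches of the subrounds are exercised.
func newMultikeyConsensusState() *spos.ConsensusState {
	keysHandler := &testscommon.KeysHandlerStub{
		IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool {
			return true
		},
	}

	return initializers.InitConsensusStateWithKeysHandler(keysHandler)
}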
b/consensus/spos/worker_test.go @@ -24,7 +24,6 @@ import ( "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" - v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/testscommon" @@ -85,7 +84,7 @@ func createDefaultWorkerArgs(appStatusHandler core.AppStatusHandler) *spos.Worke } syncTimerMock := &consensusMocks.SyncTimerMock{} hasher := &hashingMocks.HasherMock{} - blsService, _ := v1.NewConsensusService() + blsService, _ := bls.NewConsensusService() poolAdder := cache.NewCacherMock() scheduledProcessorArgs := spos.ScheduledProcessorWrapperArgs{ From 1c6f59a07651a45973eb6db92c412bfd43dcef21 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Wed, 25 Sep 2024 17:36:40 +0300 Subject: [PATCH 10/30] add handler for consensus subrounds stm --- consensus/spos/bls/subroundsHandler.go | 174 ++++++++++++++++++++++ consensus/spos/sposFactory/sposFactory.go | 38 ----- factory/consensus/consensusComponents.go | 32 ++-- 3 files changed, 191 insertions(+), 53 deletions(-) create mode 100644 consensus/spos/bls/subroundsHandler.go diff --git a/consensus/spos/bls/subroundsHandler.go b/consensus/spos/bls/subroundsHandler.go new file mode 100644 index 00000000000..f90f839684a --- /dev/null +++ b/consensus/spos/bls/subroundsHandler.go @@ -0,0 +1,174 @@ +package bls + +import ( + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/outport" +) + +// pick up stuff from consensusComponents and intermediate it here + +// SubroundsHandlerArgs struct contains the needed data for the SubroundsHandler +type SubroundsHandlerArgs struct { + Chronology consensus.ChronologyHandler + ConsensusCoreHandler spos.ConsensusCoreHandler + ConsensusState *spos.ConsensusState + Worker factory.ConsensusWorker + SignatureThrottler core.Throttler + AppStatusHandler core.AppStatusHandler + OutportHandler outport.OutportHandler + SentSignatureTracker spos.SentSignaturesTracker + EnableEpochsHandler core.EnableEpochsHandler + ChainID []byte + CurrentPid core.PeerID +} + +// SubroundsFactory defines the methods needed to generate the subrounds +type SubroundsFactory interface { + GenerateSubrounds() error + SetOutportHandler(driver outport.OutportHandler) + IsInterfaceNil() bool +} + +type ConsensusStateMachineType int + +// SubroundsHandler struct contains the needed data for the SubroundsHandler +type SubroundsHandler struct { + chronology consensus.ChronologyHandler + consensusCoreHandler spos.ConsensusCoreHandler + consensusState *spos.ConsensusState + worker factory.ConsensusWorker + signatureThrottler core.Throttler + appStatusHandler core.AppStatusHandler + outportHandler outport.OutportHandler + sentSignatureTracker spos.SentSignaturesTracker + enableEpochsHandler core.EnableEpochsHandler + chainID []byte + currentPid core.PeerID + currentConsensusType ConsensusStateMachineType +} + +const ( + ConsensusNone ConsensusStateMachineType = iota + ConsensusV1 + ConsensusV2 +) + +func NewSubroundsHandler(args 
*SubroundsHandlerArgs) (*SubroundsHandler, error) { + if check.IfNil(args.Chronology) { + return nil, ErrNilChronologyHandler + } + if check.IfNil(args.ConsensusCoreHandler) { + return nil, ErrNilConsensusCoreHandler + } + // TODO: use an interface instead + if args.ConsensusState == nil { + return nil, ErrNilConsensusState + } + if check.IfNil(args.Worker) { + return nil, ErrNilWorker + } + if check.IfNil(args.SignatureThrottler) { + return nil, ErrNilSignatureThrottler + } + if check.IfNil(args.AppStatusHandler) { + return nil, ErrNilAppStatusHandler + } + if check.IfNil(args.OutportHandler) { + return nil, ErrNilOutportHandler + } + if check.IfNil(args.SentSignatureTracker) { + return nil, ErrNilSentSignatureTracker + } + if check.IfNil(args.EnableEpochsHandler) { + return nil, ErrNilEnableEpochsHandler + } + if args.ChainID == nil { + return nil, ErrNilChainID + } + if len(args.CurrentPid) == 0 { + return nil, ErrNilCurrentPid + } + + return &SubroundsHandler{ + chronology: args.Chronology, + consensusCoreHandler: args.ConsensusCoreHandler, + consensusState: args.ConsensusState, + worker: args.Worker, + signatureThrottler: args.SignatureThrottler, + appStatusHandler: args.AppStatusHandler, + outportHandler: args.OutportHandler, + sentSignatureTracker: args.SentSignatureTracker, + enableEpochsHandler: args.EnableEpochsHandler, + chainID: args.ChainID, + currentPid: args.CurrentPid, + currentConsensusType: ConsensusNone, + }, nil +} + +// Start starts the sub-rounds handler +func (s *SubroundsHandler) Start(epoch uint32) error { + return s.initSubroundsForEpoch(epoch) +} + +func (s *SubroundsHandler) initSubroundsForEpoch(epoch uint32) error { + var err error + var fct SubroundsFactory + if s.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, epoch) { + if s.currentConsensusType == ConsensusV2 { + return nil + } + + s.currentConsensusType = ConsensusV2 + fct, err = v2.NewSubroundsFactory( + s.consensusCoreHandler, + s.consensusState, + s.worker, + s.chainID, + s.currentPid, + s.appStatusHandler, + s.sentSignatureTracker, + s.signatureThrottler, + ) + } else { + if s.currentConsensusType == ConsensusV1 { + return nil + } + + s.currentConsensusType = ConsensusV1 + fct, err = v1.NewSubroundsFactory( + s.consensusCoreHandler, + s.consensusState, + s.worker, + s.chainID, + s.currentPid, + s.appStatusHandler, + s.sentSignatureTracker, + ) + } + if err != nil { + return err + } + + fct.SetOutportHandler(s.outportHandler) + err = fct.GenerateSubrounds() + if err != nil { + return err + } + + s.chronology.StartRounds() + return nil +} + +// HandleEpochChange handles the epoch change event +// TODO: register to the epoch change event +func (s *SubroundsHandler) HandleEpochChange(epoch uint32) error { + return s.initSubroundsForEpoch(epoch) +} diff --git a/consensus/spos/sposFactory/sposFactory.go b/consensus/spos/sposFactory/sposFactory.go index 196ce66133f..bb2d409a97f 100644 --- a/consensus/spos/sposFactory/sposFactory.go +++ b/consensus/spos/sposFactory/sposFactory.go @@ -12,48 +12,10 @@ import ( "github.com/multiversx/mx-chain-go/consensus/broadcast" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" - "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" ) -// GetSubroundsFactory returns a subrounds factory depending on the given parameter -func GetSubroundsFactory( - consensusDataContainer spos.ConsensusCoreHandler, - consensusState 
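The selection rule above is the heart of the new handler, so a compact sketch may help: the only input that decides between the two subround factories is the equivalent-messages feature flag for the target epoch. The package name, function name and string return values below are illustrative and not part of the patch; the flag constant and the IsFlagEnabledInEpoch call are the ones used by initSubroundsForEpoch.

package blsversion

import (
	"github.com/multiversx/mx-chain-core-go/core"

	"github.com/multiversx/mx-chain-go/common"
)

// pickConsensusVersion mirrors, in isolation, the decision taken by
// initSubroundsForEpoch: v2 subrounds are generated only for epochs in which
// the equivalent-messages flag is active, otherwise the v1 factory is used.
// The real handler additionally returns early when the requested version is
// already installed, so the subrounds are not regenerated needlessly.
func pickConsensusVersion(enableEpochsHandler core.EnableEpochsHandler, epoch uint32) string {
	if enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, epoch) {
		return "v2"
	}

	return "v1"
}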
*spos.ConsensusState, - worker spos.WorkerHandler, - consensusType string, - appStatusHandler core.AppStatusHandler, - outportHandler outport.OutportHandler, - sentSignatureTracker spos.SentSignaturesTracker, - chainID []byte, - currentPid core.PeerID, - signatureThrottler core.Throttler, -) (spos.SubroundsFactory, error) { - switch consensusType { - case blsConsensusType: - subRoundFactoryBls, err := bls.NewSubroundsFactory( - consensusDataContainer, - consensusState, - worker, - chainID, - currentPid, - appStatusHandler, - sentSignatureTracker, - signatureThrottler, - ) - if err != nil { - return nil, err - } - - subRoundFactoryBls.SetOutportHandler(outportHandler) - - return subRoundFactoryBls, nil - default: - return nil, ErrInvalidConsensusType - } -} - // GetConsensusCoreFactory returns a consensus service depending on the given parameter func GetConsensusCoreFactory(consensusType string) (spos.ConsensusService, error) { switch consensusType { diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index eb7887d20da..16052a0b531 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/blacklist" "github.com/multiversx/mx-chain-go/consensus/chronology" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/consensus/spos/debug" "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" "github.com/multiversx/mx-chain-go/dataRetriever" @@ -279,29 +280,30 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { return nil, err } - fct, err := sposFactory.GetSubroundsFactory( - consensusDataContainer, - consensusState, - cc.worker, - ccf.config.Consensus.Type, - ccf.statusCoreComponents.AppStatusHandler(), - ccf.statusComponents.OutportHandler(), - ccf.processComponents.SentSignaturesTracker(), - []byte(ccf.coreComponents.ChainID()), - ccf.networkComponents.NetworkMessenger().ID(), - signatureThrottler, - ) + subroundsHandlerArgs := &bls.SubroundsHandlerArgs{ + Chronology: cc.chronology, + ConsensusCoreHandler: consensusDataContainer, + ConsensusState: consensusState, + Worker: cc.worker, + SignatureThrottler: signatureThrottler, + AppStatusHandler: ccf.statusCoreComponents.AppStatusHandler(), + OutportHandler: ccf.statusComponents.OutportHandler(), + SentSignatureTracker: ccf.processComponents.SentSignaturesTracker(), + EnableEpochsHandler: nil, + ChainID: []byte(ccf.coreComponents.ChainID()), + CurrentPid: ccf.networkComponents.NetworkMessenger().ID(), + } + + subroundsHandler, err := bls.NewSubroundsHandler(subroundsHandlerArgs) if err != nil { return nil, err } - err = fct.GenerateSubrounds() + err = subroundsHandler.Start(epoch) if err != nil { return nil, err } - cc.chronology.StartRounds() - err = ccf.addCloserInstances(cc.chronology, cc.bootstrapper, cc.worker, ccf.coreComponents.SyncTimer()) if err != nil { return nil, err From 29b083de21aa710e306feabf1a33f78a0603e00a Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Wed, 25 Sep 2024 17:40:07 +0300 Subject: [PATCH 11/30] fix cyclic import --- .../spos/bls/{ => proxy}/subroundsHandler.go | 25 ++++++++++--------- factory/consensus/consensusComponents.go | 6 ++--- 2 files changed, 16 insertions(+), 15 deletions(-) rename consensus/spos/bls/{ => proxy}/subroundsHandler.go (90%) diff --git a/consensus/spos/bls/subroundsHandler.go 
b/consensus/spos/bls/proxy/subroundsHandler.go similarity index 90% rename from consensus/spos/bls/subroundsHandler.go rename to consensus/spos/bls/proxy/subroundsHandler.go index f90f839684a..72c82080edc 100644 --- a/consensus/spos/bls/subroundsHandler.go +++ b/consensus/spos/bls/proxy/subroundsHandler.go @@ -1,4 +1,4 @@ -package bls +package proxy import ( "github.com/multiversx/mx-chain-core-go/core" @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" "github.com/multiversx/mx-chain-go/factory" @@ -63,38 +64,38 @@ const ( func NewSubroundsHandler(args *SubroundsHandlerArgs) (*SubroundsHandler, error) { if check.IfNil(args.Chronology) { - return nil, ErrNilChronologyHandler + return nil, bls.ErrNilChronologyHandler } if check.IfNil(args.ConsensusCoreHandler) { - return nil, ErrNilConsensusCoreHandler + return nil, bls.ErrNilConsensusCoreHandler } // TODO: use an interface instead if args.ConsensusState == nil { - return nil, ErrNilConsensusState + return nil, bls.ErrNilConsensusState } if check.IfNil(args.Worker) { - return nil, ErrNilWorker + return nil, bls.ErrNilWorker } if check.IfNil(args.SignatureThrottler) { - return nil, ErrNilSignatureThrottler + return nil, bls.ErrNilSignatureThrottler } if check.IfNil(args.AppStatusHandler) { - return nil, ErrNilAppStatusHandler + return nil, bls.ErrNilAppStatusHandler } if check.IfNil(args.OutportHandler) { - return nil, ErrNilOutportHandler + return nil, bls.ErrNilOutportHandler } if check.IfNil(args.SentSignatureTracker) { - return nil, ErrNilSentSignatureTracker + return nil, bls.ErrNilSentSignatureTracker } if check.IfNil(args.EnableEpochsHandler) { - return nil, ErrNilEnableEpochsHandler + return nil, bls.ErrNilEnableEpochsHandler } if args.ChainID == nil { - return nil, ErrNilChainID + return nil, bls.ErrNilChainID } if len(args.CurrentPid) == 0 { - return nil, ErrNilCurrentPid + return nil, bls.ErrNilCurrentPid } return &SubroundsHandler{ diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index 16052a0b531..beaa5ebc5d5 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -19,7 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/blacklist" "github.com/multiversx/mx-chain-go/consensus/chronology" "github.com/multiversx/mx-chain-go/consensus/spos" - "github.com/multiversx/mx-chain-go/consensus/spos/bls" + "github.com/multiversx/mx-chain-go/consensus/spos/bls/proxy" "github.com/multiversx/mx-chain-go/consensus/spos/debug" "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" "github.com/multiversx/mx-chain-go/dataRetriever" @@ -280,7 +280,7 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { return nil, err } - subroundsHandlerArgs := &bls.SubroundsHandlerArgs{ + subroundsHandlerArgs := &proxy.SubroundsHandlerArgs{ Chronology: cc.chronology, ConsensusCoreHandler: consensusDataContainer, ConsensusState: consensusState, @@ -294,7 +294,7 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { CurrentPid: ccf.networkComponents.NetworkMessenger().ID(), } - subroundsHandler, err := bls.NewSubroundsHandler(subroundsHandlerArgs) + subroundsHandler, err := 
proxy.NewSubroundsHandler(subroundsHandlerArgs) if err != nil { return nil, err } From 299bc50a6f7677e5838443b6572090f14cbe9dc2 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Wed, 25 Sep 2024 17:55:46 +0300 Subject: [PATCH 12/30] fix unit tests --- consensus/spos/bls/errors.go | 38 +++++ .../spos/sposFactory/sposFactory_test.go | 100 ------------ factory/consensus/consensusComponents.go | 2 +- .../consensus/initializers/initializers.go | 150 ++++++++++++++++++ 4 files changed, 189 insertions(+), 101 deletions(-) create mode 100644 consensus/spos/bls/errors.go create mode 100644 testscommon/consensus/initializers/initializers.go diff --git a/consensus/spos/bls/errors.go b/consensus/spos/bls/errors.go new file mode 100644 index 00000000000..9f889ed50f0 --- /dev/null +++ b/consensus/spos/bls/errors.go @@ -0,0 +1,38 @@ +package bls + +import ( + "errors" +) + +// ErrNilChronologyHandler is the error returned when the chronology handler is nil +var ErrNilChronologyHandler = errors.New("nil chronology handler") + +// ErrNilConsensusCoreHandler is the error returned when the consensus core handler is nil +var ErrNilConsensusCoreHandler = errors.New("nil consensus core handler") + +// ErrNilConsensusState is the error returned when the consensus state is nil +var ErrNilConsensusState = errors.New("nil consensus state") + +// ErrNilWorker is the error returned when the worker is nil +var ErrNilWorker = errors.New("nil worker") + +// ErrNilSignatureThrottler is the error returned when the signature throttler is nil +var ErrNilSignatureThrottler = errors.New("nil signature throttler") + +// ErrNilAppStatusHandler is the error returned when the app status handler is nil +var ErrNilAppStatusHandler = errors.New("nil app status handler") + +// ErrNilOutportHandler is the error returned when the outport handler is nil +var ErrNilOutportHandler = errors.New("nil outport handler") + +// ErrNilSentSignatureTracker is the error returned when the sent signature tracker is nil +var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") + +// ErrNilChainID is the error returned when the chain ID is nil +var ErrNilChainID = errors.New("nil chain ID") + +// ErrNilCurrentPid is the error returned when the current PID is nil +var ErrNilCurrentPid = errors.New("nil current PID") + +// ErrNilEnableEpochsHandler is the error returned when the enable epochs handler is nil +var ErrNilEnableEpochsHandler = errors.New("nil enable epochs handler") diff --git a/consensus/spos/sposFactory/sposFactory_test.go b/consensus/spos/sposFactory/sposFactory_test.go index 1e17d29f03f..1c05ff64c6f 100644 --- a/consensus/spos/sposFactory/sposFactory_test.go +++ b/consensus/spos/sposFactory/sposFactory_test.go @@ -12,14 +12,10 @@ import ( "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" - dataRetrieverMocks "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/testscommon" - testscommonConsensus "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" - "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/pool" - statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) var currentPid = core.PeerID("pid") @@ -42,102 +38,6 @@ func TestGetConsensusCoreFactory_BlsShouldWork(t 
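Because the constructor returns these sentinel values unwrapped, callers can branch on them with errors.Is. The helper below is purely illustrative (its name, package and messages are not in the patch); the argument type, constructor and error variables are the ones introduced above.

package wiringcheck

import (
	"errors"
	"fmt"

	"github.com/multiversx/mx-chain-go/consensus/spos/bls"
	"github.com/multiversx/mx-chain-go/consensus/spos/bls/proxy"
)

// checkSubroundsWiring maps two of the sentinel errors to more actionable
// messages while passing everything else through unchanged.
func checkSubroundsWiring(args *proxy.SubroundsHandlerArgs) error {
	_, err := proxy.NewSubroundsHandler(args)
	switch {
	case err == nil:
		return nil
	case errors.Is(err, bls.ErrNilChronologyHandler):
		return fmt.Errorf("consensus wiring incomplete, chronology is missing: %w", err)
	case errors.Is(err, bls.ErrNilEnableEpochsHandler):
		return fmt.Errorf("consensus wiring incomplete, enable epochs handler is missing: %w", err)
	default:
		return err
	}
}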
*testing.T) { assert.False(t, check.IfNil(csf)) } -func TestGetSubroundsFactory_BlsNilConsensusCoreShouldErr(t *testing.T) { - t.Parallel() - - worker := &mock.SposWorkerMock{} - consensusType := consensus.BlsConsensusType - statusHandler := statusHandlerMock.NewAppStatusHandlerMock() - chainID := []byte("chain-id") - indexer := &outport.OutportStub{} - sf, err := sposFactory.GetSubroundsFactory( - nil, - &spos.ConsensusState{}, - worker, - consensusType, - statusHandler, - indexer, - &testscommon.SentSignatureTrackerStub{}, - chainID, - currentPid, - &dataRetrieverMocks.ThrottlerStub{}, - ) - - assert.Nil(t, sf) - assert.Equal(t, spos.ErrNilConsensusCore, err) -} - -func TestGetSubroundsFactory_BlsNilStatusHandlerShouldErr(t *testing.T) { - t.Parallel() - - consensusCore := testscommonConsensus.InitConsensusCore() - worker := &mock.SposWorkerMock{} - consensusType := consensus.BlsConsensusType - chainID := []byte("chain-id") - indexer := &outport.OutportStub{} - sf, err := sposFactory.GetSubroundsFactory( - consensusCore, - &spos.ConsensusState{}, - worker, - consensusType, - nil, - indexer, - &testscommon.SentSignatureTrackerStub{}, - chainID, - currentPid, - &dataRetrieverMocks.ThrottlerStub{}, - ) - - assert.Nil(t, sf) - assert.Equal(t, spos.ErrNilAppStatusHandler, err) -} - -func TestGetSubroundsFactory_BlsShouldWork(t *testing.T) { - t.Parallel() - - consensusCore := testscommonConsensus.InitConsensusCore() - worker := &mock.SposWorkerMock{} - consensusType := consensus.BlsConsensusType - statusHandler := statusHandlerMock.NewAppStatusHandlerMock() - chainID := []byte("chain-id") - indexer := &outport.OutportStub{} - sf, err := sposFactory.GetSubroundsFactory( - consensusCore, - &spos.ConsensusState{}, - worker, - consensusType, - statusHandler, - indexer, - &testscommon.SentSignatureTrackerStub{}, - chainID, - currentPid, - &dataRetrieverMocks.ThrottlerStub{}, - ) - assert.Nil(t, err) - assert.False(t, check.IfNil(sf)) -} - -func TestGetSubroundsFactory_InvalidConsensusTypeShouldErr(t *testing.T) { - t.Parallel() - - consensusType := "invalid" - sf, err := sposFactory.GetSubroundsFactory( - nil, - nil, - nil, - consensusType, - nil, - nil, - nil, - nil, - currentPid, - &dataRetrieverMocks.ThrottlerStub{}, - ) - - assert.Nil(t, sf) - assert.Equal(t, sposFactory.ErrInvalidConsensusType, err) -} - func TestGetBroadcastMessenger_ShardShouldWork(t *testing.T) { t.Parallel() diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index beaa5ebc5d5..32092341f10 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -289,7 +289,7 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { AppStatusHandler: ccf.statusCoreComponents.AppStatusHandler(), OutportHandler: ccf.statusComponents.OutportHandler(), SentSignatureTracker: ccf.processComponents.SentSignaturesTracker(), - EnableEpochsHandler: nil, + EnableEpochsHandler: ccf.coreComponents.EnableEpochsHandler(), ChainID: []byte(ccf.coreComponents.ChainID()), CurrentPid: ccf.networkComponents.NetworkMessenger().ID(), } diff --git a/testscommon/consensus/initializers/initializers.go b/testscommon/consensus/initializers/initializers.go new file mode 100644 index 00000000000..30bb88b44cb --- /dev/null +++ b/testscommon/consensus/initializers/initializers.go @@ -0,0 +1,150 @@ +package initializers + +import ( + crypto "github.com/multiversx/mx-chain-crypto-go" + "golang.org/x/exp/slices" + + 
"github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/testscommon" +) + +func createEligibleList(size int) []string { + eligibleList := make([]string, 0) + for i := 0; i < size; i++ { + eligibleList = append(eligibleList, string([]byte{byte(i + 65)})) + } + return eligibleList +} + +func CreateEligibleListFromMap(mapKeys map[string]crypto.PrivateKey) []string { + eligibleList := make([]string, 0, len(mapKeys)) + for key := range mapKeys { + eligibleList = append(eligibleList, key) + } + slices.Sort(eligibleList) + return eligibleList +} + +func InitConsensusStateWithNodesCoordinator(validatorsGroupSelector nodesCoordinator.NodesCoordinator) *spos.ConsensusState { + return initConsensusStateWithKeysHandlerAndNodesCoordinator(&testscommon.KeysHandlerStub{}, validatorsGroupSelector) +} + +func InitConsensusState() *spos.ConsensusState { + return InitConsensusStateWithKeysHandler(&testscommon.KeysHandlerStub{}) +} + +func InitConsensusStateWithArgs(keysHandler consensus.KeysHandler, mapKeys map[string]crypto.PrivateKey) *spos.ConsensusState { + return initConsensusStateWithKeysHandlerWithGroupSizeWithRealKeys(keysHandler, mapKeys) +} + +func InitConsensusStateWithKeysHandler(keysHandler consensus.KeysHandler) *spos.ConsensusState { + consensusGroupSize := 9 + return initConsensusStateWithKeysHandlerWithGroupSize(keysHandler, consensusGroupSize) +} + +func initConsensusStateWithKeysHandlerAndNodesCoordinator(keysHandler consensus.KeysHandler, validatorsGroupSelector nodesCoordinator.NodesCoordinator) *spos.ConsensusState { + leader, consensusValidators, _ := validatorsGroupSelector.GetConsensusValidatorsPublicKeys([]byte("randomness"), 0, 0, 0) + eligibleNodesPubKeys := make(map[string]struct{}) + for _, key := range consensusValidators { + eligibleNodesPubKeys[key] = struct{}{} + } + return createConsensusStateWithNodes(eligibleNodesPubKeys, consensusValidators, leader, keysHandler) +} + +func InitConsensusStateWithArgsVerifySignature(keysHandler consensus.KeysHandler, keys []string) *spos.ConsensusState { + numberOfKeys := len(keys) + eligibleNodesPubKeys := make(map[string]struct{}, numberOfKeys) + for _, key := range keys { + eligibleNodesPubKeys[key] = struct{}{} + } + + indexLeader := 1 + rcns, _ := spos.NewRoundConsensus( + eligibleNodesPubKeys, + numberOfKeys, + keys[indexLeader], + keysHandler, + ) + rcns.SetConsensusGroup(keys) + rcns.ResetRoundState() + + pBFTThreshold := numberOfKeys*2/3 + 1 + pBFTFallbackThreshold := numberOfKeys*1/2 + 1 + rthr := spos.NewRoundThreshold() + rthr.SetThreshold(1, 1) + rthr.SetThreshold(2, pBFTThreshold) + rthr.SetFallbackThreshold(1, 1) + rthr.SetFallbackThreshold(2, pBFTFallbackThreshold) + + rstatus := spos.NewRoundStatus() + rstatus.ResetRoundStatus() + cns := spos.NewConsensusState( + rcns, + rthr, + rstatus, + ) + cns.Data = []byte("X") + cns.RoundIndex = 0 + + return cns +} + +func initConsensusStateWithKeysHandlerWithGroupSize(keysHandler consensus.KeysHandler, consensusGroupSize int) *spos.ConsensusState { + eligibleList := createEligibleList(consensusGroupSize) + + eligibleNodesPubKeys := make(map[string]struct{}) + for _, key := range eligibleList { + eligibleNodesPubKeys[key] = struct{}{} + } + + return createConsensusStateWithNodes(eligibleNodesPubKeys, eligibleList, eligibleList[0], keysHandler) +} + +func initConsensusStateWithKeysHandlerWithGroupSizeWithRealKeys(keysHandler 
consensus.KeysHandler, mapKeys map[string]crypto.PrivateKey) *spos.ConsensusState { + eligibleList := CreateEligibleListFromMap(mapKeys) + + eligibleNodesPubKeys := make(map[string]struct{}, len(eligibleList)) + for _, key := range eligibleList { + eligibleNodesPubKeys[key] = struct{}{} + } + + return createConsensusStateWithNodes(eligibleNodesPubKeys, eligibleList, eligibleList[0], keysHandler) +} + +func createConsensusStateWithNodes(eligibleNodesPubKeys map[string]struct{}, consensusValidators []string, leader string, keysHandler consensus.KeysHandler) *spos.ConsensusState { + consensusGroupSize := len(consensusValidators) + rcns, _ := spos.NewRoundConsensus( + eligibleNodesPubKeys, + consensusGroupSize, + consensusValidators[1], + keysHandler, + ) + + rcns.SetConsensusGroup(consensusValidators) + rcns.SetLeader(leader) + rcns.ResetRoundState() + + pBFTThreshold := consensusGroupSize*2/3 + 1 + pBFTFallbackThreshold := consensusGroupSize*1/2 + 1 + + rthr := spos.NewRoundThreshold() + rthr.SetThreshold(1, 1) + rthr.SetThreshold(2, pBFTThreshold) + rthr.SetFallbackThreshold(1, 1) + rthr.SetFallbackThreshold(2, pBFTFallbackThreshold) + + rstatus := spos.NewRoundStatus() + rstatus.ResetRoundStatus() + + cns := spos.NewConsensusState( + rcns, + rthr, + rstatus, + ) + + cns.Data = []byte("X") + cns.RoundIndex = 0 + return cns +} From 357d2c29f8a8642501efa0401b871fe7b878cabf Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Thu, 26 Sep 2024 16:55:42 +0300 Subject: [PATCH 13/30] fix missing proofs pool in unit tests --- keysManagement/managedPeersHolder.go | 3 ++- testscommon/dataRetriever/poolFactory.go | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/keysManagement/managedPeersHolder.go b/keysManagement/managedPeersHolder.go index 8156b64c8eb..39f80f6bbaf 100644 --- a/keysManagement/managedPeersHolder.go +++ b/keysManagement/managedPeersHolder.go @@ -12,10 +12,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" crypto "github.com/multiversx/mx-chain-crypto-go" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/redundancy/common" - logger "github.com/multiversx/mx-chain-logger-go" ) var log = logger.GetOrCreate("keysManagement") diff --git a/testscommon/dataRetriever/poolFactory.go b/testscommon/dataRetriever/poolFactory.go index df416a9f56a..b631e6d4ba2 100644 --- a/testscommon/dataRetriever/poolFactory.go +++ b/testscommon/dataRetriever/poolFactory.go @@ -6,6 +6,7 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/dataPool" @@ -242,6 +243,7 @@ func CreatePoolsHolderWithTxPool(txPool dataRetriever.ShardedDataCacherNotifier) PeerAuthentications: peerAuthPool, Heartbeats: heartbeatPool, ValidatorsInfo: validatorsInfo, + Proofs: &ProofsPoolMock{}, } holder, err := dataPool.NewDataPool(dataPoolArgs) panicIfError("CreatePoolsHolderWithTxPool", err) From 19fcca25a28004e9e6cc6a3660ed5e54f180da05 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Thu, 26 Sep 2024 17:57:44 +0300 Subject: [PATCH 14/30] fix linter --- consensus/spos/bls/v1/subroundSignature.go | 4 ++++ factory/consensus/consensusComponents_test.go | 22 ------------------- 2 files changed, 4 insertions(+), 22 deletions(-) diff --git a/consensus/spos/bls/v1/subroundSignature.go 
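The thresholds set by createConsensusStateWithNodes above follow the usual pBFT bounds, which is easy to verify on the default helper: the eligible list has 9 validators, so the signature subround (id 2) needs 9*2/3 + 1 = 7 signatures, or 9/2 + 1 = 5 in fallback mode. A minimal check, assuming only the exported initializer above and the threshold getters promoted on ConsensusState:

package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-go/testscommon/consensus/initializers"
)

func main() {
	cns := initializers.InitConsensusState()

	// the default helper builds a 9-validator consensus group
	fmt.Println(cns.ConsensusGroupSize()) // 9
	// pBFT threshold and fallback threshold for the signature subround (id 2)
	fmt.Println(cns.Threshold(2))         // 7
	fmt.Println(cns.FallbackThreshold(2)) // 5
}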
b/consensus/spos/bls/v1/subroundSignature.go index 86fc65b50dc..2cf77192925 100644 --- a/consensus/spos/bls/v1/subroundSignature.go +++ b/consensus/spos/bls/v1/subroundSignature.go @@ -390,6 +390,10 @@ func (sr *subroundSignature) doSignatureJobForManagedKeys() bool { } sr.sentSignatureTracker.SignatureSent(pkBytes) leader, err := sr.GetLeader() + if err != nil { + log.Debug("doSignatureJobForManagedKeys.GetLeader", "error", err.Error()) + return false + } isLeader := pk == leader ok := sr.completeSignatureSubRound(pk, isLeader) diff --git a/factory/consensus/consensusComponents_test.go b/factory/consensus/consensusComponents_test.go index de4aeff58ed..c5e2e0450b9 100644 --- a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -840,28 +840,6 @@ func TestConsensusComponentsFactory_Create(t *testing.T) { require.True(t, strings.Contains(err.Error(), "signing handler")) require.Nil(t, cc) }) - t.Run("GetSubroundsFactory failure should error", func(t *testing.T) { - t.Parallel() - - args := createMockConsensusComponentsFactoryArgs() - statusCoreCompStub, ok := args.StatusCoreComponents.(*factoryMocks.StatusCoreComponentsStub) - require.True(t, ok) - cnt := 0 - statusCoreCompStub.AppStatusHandlerCalled = func() core.AppStatusHandler { - cnt++ - if cnt > 4 { - return nil - } - return &statusHandler.AppStatusHandlerStub{} - } - ccf, _ := consensusComp.NewConsensusComponentsFactory(args) - require.NotNil(t, ccf) - - cc, err := ccf.Create() - require.Error(t, err) - require.True(t, strings.Contains(err.Error(), "AppStatusHandler")) - require.Nil(t, cc) - }) t.Run("addCloserInstances failure should error", func(t *testing.T) { t.Parallel() From 381e3765fd61f4870a7d93283a47fbe141e7645d Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 27 Sep 2024 12:52:41 +0300 Subject: [PATCH 15/30] register subrounds handler on epoch trigger --- common/constants.go | 6 ++- consensus/spos/bls/proxy/subroundsHandler.go | 39 +++++++++++++++++++- consensus/spos/bls/v1/subroundStartRound.go | 2 +- consensus/spos/bls/v2/subroundStartRound.go | 2 +- 4 files changed, 43 insertions(+), 6 deletions(-) diff --git a/common/constants.go b/common/constants.go index 98473acfd9f..4f9ac681316 100644 --- a/common/constants.go +++ b/common/constants.go @@ -843,8 +843,10 @@ const ( ChainParametersOrder // NodesCoordinatorOrder defines the order in which NodesCoordinator is notified of a start of epoch event NodesCoordinatorOrder - // ConsensusOrder defines the order in which Consensus is notified of a start of epoch event - ConsensusOrder + // ConsensusHandlerOrder defines the order in which ConsensusHandler is notified of a start of epoch event + ConsensusHandlerOrder + // ConsensusStartRoundOrder defines the order in which Consensus StartRound subround is notified of a start of epoch event + ConsensusStartRoundOrder // NetworkShardingOrder defines the order in which the network sharding subsystem is notified of a start of epoch event NetworkShardingOrder // IndexerOrder defines the order in which indexer is notified of a start of epoch event diff --git a/consensus/spos/bls/proxy/subroundsHandler.go b/consensus/spos/bls/proxy/subroundsHandler.go index 72c82080edc..6d880ca9ef9 100644 --- a/consensus/spos/bls/proxy/subroundsHandler.go +++ b/consensus/spos/bls/proxy/subroundsHandler.go @@ -3,6 +3,8 @@ package proxy import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + logger 
"github.com/multiversx/mx-chain-logger-go" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" @@ -14,6 +16,8 @@ import ( "github.com/multiversx/mx-chain-go/outport" ) +var log = logger.GetOrCreate("consensus/spos/bls/proxy") + // pick up stuff from consensusComponents and intermediate it here // SubroundsHandlerArgs struct contains the needed data for the SubroundsHandler @@ -98,7 +102,7 @@ func NewSubroundsHandler(args *SubroundsHandlerArgs) (*SubroundsHandler, error) return nil, bls.ErrNilCurrentPid } - return &SubroundsHandler{ + subroundHandler := &SubroundsHandler{ chronology: args.Chronology, consensusCoreHandler: args.ConsensusCoreHandler, consensusState: args.ConsensusState, @@ -111,7 +115,11 @@ func NewSubroundsHandler(args *SubroundsHandlerArgs) (*SubroundsHandler, error) chainID: args.ChainID, currentPid: args.CurrentPid, currentConsensusType: ConsensusNone, - }, nil + } + + subroundHandler.consensusCoreHandler.EpochStartRegistrationHandler().RegisterHandler(subroundHandler) + + return subroundHandler, nil } // Start starts the sub-rounds handler @@ -158,6 +166,11 @@ func (s *SubroundsHandler) initSubroundsForEpoch(epoch uint32) error { return err } + err = s.chronology.Close() + if err != nil { + log.Warn("SubroundsHandler.initSubroundsForEpoch: cannot close the chronology", "error", err) + } + fct.SetOutportHandler(s.outportHandler) err = fct.GenerateSubrounds() if err != nil { @@ -173,3 +186,25 @@ func (s *SubroundsHandler) initSubroundsForEpoch(epoch uint32) error { func (s *SubroundsHandler) HandleEpochChange(epoch uint32) error { return s.initSubroundsForEpoch(epoch) } + +// EpochStartAction is called when the epoch starts +func (s *SubroundsHandler) EpochStartAction(hdr data.HeaderHandler) { + err := s.initSubroundsForEpoch(hdr.GetEpoch()) + if err != nil { + log.Error("SubroundsHandler.EpochStartAction: cannot initialize subrounds", "error", err) + } +} + +// EpochStartPrepare prepares the subrounds handler for the epoch start +func (s *SubroundsHandler) EpochStartPrepare(_ data.HeaderHandler, _ data.BodyHandler) { +} + +// NotifyOrder returns the order of the subrounds handler +func (s *SubroundsHandler) NotifyOrder() uint32 { + return common.ConsensusHandlerOrder +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *SubroundsHandler) IsInterfaceNil() bool { + return s == nil +} diff --git a/consensus/spos/bls/v1/subroundStartRound.go b/consensus/spos/bls/v1/subroundStartRound.go index f654fa2036d..81f370d565e 100644 --- a/consensus/spos/bls/v1/subroundStartRound.go +++ b/consensus/spos/bls/v1/subroundStartRound.go @@ -374,5 +374,5 @@ func (sr *subroundStartRound) changeEpoch(currentEpoch uint32) { // NotifyOrder returns the notification order for a start of epoch event func (sr *subroundStartRound) NotifyOrder() uint32 { - return common.ConsensusOrder + return common.ConsensusStartRoundOrder } diff --git a/consensus/spos/bls/v2/subroundStartRound.go b/consensus/spos/bls/v2/subroundStartRound.go index 3e2980146cc..7e7c7b71808 100644 --- a/consensus/spos/bls/v2/subroundStartRound.go +++ b/consensus/spos/bls/v2/subroundStartRound.go @@ -355,5 +355,5 @@ func (sr *subroundStartRound) changeEpoch(currentEpoch uint32) { // NotifyOrder returns the notification order for a start of epoch event func (sr *subroundStartRound) NotifyOrder() uint32 { - return common.ConsensusOrder + return common.ConsensusStartRoundOrder } From 308a952099914d51a90f709819d3c66f11bba702 Mon Sep 17 00:00:00 2001 From: Adrian 
Dobrita Date: Fri, 27 Sep 2024 12:54:28 +0300 Subject: [PATCH 16/30] remove unused method --- consensus/spos/bls/proxy/subroundsHandler.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/consensus/spos/bls/proxy/subroundsHandler.go b/consensus/spos/bls/proxy/subroundsHandler.go index 6d880ca9ef9..981955b855c 100644 --- a/consensus/spos/bls/proxy/subroundsHandler.go +++ b/consensus/spos/bls/proxy/subroundsHandler.go @@ -181,12 +181,6 @@ func (s *SubroundsHandler) initSubroundsForEpoch(epoch uint32) error { return nil } -// HandleEpochChange handles the epoch change event -// TODO: register to the epoch change event -func (s *SubroundsHandler) HandleEpochChange(epoch uint32) error { - return s.initSubroundsForEpoch(epoch) -} - // EpochStartAction is called when the epoch starts func (s *SubroundsHandler) EpochStartAction(hdr data.HeaderHandler) { err := s.initSubroundsForEpoch(hdr.GetEpoch()) From 27dbc82c7b62a54bd352401157a02310d6777f4c Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 27 Sep 2024 13:42:52 +0300 Subject: [PATCH 17/30] renaming import aliases --- consensus/chronology/chronology_test.go | 20 ++++++++++---------- consensus/round/round_test.go | 18 +++++++++--------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/consensus/chronology/chronology_test.go b/consensus/chronology/chronology_test.go index 1de6289d1ca..c14a5be13e5 100644 --- a/consensus/chronology/chronology_test.go +++ b/consensus/chronology/chronology_test.go @@ -11,7 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/chronology" "github.com/multiversx/mx-chain-go/consensus/mock" - consensus2 "github.com/multiversx/mx-chain-go/testscommon/consensus" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) @@ -118,7 +118,7 @@ func TestChronology_StartRoundShouldReturnWhenRoundIndexIsNegative(t *testing.T) t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &consensus2.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} roundHandlerMock.IndexCalled = func() int64 { return -1 } @@ -152,7 +152,7 @@ func TestChronology_StartRoundShouldReturnWhenDoWorkReturnsFalse(t *testing.T) { t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &consensus2.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} roundHandlerMock.UpdateRound(roundHandlerMock.TimeStamp(), roundHandlerMock.TimeStamp().Add(roundHandlerMock.TimeDuration())) arg.RoundHandler = roundHandlerMock chr, _ := chronology.NewChronology(arg) @@ -169,7 +169,7 @@ func TestChronology_StartRoundShouldWork(t *testing.T) { t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &consensus2.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} roundHandlerMock.UpdateRound(roundHandlerMock.TimeStamp(), roundHandlerMock.TimeStamp().Add(roundHandlerMock.TimeDuration())) arg.RoundHandler = roundHandlerMock chr, _ := chronology.NewChronology(arg) @@ -222,7 +222,7 @@ func TestChronology_InitRoundShouldNotSetSubroundWhenRoundIndexIsNegative(t *tes t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &consensus2.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} arg.RoundHandler = roundHandlerMock arg.GenesisTime = arg.SyncTimer.CurrentTime() chr, _ := chronology.NewChronology(arg) @@ -243,7 +243,7 @@ func 
TestChronology_InitRoundShouldSetSubroundWhenRoundIndexIsPositive(t *testin t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &consensus2.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} roundHandlerMock.UpdateRound(roundHandlerMock.TimeStamp(), roundHandlerMock.TimeStamp().Add(roundHandlerMock.TimeDuration())) arg.RoundHandler = roundHandlerMock arg.GenesisTime = arg.SyncTimer.CurrentTime() @@ -260,7 +260,7 @@ func TestChronology_StartRoundShouldNotUpdateRoundWhenCurrentRoundIsNotFinished( t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &consensus2.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} arg.RoundHandler = roundHandlerMock arg.GenesisTime = arg.SyncTimer.CurrentTime() chr, _ := chronology.NewChronology(arg) @@ -274,7 +274,7 @@ func TestChronology_StartRoundShouldNotUpdateRoundWhenCurrentRoundIsNotFinished( func TestChronology_StartRoundShouldUpdateRoundWhenCurrentRoundIsFinished(t *testing.T) { t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &consensus2.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} arg.RoundHandler = roundHandlerMock arg.GenesisTime = arg.SyncTimer.CurrentTime() chr, _ := chronology.NewChronology(arg) @@ -318,8 +318,8 @@ func TestChronology_CheckIfStatusHandlerWorks(t *testing.T) { func getDefaultChronologyArg() chronology.ArgChronology { return chronology.ArgChronology{ GenesisTime: time.Now(), - RoundHandler: &consensus2.RoundHandlerMock{}, - SyncTimer: &consensus2.SyncTimerMock{}, + RoundHandler: &consensusMocks.RoundHandlerMock{}, + SyncTimer: &consensusMocks.SyncTimerMock{}, AppStatusHandler: statusHandlerMock.NewAppStatusHandlerMock(), Watchdog: &mock.WatchdogMock{}, } diff --git a/consensus/round/round_test.go b/consensus/round/round_test.go index b306ebe8f22..ec1f08ec82d 100644 --- a/consensus/round/round_test.go +++ b/consensus/round/round_test.go @@ -7,7 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/consensus/round" - "github.com/multiversx/mx-chain-go/testscommon/consensus" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/stretchr/testify/assert" ) @@ -30,7 +30,7 @@ func TestRound_NewRoundShouldWork(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &consensus.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} rnd, err := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) @@ -43,7 +43,7 @@ func TestRound_UpdateRoundShouldNotChangeAnything(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &consensus.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) oldIndex := rnd.Index() @@ -63,7 +63,7 @@ func TestRound_UpdateRoundShouldAdvanceOneRound(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &consensus.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) oldIndex := rnd.Index() @@ -78,7 +78,7 @@ func TestRound_IndexShouldReturnFirstIndex(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &consensus.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) rnd.UpdateRound(genesisTime, genesisTime.Add(roundTimeDuration/2)) @@ -92,7 +92,7 @@ func 
TestRound_TimeStampShouldReturnTimeStampOfTheNextRound(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &consensus.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) rnd.UpdateRound(genesisTime, genesisTime.Add(roundTimeDuration+roundTimeDuration/2)) @@ -106,7 +106,7 @@ func TestRound_TimeDurationShouldReturnTheDurationOfOneRound(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &consensus.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) timeDuration := rnd.TimeDuration() @@ -119,7 +119,7 @@ func TestRound_RemainingTimeInCurrentRoundShouldReturnPositiveValue(t *testing.T genesisTime := time.Unix(0, 0) - syncTimerMock := &consensus.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} timeElapsed := int64(roundTimeDuration - 1) @@ -140,7 +140,7 @@ func TestRound_RemainingTimeInCurrentRoundShouldReturnNegativeValue(t *testing.T genesisTime := time.Unix(0, 0) - syncTimerMock := &consensus.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} timeElapsed := int64(roundTimeDuration + 1) From 9f1432835a39779e9da43e8a4a21f9641d8d5927 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 27 Sep 2024 15:30:03 +0300 Subject: [PATCH 18/30] extract interfaces for consensus state --- consensus/interface.go | 1 + consensus/spos/interface.go | 94 +++++++++++++++++++++++++++++++++++++ 2 files changed, 95 insertions(+) diff --git a/consensus/interface.go b/consensus/interface.go index cd05efeadaa..95df29736ed 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/p2p" ) diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index a063b4b7139..9d2fb77a380 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -170,3 +170,97 @@ type EquivalentMessagesDebugger interface { DeleteEquivalentMessage(headerHash []byte) IsInterfaceNil() bool } + +// ConsensusStateHandler encapsulates all needed data for the Consensus +type ConsensusStateHandler interface { + ResetConsensusState() + AddReceivedHeader(headerHandler data.HeaderHandler) + GetReceivedHeaders() []data.HeaderHandler + AddMessageWithSignature(key string, message p2p.MessageP2P) + GetMessageWithSignature(key string) (p2p.MessageP2P, bool) + IsNodeLeaderInCurrentRound(node string) bool + GetLeader() (string, error) + GetNextConsensusGroup( + randomSource []byte, + round uint64, + shardId uint32, + nodesCoordinator nodesCoordinator.NodesCoordinator, + epoch uint32, + ) (string, []string, error) + IsConsensusDataSet() bool + IsConsensusDataEqual(data []byte) bool + IsJobDone(node string, currentSubroundId int) bool + IsSubroundFinished(subroundID int) bool + IsNodeSelf(node string) bool + IsBlockBodyAlreadyReceived() bool + IsHeaderAlreadyReceived() bool + CanDoSubroundJob(currentSubroundId int) bool + CanProcessReceivedMessage(cnsDta *consensus.Message, currentRoundIndex int64, currentSubroundId int) bool + GenerateBitmap(subroundId int) []byte + ProcessingBlock() bool + SetProcessingBlock(processingBlock bool) + GetData() []byte + SetData(data []byte) + IsMultiKeyLeaderInCurrentRound() bool + IsLeaderJobDone(currentSubroundId int) bool + 
IsMultiKeyJobDone(currentSubroundId int) bool + IsSelfJobDone(currentSubroundID int) bool + GetMultikeyRedundancyStepInReason() string + ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) + GetRoundCanceled() bool + SetRoundCanceled(state bool) + GetRoundIndex() int64 + GetRoundTimeStamp() time.Time + GetExtendedCalled() bool + GetBody() data.BodyHandler + SetBody(body data.BodyHandler) + GetHeader() data.HeaderHandler + SetHeader(header data.HeaderHandler) + GetWaitingAllSignaturesTimeOut() bool + SetWaitingAllSignaturesTimeOut(bool) + RoundConsensusHandler + RoundStatusHandler + RoundThresholdHandler + IsInterfaceNil() bool +} + +// RoundConsensusHandler encapsulates the methods needed for a consensus round +type RoundConsensusHandler interface { + ConsensusGroupIndex(pubKey string) (int, error) + SelfConsensusGroupIndex() (int, error) + SetEligibleList(eligibleList map[string]struct{}) + ConsensusGroup() []string + SetConsensusGroup(consensusGroup []string) + SetLeader(leader string) + ConsensusGroupSize() int + SetConsensusGroupSize(consensusGroupSize int) + SelfPubKey() string + SetSelfPubKey(selfPubKey string) + JobDone(key string, subroundId int) (bool, error) + SetJobDone(key string, subroundId int, value bool) error + SelfJobDone(subroundId int) (bool, error) + IsNodeInConsensusGroup(node string) bool + IsNodeInEligibleList(node string) bool + ComputeSize(subroundId int) int + ResetRoundState() + IsMultiKeyInConsensusGroup() bool + IsKeyManagedBySelf(pkBytes []byte) bool + IncrementRoundsWithoutReceivedMessages(pkBytes []byte) + GetKeysHandler() consensus.KeysHandler + Leader() string +} + +// RoundStatusHandler encapsulates the methods needed for the status of a subround +type RoundStatusHandler interface { + Status(subroundId int) SubroundStatus + SetStatus(subroundId int, subroundStatus SubroundStatus) + ResetRoundStatus() +} + +// RoundThresholdHandler encapsulates the methods needed for the round consensus threshold +type RoundThresholdHandler interface { + Threshold(subroundId int) int + SetThreshold(subroundId int, threshold int) + FallbackThreshold(subroundId int) int + SetFallbackThreshold(subroundId int, threshold int) +} From 488dd06ebf8e0eb85e2d2ad2bdd3c023a1bebfac Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 27 Sep 2024 15:40:10 +0300 Subject: [PATCH 19/30] adapt to use interface instead of struct pointer --- consensus/spos/bls/proxy/subroundsHandler.go | 9 +- consensus/spos/bls/v1/blsSubroundsFactory.go | 8 +- .../spos/bls/v1/blsSubroundsFactory_test.go | 11 +- consensus/spos/bls/v1/subroundBlock.go | 60 ++++----- consensus/spos/bls/v1/subroundEndRound.go | 2 +- consensus/spos/bls/v2/benchmark_test.go | 3 +- consensus/spos/bls/v2/blsSubroundsFactory.go | 6 +- .../spos/bls/v2/blsSubroundsFactory_test.go | 11 +- consensus/spos/bls/v2/subroundBlock.go | 74 +++++------ consensus/spos/bls/v2/subroundBlock_test.go | 8 +- consensus/spos/bls/v2/subroundEndRound.go | 118 +++++++++--------- .../spos/bls/v2/subroundEndRound_test.go | 38 +++--- .../spos/bls/v2/subroundSignature_test.go | 39 +++--- .../spos/bls/v2/subroundStartRound_test.go | 36 +++--- consensus/spos/consensusState.go | 70 +++++++++++ consensus/spos/roundConsensus.go | 5 + consensus/spos/subround.go | 12 +- .../consensus}/sposWorkerMock.go | 2 +- 18 files changed, 290 insertions(+), 222 deletions(-) rename {consensus/mock => testscommon/consensus}/sposWorkerMock.go (99%) diff --git a/consensus/spos/bls/proxy/subroundsHandler.go b/consensus/spos/bls/proxy/subroundsHandler.go index 
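With the state abstracted behind ConsensusStateHandler, downstream code no longer needs the concrete *spos.ConsensusState. A small, hypothetical consumer is sketched below; only the two interface methods it calls come from the patch.

package consensusstate

import "github.com/multiversx/mx-chain-go/consensus/spos"

// isSelfLeaderInCurrentRound works against the interface, so it accepts both
// the production consensus state and any test double that satisfies
// spos.ConsensusStateHandler.
func isSelfLeaderInCurrentRound(state spos.ConsensusStateHandler) bool {
	leader, err := state.GetLeader()
	if err != nil {
		return false
	}

	return state.IsNodeSelf(leader)
}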
981955b855c..19ff56357d9 100644 --- a/consensus/spos/bls/proxy/subroundsHandler.go +++ b/consensus/spos/bls/proxy/subroundsHandler.go @@ -18,13 +18,11 @@ import ( var log = logger.GetOrCreate("consensus/spos/bls/proxy") -// pick up stuff from consensusComponents and intermediate it here - // SubroundsHandlerArgs struct contains the needed data for the SubroundsHandler type SubroundsHandlerArgs struct { Chronology consensus.ChronologyHandler ConsensusCoreHandler spos.ConsensusCoreHandler - ConsensusState *spos.ConsensusState + ConsensusState spos.ConsensusStateHandler Worker factory.ConsensusWorker SignatureThrottler core.Throttler AppStatusHandler core.AppStatusHandler @@ -48,7 +46,7 @@ type ConsensusStateMachineType int type SubroundsHandler struct { chronology consensus.ChronologyHandler consensusCoreHandler spos.ConsensusCoreHandler - consensusState *spos.ConsensusState + consensusState spos.ConsensusStateHandler worker factory.ConsensusWorker signatureThrottler core.Throttler appStatusHandler core.AppStatusHandler @@ -73,8 +71,7 @@ func NewSubroundsHandler(args *SubroundsHandlerArgs) (*SubroundsHandler, error) if check.IfNil(args.ConsensusCoreHandler) { return nil, bls.ErrNilConsensusCoreHandler } - // TODO: use an interface instead - if args.ConsensusState == nil { + if check.IfNil(args.ConsensusState) { return nil, bls.ErrNilConsensusState } if check.IfNil(args.Worker) { diff --git a/consensus/spos/bls/v1/blsSubroundsFactory.go b/consensus/spos/bls/v1/blsSubroundsFactory.go index f06c3e0af55..12cb0c59982 100644 --- a/consensus/spos/bls/v1/blsSubroundsFactory.go +++ b/consensus/spos/bls/v1/blsSubroundsFactory.go @@ -15,7 +15,7 @@ import ( // functionality type factory struct { consensusCore spos.ConsensusCoreHandler - consensusState *spos.ConsensusState + consensusState spos.ConsensusStateHandler worker spos.WorkerHandler appStatusHandler core.AppStatusHandler @@ -28,7 +28,7 @@ type factory struct { // NewSubroundsFactory creates a new consensusState object func NewSubroundsFactory( consensusDataContainer spos.ConsensusCoreHandler, - consensusState *spos.ConsensusState, + consensusState spos.ConsensusStateHandler, worker spos.WorkerHandler, chainID []byte, currentPid core.PeerID, @@ -62,7 +62,7 @@ func NewSubroundsFactory( func checkNewFactoryParams( container spos.ConsensusCoreHandler, - state *spos.ConsensusState, + state spos.ConsensusStateHandler, worker spos.WorkerHandler, chainID []byte, appStatusHandler core.AppStatusHandler, @@ -72,7 +72,7 @@ func checkNewFactoryParams( if err != nil { return err } - if state == nil { + if check.IfNil(state) { return spos.ErrNilConsensusState } if check.IfNil(worker) { diff --git a/consensus/spos/bls/v1/blsSubroundsFactory_test.go b/consensus/spos/bls/v1/blsSubroundsFactory_test.go index b5c9e6c4d03..280c0c74bf3 100644 --- a/consensus/spos/bls/v1/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/v1/blsSubroundsFactory_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/multiversx/mx-chain-go/consensus" - "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" @@ -57,7 +56,7 @@ func initRoundHandlerMock() *consensusMock.RoundHandlerMock { } func initWorker() spos.WorkerHandler { - sposWorker := &mock.SposWorkerMock{} + sposWorker := &consensusMock.SposWorkerMock{} sposWorker.GetConsensusStateChangedChannelsCalled = func() chan bool { return make(chan 
bool) } @@ -494,7 +493,7 @@ func TestFactory_GenerateSubroundStartRoundShouldFailWhenNewSubroundFail(t *test t.Parallel() fct := *initFactory() - fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + fct.Worker().(*consensusMock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { return nil } @@ -519,7 +518,7 @@ func TestFactory_GenerateSubroundBlockShouldFailWhenNewSubroundFail(t *testing.T t.Parallel() fct := *initFactory() - fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + fct.Worker().(*consensusMock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { return nil } @@ -544,7 +543,7 @@ func TestFactory_GenerateSubroundSignatureShouldFailWhenNewSubroundFail(t *testi t.Parallel() fct := *initFactory() - fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + fct.Worker().(*consensusMock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { return nil } @@ -569,7 +568,7 @@ func TestFactory_GenerateSubroundEndRoundShouldFailWhenNewSubroundFail(t *testin t.Parallel() fct := *initFactory() - fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + fct.Worker().(*consensusMock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { return nil } diff --git a/consensus/spos/bls/v1/subroundBlock.go b/consensus/spos/bls/v1/subroundBlock.go index f7d36bfff33..eac4a7c9204 100644 --- a/consensus/spos/bls/v1/subroundBlock.go +++ b/consensus/spos/bls/v1/subroundBlock.go @@ -54,7 +54,7 @@ func checkNewSubroundBlockParams( return spos.ErrNilSubround } - if baseSubround.ConsensusState == nil { + if check.IfNil(baseSubround.ConsensusStateHandler) { return spos.ErrNilConsensusState } @@ -116,7 +116,7 @@ func (sr *subroundBlock) doBlockJob(ctx context.Context) bool { // placeholder for subroundBlock.doBlockJob script - sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(header, body, sr.RoundTimeStamp) + sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(header, body, sr.GetRoundTimeStamp()) return true } @@ -165,7 +165,7 @@ func (sr *subroundBlock) couldBeSentTogether(marshalizedBody []byte, marshalized } func (sr *subroundBlock) createBlock(header data.HeaderHandler) (data.HeaderHandler, data.BodyHandler, error) { - startTime := sr.RoundTimeStamp + startTime := sr.GetRoundTimeStamp() maxTime := time.Duration(sr.EndTime()) haveTimeInCurrentSubround := func() bool { return sr.RoundHandler().RemainingTime(startTime, maxTime) > 0 @@ -224,9 +224,9 @@ func (sr *subroundBlock) sendHeaderAndBlockBody( "nonce", headerHandler.GetNonce(), "hash", headerHash) - sr.Data = headerHash - sr.Body = bodyHandler - sr.Header = headerHandler + sr.SetData(headerHash) + sr.SetBody(bodyHandler) + sr.SetHeader(headerHandler) return true } @@ -264,7 +264,7 @@ func (sr *subroundBlock) sendBlockBody(bodyHandler data.BodyHandler, marshalized log.Debug("step 1: block body has been sent") - sr.Body = bodyHandler + sr.SetBody(bodyHandler) return true } @@ -306,8 +306,8 @@ func (sr *subroundBlock) sendBlockHeader(headerHandler data.HeaderHandler, marsh "nonce", headerHandler.GetNonce(), "hash", headerHash) - sr.Data = headerHash - sr.Header = headerHandler + sr.SetData(headerHash) + sr.SetHeader(headerHandler) return true } @@ -415,17 +415,17 @@ func (sr *subroundBlock) receivedBlockBodyAndHeader(ctx context.Context, cnsDta return false } - 
sr.Data = cnsDta.BlockHeaderHash - sr.Body = sr.BlockProcessor().DecodeBlockBody(cnsDta.Body) - sr.Header = sr.BlockProcessor().DecodeBlockHeader(cnsDta.Header) + sr.SetData(cnsDta.BlockHeaderHash) + sr.SetBody(sr.BlockProcessor().DecodeBlockBody(cnsDta.Body)) + sr.SetHeader(sr.BlockProcessor().DecodeBlockHeader(cnsDta.Header)) - isInvalidData := check.IfNil(sr.Body) || sr.isInvalidHeaderOrData() + isInvalidData := check.IfNil(sr.GetBody()) || sr.isInvalidHeaderOrData() if isInvalidData { return false } log.Debug("step 1: block body and header have been received", - "nonce", sr.Header.GetNonce(), + "nonce", sr.GetHeader().GetNonce(), "hash", cnsDta.BlockHeaderHash) sw.Start("processReceivedBlock") @@ -442,7 +442,7 @@ func (sr *subroundBlock) receivedBlockBodyAndHeader(ctx context.Context, cnsDta } func (sr *subroundBlock) isInvalidHeaderOrData() bool { - return sr.Data == nil || check.IfNil(sr.Header) || sr.Header.CheckFieldsForNil() != nil + return sr.GetData() == nil || check.IfNil(sr.GetHeader()) || sr.GetHeader().CheckFieldsForNil() != nil } // receivedBlockBody method is called when a block body is received through the block body channel @@ -467,9 +467,9 @@ func (sr *subroundBlock) receivedBlockBody(ctx context.Context, cnsDta *consensu return false } - sr.Body = sr.BlockProcessor().DecodeBlockBody(cnsDta.Body) + sr.SetBody(sr.BlockProcessor().DecodeBlockBody(cnsDta.Body)) - if check.IfNil(sr.Body) { + if check.IfNil(sr.GetBody()) { return false } @@ -514,15 +514,15 @@ func (sr *subroundBlock) receivedBlockHeader(ctx context.Context, cnsDta *consen return false } - sr.Data = cnsDta.BlockHeaderHash - sr.Header = sr.BlockProcessor().DecodeBlockHeader(cnsDta.Header) + sr.SetData(cnsDta.BlockHeaderHash) + sr.SetHeader(sr.BlockProcessor().DecodeBlockHeader(cnsDta.Header)) if sr.isInvalidHeaderOrData() { return false } log.Debug("step 1: block header has been received", - "nonce", sr.Header.GetNonce(), + "nonce", sr.GetHeader().GetNonce(), "hash", cnsDta.BlockHeaderHash) blockProcessedWithSuccess := sr.processReceivedBlock(ctx, cnsDta) @@ -536,10 +536,10 @@ func (sr *subroundBlock) receivedBlockHeader(ctx context.Context, cnsDta *consen } func (sr *subroundBlock) processReceivedBlock(ctx context.Context, cnsDta *consensus.Message) bool { - if check.IfNil(sr.Body) { + if check.IfNil(sr.GetBody()) { return false } - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false } @@ -549,20 +549,20 @@ func (sr *subroundBlock) processReceivedBlock(ctx context.Context, cnsDta *conse sr.SetProcessingBlock(true) - shouldNotProcessBlock := sr.ExtendedCalled || cnsDta.RoundIndex < sr.RoundHandler().Index() + shouldNotProcessBlock := sr.GetExtendedCalled() || cnsDta.RoundIndex < sr.RoundHandler().Index() if shouldNotProcessBlock { log.Debug("canceled round, extended has been called or round index has been changed", "round", sr.RoundHandler().Index(), "subround", sr.Name(), "cnsDta round", cnsDta.RoundIndex, - "extended called", sr.ExtendedCalled, + "extended called", sr.GetExtendedCalled(), ) return false } node := string(cnsDta.PubKey) - startTime := sr.RoundTimeStamp + startTime := sr.GetRoundTimeStamp() maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 remainingTimeInCurrentRound := func() time.Duration { return sr.RoundHandler().RemainingTime(startTime, maxTime) @@ -572,8 +572,8 @@ func (sr *subroundBlock) processReceivedBlock(ctx context.Context, cnsDta *conse defer sr.computeSubroundProcessingMetric(metricStatTime, 
common.MetricProcessedProposedBlock) err := sr.BlockProcessor().ProcessBlock( - sr.Header, - sr.Body, + sr.GetHeader(), + sr.GetBody(), remainingTimeInCurrentRound, ) @@ -588,7 +588,7 @@ func (sr *subroundBlock) processReceivedBlock(ctx context.Context, cnsDta *conse if err != nil { sr.printCancelRoundLogMessage(ctx, err) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } @@ -599,7 +599,7 @@ func (sr *subroundBlock) processReceivedBlock(ctx context.Context, cnsDta *conse return false } - sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(sr.Header, sr.Body, sr.RoundTimeStamp) + sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(sr.GetHeader(), sr.GetBody(), sr.GetRoundTimeStamp()) return true } @@ -629,7 +629,7 @@ func (sr *subroundBlock) computeSubroundProcessingMetric(startTime time.Time, me // doBlockConsensusCheck method checks if the consensus in the subround Block is achieved func (sr *subroundBlock) doBlockConsensusCheck() bool { - if sr.RoundCanceled { + if sr.GetRoundCanceled() { return false } diff --git a/consensus/spos/bls/v1/subroundEndRound.go b/consensus/spos/bls/v1/subroundEndRound.go index 0c2e7197e21..855c4f70203 100644 --- a/consensus/spos/bls/v1/subroundEndRound.go +++ b/consensus/spos/bls/v1/subroundEndRound.go @@ -75,7 +75,7 @@ func checkNewSubroundEndRoundParams( if baseSubround == nil { return spos.ErrNilSubround } - if baseSubround.ConsensusState == nil { + if check.IfNil(baseSubround.ConsensusStateHandler) { return spos.ErrNilConsensusState } diff --git a/consensus/spos/bls/v2/benchmark_test.go b/consensus/spos/bls/v2/benchmark_test.go index b48058eef56..5b0492be6b5 100644 --- a/consensus/spos/bls/v2/benchmark_test.go +++ b/consensus/spos/bls/v2/benchmark_test.go @@ -15,7 +15,6 @@ import ( "github.com/stretchr/testify/require" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" @@ -119,7 +118,7 @@ func benchmarkSubroundSignatureDoSignatureJobForManagedKeys(b *testing.B, number mutex.Unlock() }, }, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, &nodeMock.ThrottlerStub{}, ) diff --git a/consensus/spos/bls/v2/blsSubroundsFactory.go b/consensus/spos/bls/v2/blsSubroundsFactory.go index 977f78f14d7..756cf1956f7 100644 --- a/consensus/spos/bls/v2/blsSubroundsFactory.go +++ b/consensus/spos/bls/v2/blsSubroundsFactory.go @@ -15,7 +15,7 @@ import ( // functionality type factory struct { consensusCore spos.ConsensusCoreHandler - consensusState *spos.ConsensusState + consensusState spos.ConsensusStateHandler worker spos.WorkerHandler appStatusHandler core.AppStatusHandler @@ -29,7 +29,7 @@ type factory struct { // NewSubroundsFactory creates a new consensusState object func NewSubroundsFactory( consensusDataContainer spos.ConsensusCoreHandler, - consensusState *spos.ConsensusState, + consensusState spos.ConsensusStateHandler, worker spos.WorkerHandler, chainID []byte, currentPid core.PeerID, @@ -66,7 +66,7 @@ func NewSubroundsFactory( func checkNewFactoryParams( container spos.ConsensusCoreHandler, - state *spos.ConsensusState, + state spos.ConsensusStateHandler, worker spos.WorkerHandler, chainID []byte, appStatusHandler core.AppStatusHandler, diff --git a/consensus/spos/bls/v2/blsSubroundsFactory_test.go b/consensus/spos/bls/v2/blsSubroundsFactory_test.go index a3ac6ed432e..89fd8406c7c 100644 --- 
a/consensus/spos/bls/v2/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/v2/blsSubroundsFactory_test.go @@ -10,7 +10,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/multiversx/mx-chain-go/consensus" - "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" @@ -46,7 +45,7 @@ func initRoundHandlerMock() *testscommonConsensus.RoundHandlerMock { } func initWorker() spos.WorkerHandler { - sposWorker := &mock.SposWorkerMock{} + sposWorker := &testscommonConsensus.SposWorkerMock{} sposWorker.GetConsensusStateChangedChannelsCalled = func() chan bool { return make(chan bool) } @@ -523,7 +522,7 @@ func TestFactory_GenerateSubroundStartRoundShouldFailWhenNewSubroundFail(t *test t.Parallel() fct := *initFactory() - fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + fct.Worker().(*testscommonConsensus.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { return nil } @@ -548,7 +547,7 @@ func TestFactory_GenerateSubroundBlockShouldFailWhenNewSubroundFail(t *testing.T t.Parallel() fct := *initFactory() - fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + fct.Worker().(*testscommonConsensus.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { return nil } @@ -573,7 +572,7 @@ func TestFactory_GenerateSubroundSignatureShouldFailWhenNewSubroundFail(t *testi t.Parallel() fct := *initFactory() - fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + fct.Worker().(*testscommonConsensus.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { return nil } @@ -598,7 +597,7 @@ func TestFactory_GenerateSubroundEndRoundShouldFailWhenNewSubroundFail(t *testin t.Parallel() fct := *initFactory() - fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + fct.Worker().(*testscommonConsensus.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { return nil } diff --git a/consensus/spos/bls/v2/subroundBlock.go b/consensus/spos/bls/v2/subroundBlock.go index 1db0ed87ae2..6e4a115c043 100644 --- a/consensus/spos/bls/v2/subroundBlock.go +++ b/consensus/spos/bls/v2/subroundBlock.go @@ -61,7 +61,7 @@ func checkNewSubroundBlockParams( return spos.ErrNilSubround } - if baseSubround.ConsensusState == nil { + if check.IfNil(baseSubround.ConsensusStateHandler) { return spos.ErrNilConsensusState } @@ -129,7 +129,7 @@ func (sr *subroundBlock) doBlockJob(ctx context.Context) bool { // placeholder for subroundBlock.doBlockJob script - sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(header, body, sr.RoundTimeStamp) + sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(header, body, sr.GetRoundTimeStamp()) return true } @@ -183,7 +183,7 @@ func (sr *subroundBlock) couldBeSentTogether(marshalizedBody []byte, marshalized } func (sr *subroundBlock) createBlock(header data.HeaderHandler) (data.HeaderHandler, data.BodyHandler, error) { - startTime := sr.RoundTimeStamp + startTime := sr.GetRoundTimeStamp() maxTime := time.Duration(sr.EndTime()) haveTimeInCurrentSubround := func() bool { return sr.RoundHandler().RemainingTime(startTime, maxTime) > 0 @@ -242,9 +242,9 @@ func (sr *subroundBlock) sendHeaderAndBlockBody( "nonce", headerHandler.GetNonce(), "hash", headerHash) - 
sr.Data = headerHash - sr.Body = bodyHandler - sr.Header = headerHandler + sr.SetData(headerHash) + sr.SetBody(bodyHandler) + sr.SetHeader(headerHandler) return true } @@ -285,7 +285,7 @@ func (sr *subroundBlock) sendBlockBody( log.Debug("step 1: block body has been sent") - sr.Body = bodyHandler + sr.SetBody(bodyHandler) return true } @@ -317,8 +317,8 @@ func (sr *subroundBlock) sendBlockHeader( "nonce", headerHandler.GetNonce(), "hash", headerHash) - sr.Data = headerHash - sr.Header = headerHandler + sr.SetData(headerHash) + sr.SetHeader(headerHandler) return true } @@ -363,8 +363,8 @@ func (sr *subroundBlock) sendBlockHeaderBeforeEquivalentProofs( "nonce", headerHandler.GetNonce(), "hash", headerHash) - sr.Data = headerHash - sr.Header = headerHandler + sr.SetData(headerHash) + sr.SetHeader(headerHandler) return true } @@ -525,11 +525,11 @@ func (sr *subroundBlock) receivedBlockBodyAndHeader(ctx context.Context, cnsDta header := sr.BlockProcessor().DecodeBlockHeader(cnsDta.Header) - sr.Data = cnsDta.BlockHeaderHash - sr.Body = sr.BlockProcessor().DecodeBlockBody(cnsDta.Body) - sr.Header = header + sr.SetData(cnsDta.BlockHeaderHash) + sr.SetBody(sr.BlockProcessor().DecodeBlockBody(cnsDta.Body)) + sr.SetHeader(header) - isInvalidData := check.IfNil(sr.Body) || sr.isInvalidHeaderOrData() + isInvalidData := check.IfNil(sr.GetBody()) || sr.isInvalidHeaderOrData() if isInvalidData { return false } @@ -537,7 +537,7 @@ func (sr *subroundBlock) receivedBlockBodyAndHeader(ctx context.Context, cnsDta sr.saveProofForPreviousHeaderIfNeeded() log.Debug("step 1: block body and header have been received", - "nonce", sr.Header.GetNonce(), + "nonce", sr.GetHeader().GetNonce(), "hash", cnsDta.BlockHeaderHash) sw.Start("processReceivedBlock") @@ -576,7 +576,7 @@ func (sr *subroundBlock) saveProofForPreviousHeaderIfNeeded() { return } - proof = sr.Header.GetPreviousProof() + proof = sr.GetHeader().GetPreviousProof() err = sr.EquivalentProofsPool().AddProof(proof) if err != nil { log.Debug("saveProofForPreviousHeaderIfNeeded: failed to add proof, %w", err) @@ -585,7 +585,7 @@ func (sr *subroundBlock) saveProofForPreviousHeaderIfNeeded() { } func (sr *subroundBlock) isInvalidHeaderOrData() bool { - return sr.Data == nil || check.IfNil(sr.Header) || sr.Header.CheckFieldsForNil() != nil + return sr.GetData() == nil || check.IfNil(sr.GetHeader()) || sr.GetHeader().CheckFieldsForNil() != nil } // receivedBlockBody method is called when a block body is received through the block body channel @@ -610,9 +610,9 @@ func (sr *subroundBlock) receivedBlockBody(ctx context.Context, cnsDta *consensu return false } - sr.Body = sr.BlockProcessor().DecodeBlockBody(cnsDta.Body) + sr.SetBody(sr.BlockProcessor().DecodeBlockBody(cnsDta.Body)) - if check.IfNil(sr.Body) { + if check.IfNil(sr.GetBody()) { return false } @@ -660,8 +660,8 @@ func (sr *subroundBlock) receivedBlockHeaderBeforeEquivalentProofs(ctx context.C header := sr.BlockProcessor().DecodeBlockHeader(cnsDta.Header) - sr.Data = cnsDta.BlockHeaderHash - sr.Header = header + sr.SetData(cnsDta.BlockHeaderHash) + sr.SetHeader(header) if sr.isInvalidHeaderOrData() { return false @@ -670,7 +670,7 @@ func (sr *subroundBlock) receivedBlockHeaderBeforeEquivalentProofs(ctx context.C sr.saveProofForPreviousHeaderIfNeeded() log.Debug("step 1: block header has been received", - "nonce", sr.Header.GetNonce(), + "nonce", sr.GetHeader().GetNonce(), "hash", cnsDta.BlockHeaderHash) blockProcessedWithSuccess := sr.processReceivedBlock(ctx, cnsDta.RoundIndex, cnsDta.PubKey) @@ -723,14 
+723,14 @@ func (sr *subroundBlock) receivedBlockHeader(headerHandler data.HeaderHandler) { return } - sr.Data = sr.Hasher().Compute(string(marshalledHeader)) - sr.Header = headerHandler + sr.SetData(sr.Hasher().Compute(string(marshalledHeader))) + sr.SetHeader(headerHandler) sr.saveProofForPreviousHeaderIfNeeded() log.Debug("step 1: block header has been received", - "nonce", sr.Header.GetNonce(), - "hash", sr.Data) + "nonce", sr.GetHeader().GetNonce(), + "hash", sr.GetData()) sr.PeerHonestyHandler().ChangeScore( sr.Leader(), @@ -751,10 +751,10 @@ func (sr *subroundBlock) processReceivedBlock( round int64, senderPK []byte, ) bool { - if check.IfNil(sr.Body) { + if check.IfNil(sr.GetBody()) { return false } - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false } @@ -764,13 +764,13 @@ func (sr *subroundBlock) processReceivedBlock( sr.SetProcessingBlock(true) - shouldNotProcessBlock := sr.ExtendedCalled || round < sr.RoundHandler().Index() + shouldNotProcessBlock := sr.GetExtendedCalled() || round < sr.RoundHandler().Index() if shouldNotProcessBlock { log.Debug("canceled round, extended has been called or round index has been changed", "round", sr.RoundHandler().Index(), "subround", sr.Name(), "cnsDta round", round, - "extended called", sr.ExtendedCalled, + "extended called", sr.GetExtendedCalled(), ) return false } @@ -783,7 +783,7 @@ func (sr *subroundBlock) processBlock( roundIndex int64, pubkey []byte, ) bool { - startTime := sr.RoundTimeStamp + startTime := sr.GetRoundTimeStamp() maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 remainingTimeInCurrentRound := func() time.Duration { return sr.RoundHandler().RemainingTime(startTime, maxTime) @@ -793,8 +793,8 @@ func (sr *subroundBlock) processBlock( defer sr.computeSubroundProcessingMetric(metricStatTime, common.MetricProcessedProposedBlock) err := sr.BlockProcessor().ProcessBlock( - sr.Header, - sr.Body, + sr.GetHeader(), + sr.GetBody(), remainingTimeInCurrentRound, ) @@ -809,7 +809,7 @@ func (sr *subroundBlock) processBlock( if err != nil { sr.printCancelRoundLogMessage(ctx, err) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } @@ -821,7 +821,7 @@ func (sr *subroundBlock) processBlock( return false } - sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(sr.Header, sr.Body, sr.RoundTimeStamp) + sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(sr.GetHeader(), sr.GetBody(), sr.GetRoundTimeStamp()) return true } @@ -851,7 +851,7 @@ func (sr *subroundBlock) computeSubroundProcessingMetric(startTime time.Time, me // doBlockConsensusCheck method checks if the consensus in the subround Block is achieved func (sr *subroundBlock) doBlockConsensusCheck() bool { - if sr.RoundCanceled { + if sr.GetRoundCanceled() { return false } diff --git a/consensus/spos/bls/v2/subroundBlock_test.go b/consensus/spos/bls/v2/subroundBlock_test.go index 663a3ece1d7..d75b526b477 100644 --- a/consensus/spos/bls/v2/subroundBlock_test.go +++ b/consensus/spos/bls/v2/subroundBlock_test.go @@ -68,7 +68,7 @@ func defaultSubroundBlockFromSubround(sr *spos.Subround) (v2.SubroundBlock, erro srBlock, err := v2.NewSubroundBlock( sr, v2.ProcessingThresholdPercent, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, ) return srBlock, err @@ -78,7 +78,7 @@ func defaultSubroundBlockWithoutErrorFromSubround(sr *spos.Subround) v2.Subround srBlock, _ := v2.NewSubroundBlock( sr, v2.ProcessingThresholdPercent, - &mock.SposWorkerMock{}, + 
&consensusMocks.SposWorkerMock{}, ) return srBlock @@ -159,7 +159,7 @@ func TestSubroundBlock_NewSubroundBlockNilSubroundShouldFail(t *testing.T) { srBlock, err := v2.NewSubroundBlock( nil, v2.ProcessingThresholdPercent, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, ) assert.Nil(t, srBlock) assert.Equal(t, spos.ErrNilSubround, err) @@ -488,7 +488,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { srBlock, _ := v2.NewSubroundBlock( baseSr, v2.ProcessingThresholdPercent, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, ) sr := *srBlock diff --git a/consensus/spos/bls/v2/subroundEndRound.go b/consensus/spos/bls/v2/subroundEndRound.go index fbe58f4c6b4..554868bbcd1 100644 --- a/consensus/spos/bls/v2/subroundEndRound.go +++ b/consensus/spos/bls/v2/subroundEndRound.go @@ -84,7 +84,7 @@ func checkNewSubroundEndRoundParams( if baseSubround == nil { return spos.ErrNilSubround } - if baseSubround.ConsensusState == nil { + if check.IfNil(baseSubround.ConsensusStateHandler) { return spos.ErrNilConsensusState } @@ -103,13 +103,13 @@ func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsD if !sr.IsConsensusDataSet() { return false } - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false } // TODO[cleanup cns finality]: remove if statement isSenderAllowed := sr.IsNodeInConsensusGroup(messageSender) - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { isSenderAllowed = sr.IsNodeLeaderInCurrentRound(messageSender) } if !isSenderAllowed { // is NOT this node leader in current round? @@ -124,7 +124,7 @@ func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsD // TODO[cleanup cns finality]: remove if isSelfSender := sr.IsNodeSelf(messageSender) || sr.IsKeyManagedBySelf([]byte(messageSender)) - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { isSelfSender = sr.IsSelfLeader() } if isSelfSender { @@ -140,7 +140,7 @@ func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsD } hasProof := sr.EquivalentProofsPool().HasProof(sr.ShardCoordinator().SelfId(), cnsDta.BlockHeaderHash) - if hasProof && sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if hasProof && sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return true } @@ -163,11 +163,11 @@ func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsD } func (sr *subroundEndRound) isBlockHeaderFinalInfoValid(cnsDta *consensus.Message) bool { - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false } - header := sr.Header.ShallowClone() + header := sr.GetHeader().ShallowClone() // TODO[cleanup cns finality]: remove this if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, header.GetEpoch()) { @@ -223,13 +223,13 @@ func (sr *subroundEndRound) receivedInvalidSignersInfo(_ context.Context, cnsDta if !sr.IsConsensusDataSet() { return false } - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false } // TODO[cleanup cns finality]: remove if statement isSenderAllowed := sr.IsNodeInConsensusGroup(messageSender) - if 
!sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { isSenderAllowed = sr.IsNodeLeaderInCurrentRound(messageSender) } if !isSenderAllowed { // is NOT this node leader in current round? @@ -244,7 +244,7 @@ func (sr *subroundEndRound) receivedInvalidSignersInfo(_ context.Context, cnsDta // TODO[cleanup cns finality]: update this check isSelfSender := sr.IsNodeSelf(messageSender) || sr.IsKeyManagedBySelf([]byte(messageSender)) - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { isSelfSender = sr.IsSelfLeader() } if isSelfSender { @@ -348,12 +348,12 @@ func (sr *subroundEndRound) receivedHeader(headerHandler data.HeaderHandler) { // doEndRoundJob method does the job of the subround EndRound func (sr *subroundEndRound) doEndRoundJob(_ context.Context) bool { - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false } // TODO[cleanup cns finality]: remove this code block - isFlagEnabled := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) + isFlagEnabled := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) if !sr.IsSelfLeader() && !isFlagEnabled { if sr.IsSelfInConsensusGroup() { err := sr.prepareBroadcastBlockDataForValidator() @@ -404,15 +404,15 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { // broadcast header // TODO[cleanup cns finality]: remove this, header already broadcast during subroundBlock - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { - err = sr.BroadcastMessenger().BroadcastHeader(sr.Header, sender) + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { + err = sr.BroadcastMessenger().BroadcastHeader(sr.GetHeader(), sender) if err != nil { log.Warn("doEndRoundJobByLeader.BroadcastHeader", "error", err.Error()) } } startTime := time.Now() - err = sr.BlockProcessor().CommitBlock(sr.Header, sr.Body) + err = sr.BlockProcessor().CommitBlock(sr.GetHeader(), sr.GetBody()) elapsedTime := time.Since(startTime) if elapsedTime >= common.CommitMaxTime { log.Warn("doEndRoundJobByLeader.CommitBlock", "elapsed time", elapsedTime) @@ -426,7 +426,7 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { return false } - if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { err = sr.EquivalentProofsPool().AddProof(proof) if err != nil { log.Debug("doEndRoundJobByLeader.AddProof", "error", err) @@ -445,7 +445,7 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { log.Debug("doEndRoundJobByLeader.broadcastBlockDataLeader", "error", err.Error()) } - msg := fmt.Sprintf("Added proposed block with nonce %d in blockchain", sr.Header.GetNonce()) + msg := fmt.Sprintf("Added proposed block with nonce %d in blockchain", sr.GetHeader().GetNonce()) log.Debug(display.Headline(msg, sr.SyncTimer().FormattedCurrentTime(), "+")) sr.updateMetricsForLeader() @@ -469,14 +469,14 @@ func (sr *subroundEndRound) sendFinalInfo(sender []byte) (data.HeaderProofHandle } // 
TODO[cleanup cns finality]: remove this code block - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { - err = sr.Header.SetPubKeysBitmap(bitmap) + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { + err = sr.GetHeader().SetPubKeysBitmap(bitmap) if err != nil { log.Debug("sendFinalInfo.SetPubKeysBitmap", "error", err.Error()) return nil, false } - err = sr.Header.SetSignature(sig) + err = sr.GetHeader().SetSignature(sig) if err != nil { log.Debug("sendFinalInfo.SetSignature", "error", err.Error()) return nil, false @@ -489,7 +489,7 @@ func (sr *subroundEndRound) sendFinalInfo(sender []byte) (data.HeaderProofHandle return nil, false } - err = sr.Header.SetLeaderSignature(leaderSignature) + err = sr.GetHeader().SetLeaderSignature(leaderSignature) if err != nil { log.Debug("sendFinalInfo.SetLeaderSignature", "error", err.Error()) return nil, false @@ -512,8 +512,8 @@ func (sr *subroundEndRound) sendFinalInfo(sender []byte) (data.HeaderProofHandle // broadcast header and final info section // TODO[cleanup cns finality]: remove leaderSigToBroadcast - leaderSigToBroadcast := sr.Header.GetLeaderSignature() - if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + leaderSigToBroadcast := sr.GetHeader().GetLeaderSignature() + if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { leaderSigToBroadcast = nil } @@ -525,15 +525,15 @@ func (sr *subroundEndRound) sendFinalInfo(sender []byte) (data.HeaderProofHandle PubKeysBitmap: bitmap, AggregatedSignature: sig, HeaderHash: sr.GetData(), - HeaderEpoch: sr.Header.GetEpoch(), - HeaderNonce: sr.Header.GetNonce(), - HeaderShardId: sr.Header.GetShardID(), + HeaderEpoch: sr.GetHeader().GetEpoch(), + HeaderNonce: sr.GetHeader().GetNonce(), + HeaderShardId: sr.GetHeader().GetShardID(), }, true } func (sr *subroundEndRound) shouldSendFinalInfo() bool { // TODO[cleanup cns finality]: remove this check - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return true } @@ -547,7 +547,7 @@ func (sr *subroundEndRound) shouldSendFinalInfo() bool { } func (sr *subroundEndRound) aggregateSigsAndHandleInvalidSigners(bitmap []byte) ([]byte, []byte, error) { - sig, err := sr.SigningHandler().AggregateSigs(bitmap, sr.Header.GetEpoch()) + sig, err := sr.SigningHandler().AggregateSigs(bitmap, sr.GetHeader().GetEpoch()) if err != nil { log.Debug("doEndRoundJobByLeader.AggregateSigs", "error", err.Error()) @@ -560,7 +560,7 @@ func (sr *subroundEndRound) aggregateSigsAndHandleInvalidSigners(bitmap []byte) return nil, nil, err } - err = sr.SigningHandler().Verify(sr.GetData(), bitmap, sr.Header.GetEpoch()) + err = sr.SigningHandler().Verify(sr.GetData(), bitmap, sr.GetHeader().GetEpoch()) if err != nil { log.Debug("doEndRoundJobByLeader.Verify", "error", err.Error()) @@ -588,7 +588,7 @@ func (sr *subroundEndRound) checkGoRoutinesThrottler(ctx context.Context) error // verifySignature implements parallel signature verification func (sr *subroundEndRound) verifySignature(i int, pk string, sigShare []byte) error { - err := sr.SigningHandler().VerifySignatureShare(uint16(i), sigShare, sr.GetData(), sr.Header.GetEpoch()) + err := sr.SigningHandler().VerifySignatureShare(uint16(i), sigShare, sr.GetData(), 
sr.GetHeader().GetEpoch()) if err != nil { log.Trace("VerifySignatureShare returned an error: ", err) errSetJob := sr.SetJobDone(pk, bls.SrSignature, false) @@ -617,7 +617,7 @@ func (sr *subroundEndRound) verifyNodesOnAggSigFail(ctx context.Context) ([]stri invalidPubKeys := make([]string, 0) pubKeys := sr.ConsensusGroup() - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return nil, spos.ErrNilHeader } @@ -712,7 +712,7 @@ func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, []byte, error) threshold := sr.Threshold(bls.SrSignature) numValidSigShares := sr.ComputeSize(bls.SrSignature) - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return nil, nil, spos.ErrNilHeader } @@ -727,7 +727,7 @@ func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, []byte, error) return nil, nil, err } - sig, err := sr.SigningHandler().AggregateSigs(bitmap, sr.Header.GetEpoch()) + sig, err := sr.SigningHandler().AggregateSigs(bitmap, sr.GetHeader().GetEpoch()) if err != nil { return nil, nil, err } @@ -764,7 +764,7 @@ func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfoForKey(signature [] return false } - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { err = sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) if err != nil { log.Debug("createAndBroadcastHeaderFinalInfoForKey.BroadcastConsensusMessage", "error", err.Error()) @@ -791,7 +791,7 @@ func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfoForKey(signature [] func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []byte) { // TODO[cleanup cns finality]: remove the leader check - isEquivalentMessagesFlagEnabled := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) + isEquivalentMessagesFlagEnabled := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) if !sr.IsSelfLeader() && !isEquivalentMessagesFlagEnabled { return } @@ -830,7 +830,7 @@ func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []by } func (sr *subroundEndRound) doEndRoundJobByParticipant(cnsDta *consensus.Message) bool { - if sr.RoundCanceled { + if sr.GetRoundCanceled() { return false } if !sr.IsConsensusDataSet() { @@ -854,13 +854,13 @@ func (sr *subroundEndRound) doEndRoundJobByParticipant(cnsDta *consensus.Message sr.SetProcessingBlock(true) - shouldNotCommitBlock := sr.ExtendedCalled || int64(header.GetRound()) < sr.RoundHandler().Index() + shouldNotCommitBlock := sr.GetExtendedCalled() || int64(header.GetRound()) < sr.RoundHandler().Index() if shouldNotCommitBlock { log.Debug("canceled round, extended has been called or round index has been changed", "round", sr.RoundHandler().Index(), "subround", sr.Name(), "header round", header.GetRound(), - "extended called", sr.ExtendedCalled, + "extended called", sr.GetExtendedCalled(), ) return false } @@ -875,7 +875,7 @@ func (sr *subroundEndRound) doEndRoundJobByParticipant(cnsDta *consensus.Message } startTime := time.Now() - err := sr.BlockProcessor().CommitBlock(header, sr.Body) + err := sr.BlockProcessor().CommitBlock(header, sr.GetBody()) elapsedTime := time.Since(startTime) if elapsedTime >= common.CommitMaxTime { log.Warn("doEndRoundJobByParticipant.CommitBlock", "elapsed time", elapsedTime) @@ -936,11 +936,11 @@ func (sr *subroundEndRound) 
haveConsensusHeaderWithFullInfo(cnsDta *consensus.Me return sr.isConsensusHeaderReceived() } - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false, nil } - header := sr.Header.ShallowClone() + header := sr.GetHeader().ShallowClone() // TODO[cleanup cns finality]: remove this if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, header.GetEpoch()) { err := header.SetPubKeysBitmap(cnsDta.PubKeysBitmap) @@ -965,11 +965,11 @@ func (sr *subroundEndRound) haveConsensusHeaderWithFullInfo(cnsDta *consensus.Me } func (sr *subroundEndRound) isConsensusHeaderReceived() (bool, data.HeaderHandler) { - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false, nil } - consensusHeaderHash, err := core.CalculateHash(sr.Marshalizer(), sr.Hasher(), sr.Header) + consensusHeaderHash, err := core.CalculateHash(sr.Marshalizer(), sr.Hasher(), sr.GetHeader()) if err != nil { log.Debug("isConsensusHeaderReceived: calculate consensus header hash", "error", err.Error()) return false, nil @@ -1016,7 +1016,7 @@ func (sr *subroundEndRound) isConsensusHeaderReceived() (bool, data.HeaderHandle } func (sr *subroundEndRound) signBlockHeader(leader []byte) ([]byte, error) { - headerClone := sr.Header.ShallowClone() + headerClone := sr.GetHeader().ShallowClone() err := headerClone.SetLeaderSignature(nil) if err != nil { return nil, err @@ -1039,16 +1039,16 @@ func (sr *subroundEndRound) updateMetricsForLeader() { func (sr *subroundEndRound) broadcastBlockDataLeader(sender []byte) error { // TODO[cleanup cns finality]: remove this method, block data was already broadcast during subroundBlock - if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return nil } - miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.Header, sr.Body) + miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.GetHeader(), sr.GetBody()) if err != nil { return err } - return sr.BroadcastMessenger().BroadcastBlockDataLeader(sr.Header, miniBlocks, transactions, sender) + return sr.BroadcastMessenger().BroadcastBlockDataLeader(sr.GetHeader(), miniBlocks, transactions, sender) } func (sr *subroundEndRound) setHeaderForValidator(header data.HeaderHandler) error { @@ -1068,14 +1068,14 @@ func (sr *subroundEndRound) prepareBroadcastBlockDataForValidator() error { return err } - go sr.BroadcastMessenger().PrepareBroadcastBlockDataValidator(sr.Header, miniBlocks, transactions, idx, pk) + go sr.BroadcastMessenger().PrepareBroadcastBlockDataValidator(sr.GetHeader(), miniBlocks, transactions, idx, pk) return nil } // doEndRoundConsensusCheck method checks if the consensus is achieved func (sr *subroundEndRound) doEndRoundConsensusCheck() bool { - if sr.RoundCanceled { + if sr.GetRoundCanceled() { return false } @@ -1105,7 +1105,7 @@ func (sr *subroundEndRound) checkSignaturesValidity(bitmap []byte) error { func (sr *subroundEndRound) hasProposerSignature(bitmap []byte) bool { // TODO[cleanup cns finality]: remove this check - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return true } @@ -1113,14 +1113,14 @@ func (sr *subroundEndRound) hasProposerSignature(bitmap []byte) bool { } func (sr *subroundEndRound) 
isOutOfTime() bool { - startTime := sr.RoundTimeStamp + startTime := sr.GetRoundTimeStamp() maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 if sr.RoundHandler().RemainingTime(startTime, maxTime) < 0 { log.Debug("canceled round, time is out", "round", sr.SyncTimer().FormattedCurrentTime(), sr.RoundHandler().Index(), "subround", sr.Name()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return true } @@ -1141,7 +1141,7 @@ func (sr *subroundEndRound) getIndexPkAndDataToBroadcast() (int, []byte, map[uin return -1, nil, nil, nil, err } - miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.Header, sr.Body) + miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.GetHeader(), sr.GetBody()) if err != nil { return -1, nil, nil, nil, err } @@ -1170,7 +1170,7 @@ func (sr *subroundEndRound) getMinConsensusGroupIndexOfManagedKeys() int { func (sr *subroundEndRound) getSender() ([]byte, error) { // TODO[cleanup cns finality]: remove this code block - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { leader, errGetLeader := sr.GetLeader() if errGetLeader != nil { log.Debug("GetLeader", "error", errGetLeader) @@ -1194,7 +1194,7 @@ func (sr *subroundEndRound) getSender() ([]byte, error) { func (sr *subroundEndRound) waitForSignalSync() bool { // TODO[cleanup cns finality]: remove this - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return true } @@ -1239,7 +1239,7 @@ func (sr *subroundEndRound) waitAllSignatures() { return } - sr.WaitingAllSignaturesTimeOut = true + sr.SetWaitingAllSignaturesTimeOut(true) select { case sr.ConsensusChannel() <- true: @@ -1260,7 +1260,7 @@ func (sr *subroundEndRound) remainingTime() time.Duration { // is set on true for the subround Signature func (sr *subroundEndRound) receivedSignature(_ context.Context, cnsDta *consensus.Message) bool { // TODO[cleanup cns finality]: remove this check - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return true } @@ -1326,7 +1326,7 @@ func (sr *subroundEndRound) receivedSignature(_ context.Context, cnsDta *consens func (sr *subroundEndRound) checkReceivedSignatures() bool { threshold := sr.Threshold(bls.SrSignature) - if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.Header) { + if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.GetHeader()) { threshold = sr.FallbackThreshold(bls.SrSignature) log.Warn("subroundEndRound.checkReceivedSignatures: fallback validation has been applied", "minimum number of signatures required", threshold, @@ -1337,7 +1337,7 @@ func (sr *subroundEndRound) checkReceivedSignatures() bool { areSignaturesCollected, numSigs := sr.areSignaturesCollected(threshold) areAllSignaturesCollected := numSigs == sr.ConsensusGroupSize() - isSignatureCollectionDone := areAllSignaturesCollected || (areSignaturesCollected && sr.WaitingAllSignaturesTimeOut) + isSignatureCollectionDone := areAllSignaturesCollected || (areSignaturesCollected && sr.GetWaitingAllSignaturesTimeOut()) 
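// Illustrative sketch, not part of the patch: the subround code above no longer touches
// exported fields (sr.RoundCanceled, sr.WaitingAllSignaturesTimeOut, ...) but goes through
// the ConsensusStateHandler accessors introduced in this series, so helpers can be written
// against the interface alone. cancelIfTimedOut is a hypothetical name used only for
// illustration and assumes the spos package as modified by this patch.
func cancelIfTimedOut(state spos.ConsensusStateHandler, timedOut bool) {
	if !timedOut || state.GetRoundCanceled() {
		return // nothing to do: still in time, or the round was already canceled
	}
	state.SetWaitingAllSignaturesTimeOut(true)
	state.SetRoundCanceled(true)
}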
isSelfJobDone := sr.IsSelfJobDone(bls.SrSignature) diff --git a/consensus/spos/bls/v2/subroundEndRound_test.go b/consensus/spos/bls/v2/subroundEndRound_test.go index 68c12d31674..705f830ee22 100644 --- a/consensus/spos/bls/v2/subroundEndRound_test.go +++ b/consensus/spos/bls/v2/subroundEndRound_test.go @@ -66,7 +66,7 @@ func initSubroundEndRoundWithContainer( v2.ProcessingThresholdPercent, appStatusHandler, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -104,7 +104,7 @@ func initSubroundEndRoundWithContainerAndConsensusState( v2.ProcessingThresholdPercent, appStatusHandler, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, signatureThrottler, ) @@ -150,7 +150,7 @@ func TestNewSubroundEndRound(t *testing.T) { v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -165,7 +165,7 @@ func TestNewSubroundEndRound(t *testing.T) { v2.ProcessingThresholdPercent, nil, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -180,7 +180,7 @@ func TestNewSubroundEndRound(t *testing.T) { v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, nil, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -232,7 +232,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing. v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -268,7 +268,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *test v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -305,7 +305,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *test v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -341,7 +341,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -377,7 +377,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testin v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -413,7 +413,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -449,7 +449,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilThrottlerShouldFail(t *testing.T 
v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, nil, ) @@ -485,7 +485,7 @@ func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -1079,7 +1079,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -1225,7 +1225,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -1586,7 +1586,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -1754,7 +1754,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -1878,7 +1878,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -2246,7 +2246,7 @@ func TestSubroundEndRound_getMinConsensusGroupIndexOfManagedKeys(t *testing.T) { v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) diff --git a/consensus/spos/bls/v2/subroundSignature_test.go b/consensus/spos/bls/v2/subroundSignature_test.go index 24289498d83..36811e4c62b 100644 --- a/consensus/spos/bls/v2/subroundSignature_test.go +++ b/consensus/spos/bls/v2/subroundSignature_test.go @@ -16,7 +16,6 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" - "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" @@ -54,7 +53,7 @@ func initSubroundSignatureWithContainer(container *consensusMocks.ConsensusCoreM sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -96,7 +95,7 @@ func TestNewSubroundSignature(t *testing.T) { nil, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -124,7 +123,7 @@ func TestNewSubroundSignature(t *testing.T) { sr, nil, &testscommon.SentSignatureTrackerStub{}, - 
&mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -138,7 +137,7 @@ func TestNewSubroundSignature(t *testing.T) { sr, &statusHandler.AppStatusHandlerStub{}, nil, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -153,7 +152,7 @@ func TestNewSubroundSignature(t *testing.T) { sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, nil, ) @@ -190,7 +189,7 @@ func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *te sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -225,7 +224,7 @@ func TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -260,7 +259,7 @@ func TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -296,7 +295,7 @@ func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *test sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -331,7 +330,7 @@ func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -366,7 +365,7 @@ func TestSubroundSignature_NewSubroundSignatureNilAppStatusHandlerShouldFail(t * sr, nil, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -401,7 +400,7 @@ func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -558,7 +557,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { mutex.Unlock() }, }, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -665,7 +664,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { mutex.Unlock() }, }, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -771,7 +770,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { signatureSentForPks[string(pkBytes)] = struct{}{} }, }, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -838,7 +837,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { signatureSentForPks[string(pkBytes)] = struct{}{} }, }, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -907,7 +906,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { varCalled = true }, }, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, 
&dataRetrieverMock.ThrottlerStub{}, ) @@ -974,7 +973,7 @@ func TestSubroundSignature_DoSignatureJobForManagedKeys(t *testing.T) { mutex.Unlock() }, }, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -1066,7 +1065,7 @@ func TestSubroundSignature_DoSignatureJobForManagedKeys(t *testing.T) { sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{ CanProcessCalled: func() bool { return false diff --git a/consensus/spos/bls/v2/subroundStartRound_test.go b/consensus/spos/bls/v2/subroundStartRound_test.go index da1ee8c1b04..6fd4ff7488f 100644 --- a/consensus/spos/bls/v2/subroundStartRound_test.go +++ b/consensus/spos/bls/v2/subroundStartRound_test.go @@ -35,7 +35,7 @@ func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (v2.SubroundStartR sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) return startRound, err @@ -46,7 +46,7 @@ func defaultWithoutErrorSubroundStartRoundFromSubround(sr *spos.Subround) v2.Sub sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) return startRound @@ -83,7 +83,7 @@ func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) v2 sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) return srStartRound @@ -123,7 +123,7 @@ func TestNewSubroundStartRound(t *testing.T) { nil, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) assert.Nil(t, srStartRound) @@ -136,7 +136,7 @@ func TestNewSubroundStartRound(t *testing.T) { sr, v2.ProcessingThresholdPercent, nil, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) assert.Nil(t, srStartRound) @@ -534,7 +534,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) srStartRound.Check() assert.True(t, wasCalled) @@ -586,7 +586,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) srStartRound.Check() assert.True(t, wasCalled) @@ -637,7 +637,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) srStartRound.Check() assert.True(t, wasCalled) @@ -699,7 +699,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) srStartRound.Check() assert.True(t, wasMetricConsensusStateCalled) @@ -765,7 +765,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) srStartRound.Check() assert.True(t, wasMetricConsensusStateCalled) @@ -814,7 +814,7 @@ func TestSubroundStartRound_GenerateNextConsensusGroupShouldErrNilHeader(t *test sr, 
v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) @@ -841,7 +841,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenResetErr(t *tes sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) @@ -877,7 +877,7 @@ func TestSubroundStartRound_IndexRoundIfNeededFailShardIdForEpoch(t *testing.T) sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) @@ -921,7 +921,7 @@ func TestSubroundStartRound_IndexRoundIfNeededFailGetValidatorsIndexes(t *testin sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) @@ -960,7 +960,7 @@ func TestSubroundStartRound_IndexRoundIfNeededShouldFullyWork(t *testing.T) { sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) @@ -1003,7 +1003,7 @@ func TestSubroundStartRound_IndexRoundIfNeededDifferentShardIdFail(t *testing.T) sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) @@ -1055,7 +1055,7 @@ func TestSubroundStartRound_changeEpoch(t *testing.T) { sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) startRound.ChangeEpoch(1) @@ -1084,7 +1084,7 @@ func TestSubroundStartRound_changeEpoch(t *testing.T) { sr, v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) startRound.ChangeEpoch(1) diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index ff336ad3fae..6a913634788 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -320,6 +320,11 @@ func (cns *ConsensusState) GetData() []byte { return cns.Data } +// SetData sets the Data of the consensusState +func (cns *ConsensusState) SetData(data []byte) { + cns.Data = data +} + // IsMultiKeyLeaderInCurrentRound method checks if one of the nodes which are controlled by this instance // is leader in the current round func (cns *ConsensusState) IsMultiKeyLeaderInCurrentRound() bool { @@ -384,3 +389,68 @@ func (cns *ConsensusState) GetMultikeyRedundancyStepInReason() string { func (cns *ConsensusState) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) { cns.keysHandler.ResetRoundsWithoutReceivedMessages(pkBytes, pid) } + +// GetRoundCanceled returns the state of the current round +func (cns *ConsensusState) GetRoundCanceled() bool { + return cns.RoundCanceled +} + +// SetRoundCanceled sets the state of the current round +func (cns *ConsensusState) SetRoundCanceled(roundCanceled bool) { + cns.RoundCanceled = roundCanceled +} + +// GetRoundIndex returns the index of the current round +func (cns *ConsensusState) GetRoundIndex() int64 { + return cns.RoundIndex +} + +// GetRoundTimeStamp returns the time stamp of the current round +func (cns *ConsensusState) GetRoundTimeStamp() time.Time { + return cns.RoundTimeStamp +} + +// GetExtendedCalled returns the state of the extended called +func (cns *ConsensusState) GetExtendedCalled() bool { + return 
cns.ExtendedCalled +} + +// SetExtendedCalled sets the state of the extended called +func (cns *ConsensusState) SetExtendedCalled(extendedCalled bool) { + cns.ExtendedCalled = extendedCalled +} + +// GetBody returns the body of the current round +func (cns *ConsensusState) GetBody() data.BodyHandler { + return cns.Body +} + +// SetBody sets the body of the current round +func (cns *ConsensusState) SetBody(body data.BodyHandler) { + cns.Body = body +} + +// GetHeader returns the header of the current round +func (cns *ConsensusState) GetHeader() data.HeaderHandler { + return cns.Header +} + +// GetWaitingAllSignaturesTimeOut returns the state of the waiting all signatures time out +func (cns *ConsensusState) GetWaitingAllSignaturesTimeOut() bool { + return cns.WaitingAllSignaturesTimeOut +} + +// SetWaitingAllSignaturesTimeOut sets the state of the waiting all signatures time out +func (cns *ConsensusState) SetWaitingAllSignaturesTimeOut(waitingAllSignaturesTimeOut bool) { + cns.WaitingAllSignaturesTimeOut = waitingAllSignaturesTimeOut +} + +// SetHeader sets the header of the current round +func (cns *ConsensusState) SetHeader(header data.HeaderHandler) { + cns.Header = header +} + +// IsInterfaceNil returns true if there is no value under the interface +func (cns *ConsensusState) IsInterfaceNil() bool { + return cns == nil +} diff --git a/consensus/spos/roundConsensus.go b/consensus/spos/roundConsensus.go index cda20e33224..503eb0b2a2a 100644 --- a/consensus/spos/roundConsensus.go +++ b/consensus/spos/roundConsensus.go @@ -234,3 +234,8 @@ func (rcns *roundConsensus) IsKeyManagedBySelf(pkBytes []byte) bool { func (rcns *roundConsensus) IncrementRoundsWithoutReceivedMessages(pkBytes []byte) { rcns.keysHandler.IncrementRoundsWithoutReceivedMessages(pkBytes) } + +// GetKeysHandler returns the keysHandler instance +func (rcns *roundConsensus) GetKeysHandler() consensus.KeysHandler { + return rcns.keysHandler +} diff --git a/consensus/spos/subround.go b/consensus/spos/subround.go index e124475407b..00b2c55fe6c 100644 --- a/consensus/spos/subround.go +++ b/consensus/spos/subround.go @@ -23,7 +23,7 @@ const ( // situation of the Subround and Check function will decide if in this Subround the consensus is achieved type Subround struct { ConsensusCoreHandler - *ConsensusState + ConsensusStateHandler previous int current int @@ -51,7 +51,7 @@ func NewSubround( startTime int64, endTime int64, name string, - consensusState *ConsensusState, + consensusState ConsensusStateHandler, consensusStateChangedChannel chan bool, executeStoredMessages func(), container ConsensusCoreHandler, @@ -73,7 +73,7 @@ func NewSubround( sr := Subround{ ConsensusCoreHandler: container, - ConsensusState: consensusState, + ConsensusStateHandler: consensusState, previous: previous, current: current, next: next, @@ -94,7 +94,7 @@ func NewSubround( } func checkNewSubroundParams( - state *ConsensusState, + state ConsensusStateHandler, consensusStateChangedChannel chan bool, executeStoredMessages func(), container ConsensusCoreHandler, @@ -151,7 +151,7 @@ func (sr *Subround) DoWork(ctx context.Context, roundHandler consensus.RoundHand } case <-time.After(roundHandler.RemainingTime(startTime, maxTime)): if sr.Extend != nil { - sr.RoundCanceled = true + sr.SetRoundCanceled(true) sr.Extend(sr.current) } @@ -212,7 +212,7 @@ func (sr *Subround) ConsensusChannel() chan bool { // GetAssociatedPid returns the associated PeerID to the provided public key bytes func (sr *Subround) GetAssociatedPid(pkBytes []byte) core.PeerID { - return 
sr.keysHandler.GetAssociatedPid(pkBytes) + return sr.GetKeysHandler().GetAssociatedPid(pkBytes) } // ShouldConsiderSelfKeyInConsensus returns true if current machine is the main one, or it is a backup machine but the main diff --git a/consensus/mock/sposWorkerMock.go b/testscommon/consensus/sposWorkerMock.go similarity index 99% rename from consensus/mock/sposWorkerMock.go rename to testscommon/consensus/sposWorkerMock.go index 734ce65c326..c34eeebcc8e 100644 --- a/consensus/mock/sposWorkerMock.go +++ b/testscommon/consensus/sposWorkerMock.go @@ -1,4 +1,4 @@ -package mock +package consensus import ( "context" From ae7f18a87a3ed21f0c25ff9e581240b180c862f0 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 27 Sep 2024 15:59:13 +0300 Subject: [PATCH 20/30] fixes --- consensus/spos/bls/v1/export_test.go | 2 +- consensus/spos/bls/v1/subroundEndRound.go | 79 +++++++++++---------- consensus/spos/bls/v1/subroundSignature.go | 16 ++--- consensus/spos/bls/v1/subroundStartRound.go | 22 +++--- consensus/spos/bls/v2/subroundSignature.go | 26 +++---- consensus/spos/bls/v2/subroundStartRound.go | 22 +++--- consensus/spos/consensusState.go | 10 +++ consensus/spos/interface.go | 2 + 8 files changed, 98 insertions(+), 81 deletions(-) diff --git a/consensus/spos/bls/v1/export_test.go b/consensus/spos/bls/v1/export_test.go index 6cb39895b7e..3ef8b963d2e 100644 --- a/consensus/spos/bls/v1/export_test.go +++ b/consensus/spos/bls/v1/export_test.go @@ -47,7 +47,7 @@ func (fct *factory) ChronologyHandler() consensus.ChronologyHandler { } // ConsensusState gets the consensus state struct pointer -func (fct *factory) ConsensusState() *spos.ConsensusState { +func (fct *factory) ConsensusState() spos.ConsensusStateHandler { return fct.consensusState } diff --git a/consensus/spos/bls/v1/subroundEndRound.go b/consensus/spos/bls/v1/subroundEndRound.go index 855c4f70203..c591c736aca 100644 --- a/consensus/spos/bls/v1/subroundEndRound.go +++ b/consensus/spos/bls/v1/subroundEndRound.go @@ -133,11 +133,11 @@ func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsD } func (sr *subroundEndRound) isBlockHeaderFinalInfoValid(cnsDta *consensus.Message) bool { - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false } - header := sr.Header.ShallowClone() + header := sr.GetHeader().ShallowClone() err := header.SetPubKeysBitmap(cnsDta.PubKeysBitmap) if err != nil { log.Debug("isBlockHeaderFinalInfoValid.SetPubKeysBitmap", "error", err.Error()) @@ -302,7 +302,8 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { return false } - if check.IfNil(sr.Header) { + header := sr.GetHeader() + if check.IfNil(header) { log.Error("doEndRoundJobByLeader.CheckNilHeader", "error", spos.ErrNilHeader) return false } @@ -314,13 +315,13 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { return false } - err = sr.Header.SetPubKeysBitmap(bitmap) + err = header.SetPubKeysBitmap(bitmap) if err != nil { log.Debug("doEndRoundJobByLeader.SetPubKeysBitmap", "error", err.Error()) return false } - err = sr.Header.SetSignature(sig) + err = header.SetSignature(sig) if err != nil { log.Debug("doEndRoundJobByLeader.SetSignature", "error", err.Error()) return false @@ -333,7 +334,7 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { return false } - err = sr.Header.SetLeaderSignature(leaderSignature) + err = header.SetLeaderSignature(leaderSignature) if err != nil { log.Debug("doEndRoundJobByLeader.SetLeaderSignature", "error", err.Error()) return false @@ -364,13 +365,13 @@ func (sr 
*subroundEndRound) doEndRoundJobByLeader() bool { } // broadcast header - err = sr.BroadcastMessenger().BroadcastHeader(sr.Header, []byte(leader)) + err = sr.BroadcastMessenger().BroadcastHeader(header, []byte(leader)) if err != nil { log.Debug("doEndRoundJobByLeader.BroadcastHeader", "error", err.Error()) } startTime := time.Now() - err = sr.BlockProcessor().CommitBlock(sr.Header, sr.Body) + err = sr.BlockProcessor().CommitBlock(header, sr.GetBody()) elapsedTime := time.Since(startTime) if elapsedTime >= common.CommitMaxTime { log.Warn("doEndRoundJobByLeader.CommitBlock", "elapsed time", elapsedTime) @@ -395,7 +396,7 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { log.Debug("doEndRoundJobByLeader.broadcastBlockDataLeader", "error", err.Error()) } - msg := fmt.Sprintf("Added proposed block with nonce %d in blockchain", sr.Header.GetNonce()) + msg := fmt.Sprintf("Added proposed block with nonce %d in blockchain", header.GetNonce()) log.Debug(display.Headline(msg, sr.SyncTimer().FormattedCurrentTime(), "+")) sr.updateMetricsForLeader() @@ -404,7 +405,8 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { } func (sr *subroundEndRound) aggregateSigsAndHandleInvalidSigners(bitmap []byte) ([]byte, []byte, error) { - sig, err := sr.SigningHandler().AggregateSigs(bitmap, sr.Header.GetEpoch()) + header := sr.GetHeader() + sig, err := sr.SigningHandler().AggregateSigs(bitmap, header.GetEpoch()) if err != nil { log.Debug("doEndRoundJobByLeader.AggregateSigs", "error", err.Error()) @@ -417,7 +419,7 @@ func (sr *subroundEndRound) aggregateSigsAndHandleInvalidSigners(bitmap []byte) return nil, nil, err } - err = sr.SigningHandler().Verify(sr.GetData(), bitmap, sr.Header.GetEpoch()) + err = sr.SigningHandler().Verify(sr.GetData(), bitmap, header.GetEpoch()) if err != nil { log.Debug("doEndRoundJobByLeader.Verify", "error", err.Error()) @@ -431,7 +433,8 @@ func (sr *subroundEndRound) verifyNodesOnAggSigFail() ([]string, error) { invalidPubKeys := make([]string, 0) pubKeys := sr.ConsensusGroup() - if check.IfNil(sr.Header) { + header := sr.GetHeader() + if check.IfNil(header) { return nil, spos.ErrNilHeader } @@ -447,7 +450,7 @@ func (sr *subroundEndRound) verifyNodesOnAggSigFail() ([]string, error) { } isSuccessfull := true - err = sr.SigningHandler().VerifySignatureShare(uint16(i), sigShare, sr.GetData(), sr.Header.GetEpoch()) + err = sr.SigningHandler().VerifySignatureShare(uint16(i), sigShare, sr.GetData(), header.GetEpoch()) if err != nil { isSuccessfull = false @@ -524,7 +527,8 @@ func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, []byte, error) threshold := sr.Threshold(sr.Current()) numValidSigShares := sr.ComputeSize(bls.SrSignature) - if check.IfNil(sr.Header) { + header := sr.GetHeader() + if check.IfNil(header) { return nil, nil, spos.ErrNilHeader } @@ -539,7 +543,7 @@ func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, []byte, error) return nil, nil, err } - sig, err := sr.SigningHandler().AggregateSigs(bitmap, sr.Header.GetEpoch()) + sig, err := sr.SigningHandler().AggregateSigs(bitmap, header.GetEpoch()) if err != nil { return nil, nil, err } @@ -559,6 +563,7 @@ func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfo() { return } + header := sr.GetHeader() cnsMsg := consensus.NewConsensusMessage( sr.GetData(), nil, @@ -569,9 +574,9 @@ func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfo() { int(bls.MtBlockHeaderFinalInfo), sr.RoundHandler().Index(), sr.ChainID(), - sr.Header.GetPubKeysBitmap(), - sr.Header.GetSignature(), - 
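The end-round hunks above consistently replace direct reads of the embedded Header field with a single GetHeader() call, cache the result in a local header variable, and then mutate that handler. A minimal, self-contained sketch of the same read-once-then-mutate shape; the interface names headerMutator and stateReader are stand-ins invented for the sketch, while in the real code the methods come from data.HeaderHandler and spos.ConsensusStateHandler:

package sketch

import "errors"

var errNilHeader = errors.New("nil header")

// headerMutator captures only the header methods touched in the hunk above.
type headerMutator interface {
	SetPubKeysBitmap([]byte) error
	SetSignature([]byte) error
	SetLeaderSignature([]byte) error
}

// stateReader captures the single accessor the sketch needs from the state handler.
type stateReader interface {
	GetHeader() headerMutator
}

// applyAggregatedData mirrors the pattern from doEndRoundJobByLeader: read the
// header once through the accessor, then apply the aggregated consensus data.
func applyAggregatedData(sr stateReader, bitmap, sig, leaderSig []byte) error {
	header := sr.GetHeader() // one call through the accessor instead of reading sr.Header
	if header == nil {
		return errNilHeader
	}
	if err := header.SetPubKeysBitmap(bitmap); err != nil {
		return err
	}
	if err := header.SetSignature(sig); err != nil {
		return err
	}
	// the leader signature is applied last, matching the order in the hunk above
	return header.SetLeaderSignature(leaderSig)
}
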
sr.Header.GetLeaderSignature(), + header.GetPubKeysBitmap(), + header.GetSignature(), + header.GetLeaderSignature(), sr.GetAssociatedPid([]byte(leader)), nil, ) @@ -583,9 +588,9 @@ func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfo() { } log.Debug("step 3: block header final info has been sent", - "PubKeysBitmap", sr.Header.GetPubKeysBitmap(), - "AggregateSignature", sr.Header.GetSignature(), - "LeaderSignature", sr.Header.GetLeaderSignature()) + "PubKeysBitmap", header.GetPubKeysBitmap(), + "AggregateSignature", header.GetSignature(), + "LeaderSignature", header.GetLeaderSignature()) } func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []byte) { @@ -630,7 +635,7 @@ func (sr *subroundEndRound) doEndRoundJobByParticipant(cnsDta *consensus.Message sr.mutProcessingEndRound.Lock() defer sr.mutProcessingEndRound.Unlock() - if sr.RoundCanceled { + if sr.GetRoundCanceled() { return false } if !sr.IsConsensusDataSet() { @@ -654,13 +659,13 @@ func (sr *subroundEndRound) doEndRoundJobByParticipant(cnsDta *consensus.Message sr.SetProcessingBlock(true) - shouldNotCommitBlock := sr.ExtendedCalled || int64(header.GetRound()) < sr.RoundHandler().Index() + shouldNotCommitBlock := sr.GetExtendedCalled() || int64(header.GetRound()) < sr.RoundHandler().Index() if shouldNotCommitBlock { log.Debug("canceled round, extended has been called or round index has been changed", "round", sr.RoundHandler().Index(), "subround", sr.Name(), "header round", header.GetRound(), - "extended called", sr.ExtendedCalled, + "extended called", sr.GetExtendedCalled(), ) return false } @@ -675,7 +680,7 @@ func (sr *subroundEndRound) doEndRoundJobByParticipant(cnsDta *consensus.Message } startTime := time.Now() - err := sr.BlockProcessor().CommitBlock(header, sr.Body) + err := sr.BlockProcessor().CommitBlock(header, sr.GetBody()) elapsedTime := time.Since(startTime) if elapsedTime >= common.CommitMaxTime { log.Warn("doEndRoundJobByParticipant.CommitBlock", "elapsed time", elapsedTime) @@ -717,11 +722,11 @@ func (sr *subroundEndRound) haveConsensusHeaderWithFullInfo(cnsDta *consensus.Me return sr.isConsensusHeaderReceived() } - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false, nil } - header := sr.Header.ShallowClone() + header := sr.GetHeader().ShallowClone() err := header.SetPubKeysBitmap(cnsDta.PubKeysBitmap) if err != nil { return false, nil @@ -741,11 +746,11 @@ func (sr *subroundEndRound) haveConsensusHeaderWithFullInfo(cnsDta *consensus.Me } func (sr *subroundEndRound) isConsensusHeaderReceived() (bool, data.HeaderHandler) { - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false, nil } - consensusHeaderHash, err := core.CalculateHash(sr.Marshalizer(), sr.Hasher(), sr.Header) + consensusHeaderHash, err := core.CalculateHash(sr.Marshalizer(), sr.Hasher(), sr.GetHeader()) if err != nil { log.Debug("isConsensusHeaderReceived: calculate consensus header hash", "error", err.Error()) return false, nil @@ -789,7 +794,7 @@ func (sr *subroundEndRound) isConsensusHeaderReceived() (bool, data.HeaderHandle } func (sr *subroundEndRound) signBlockHeader() ([]byte, error) { - headerClone := sr.Header.ShallowClone() + headerClone := sr.GetHeader().ShallowClone() err := headerClone.SetLeaderSignature(nil) if err != nil { return nil, err @@ -815,7 +820,7 @@ func (sr *subroundEndRound) updateMetricsForLeader() { } func (sr *subroundEndRound) broadcastBlockDataLeader() error { - miniBlocks, transactions, err := 
sr.BlockProcessor().MarshalizedDataToBroadcast(sr.Header, sr.Body) + miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.GetHeader(), sr.GetBody()) if err != nil { return err } @@ -826,7 +831,7 @@ func (sr *subroundEndRound) broadcastBlockDataLeader() error { return errGetLeader } - return sr.BroadcastMessenger().BroadcastBlockDataLeader(sr.Header, miniBlocks, transactions, []byte(leader)) + return sr.BroadcastMessenger().BroadcastBlockDataLeader(sr.GetHeader(), miniBlocks, transactions, []byte(leader)) } func (sr *subroundEndRound) setHeaderForValidator(header data.HeaderHandler) error { @@ -846,14 +851,14 @@ func (sr *subroundEndRound) prepareBroadcastBlockDataForValidator() error { return err } - go sr.BroadcastMessenger().PrepareBroadcastBlockDataValidator(sr.Header, miniBlocks, transactions, idx, pk) + go sr.BroadcastMessenger().PrepareBroadcastBlockDataValidator(sr.GetHeader(), miniBlocks, transactions, idx, pk) return nil } // doEndRoundConsensusCheck method checks if the consensus is achieved func (sr *subroundEndRound) doEndRoundConsensusCheck() bool { - if sr.RoundCanceled { + if sr.GetRoundCanceled() { return false } @@ -882,14 +887,14 @@ func (sr *subroundEndRound) checkSignaturesValidity(bitmap []byte) error { } func (sr *subroundEndRound) isOutOfTime() bool { - startTime := sr.RoundTimeStamp + startTime := sr.GetRoundTimeStamp() maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 if sr.RoundHandler().RemainingTime(startTime, maxTime) < 0 { log.Debug("canceled round, time is out", "round", sr.SyncTimer().FormattedCurrentTime(), sr.RoundHandler().Index(), "subround", sr.Name()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return true } @@ -910,7 +915,7 @@ func (sr *subroundEndRound) getIndexPkAndDataToBroadcast() (int, []byte, map[uin return -1, nil, nil, nil, err } - miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.Header, sr.Body) + miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.GetHeader(), sr.GetBody()) if err != nil { return -1, nil, nil, nil, err } diff --git a/consensus/spos/bls/v1/subroundSignature.go b/consensus/spos/bls/v1/subroundSignature.go index 2cf77192925..1d71ac59420 100644 --- a/consensus/spos/bls/v1/subroundSignature.go +++ b/consensus/spos/bls/v1/subroundSignature.go @@ -62,7 +62,7 @@ func checkNewSubroundSignatureParams( if baseSubround == nil { return spos.ErrNilSubround } - if baseSubround.ConsensusState == nil { + if check.IfNil(baseSubround.ConsensusStateHandler) { return spos.ErrNilConsensusState } @@ -76,7 +76,7 @@ func (sr *subroundSignature) doSignatureJob(_ context.Context) bool { if !sr.CanDoSubroundJob(sr.Current()) { return false } - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { log.Error("doSignatureJob", "error", spos.ErrNilHeader) return false } @@ -94,7 +94,7 @@ func (sr *subroundSignature) doSignatureJob(_ context.Context) bool { signatureShare, err := sr.SigningHandler().CreateSignatureShareForPublicKey( sr.GetData(), uint16(selfIndex), - sr.Header.GetEpoch(), + sr.GetHeader().GetEpoch(), []byte(sr.SelfPubKey()), ) if err != nil { @@ -238,7 +238,7 @@ func (sr *subroundSignature) receivedSignature(_ context.Context, cnsDta *consen // doSignatureConsensusCheck method checks if the consensus in the subround Signature is achieved func (sr *subroundSignature) doSignatureConsensusCheck() bool { - if sr.RoundCanceled { + if sr.GetRoundCanceled() { return false } @@ -252,7 
+252,7 @@ func (sr *subroundSignature) doSignatureConsensusCheck() bool { isSelfInConsensusGroup := sr.IsNodeInConsensusGroup(sr.SelfPubKey()) || sr.IsMultiKeyInConsensusGroup() threshold := sr.Threshold(sr.Current()) - if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.Header) { + if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.GetHeader()) { threshold = sr.FallbackThreshold(sr.Current()) log.Warn("subroundSignature.doSignatureConsensusCheck: fallback validation has been applied", "minimum number of signatures required", threshold, @@ -263,7 +263,7 @@ func (sr *subroundSignature) doSignatureConsensusCheck() bool { areSignaturesCollected, numSigs := sr.areSignaturesCollected(threshold) areAllSignaturesCollected := numSigs == sr.ConsensusGroupSize() - isJobDoneByLeader := isSelfLeader && (areAllSignaturesCollected || (areSignaturesCollected && sr.WaitingAllSignaturesTimeOut)) + isJobDoneByLeader := isSelfLeader && (areAllSignaturesCollected || (areSignaturesCollected && sr.GetWaitingAllSignaturesTimeOut())) selfJobDone := true if sr.IsNodeInConsensusGroup(sr.SelfPubKey()) { @@ -334,7 +334,7 @@ func (sr *subroundSignature) waitAllSignatures() { return } - sr.WaitingAllSignaturesTimeOut = true + sr.SetWaitingAllSignaturesTimeOut(true) select { case sr.ConsensusChannel() <- true: @@ -372,7 +372,7 @@ func (sr *subroundSignature) doSignatureJobForManagedKeys() bool { signatureShare, err := sr.SigningHandler().CreateSignatureShareForPublicKey( sr.GetData(), uint16(selfIndex), - sr.Header.GetEpoch(), + sr.GetHeader().GetEpoch(), pkBytes, ) if err != nil { diff --git a/consensus/spos/bls/v1/subroundStartRound.go b/consensus/spos/bls/v1/subroundStartRound.go index 81f370d565e..a47d9235cd2 100644 --- a/consensus/spos/bls/v1/subroundStartRound.go +++ b/consensus/spos/bls/v1/subroundStartRound.go @@ -81,7 +81,7 @@ func checkNewSubroundStartRoundParams( if baseSubround == nil { return spos.ErrNilSubround } - if baseSubround.ConsensusState == nil { + if check.IfNil(baseSubround.ConsensusStateHandler) { return spos.ErrNilConsensusState } @@ -106,8 +106,8 @@ func (sr *subroundStartRound) SetOutportHandler(outportHandler outport.OutportHa // doStartRoundJob method does the job of the subround StartRound func (sr *subroundStartRound) doStartRoundJob(_ context.Context) bool { sr.ResetConsensusState() - sr.RoundIndex = sr.RoundHandler().Index() - sr.RoundTimeStamp = sr.RoundHandler().TimeStamp() + sr.SetRoundIndex(sr.RoundHandler().Index()) + sr.SetRoundTimeStamp(sr.RoundHandler().TimeStamp()) topic := spos.GetConsensusTopicID(sr.ShardCoordinator()) sr.GetAntiFloodHandler().ResetForTopic(topic) sr.resetConsensusMessages() @@ -116,7 +116,7 @@ func (sr *subroundStartRound) doStartRoundJob(_ context.Context) bool { // doStartRoundConsensusCheck method checks if the consensus is achieved in the subround StartRound func (sr *subroundStartRound) doStartRoundConsensusCheck() bool { - if sr.RoundCanceled { + if sr.GetRoundCanceled() { return false } @@ -145,7 +145,7 @@ func (sr *subroundStartRound) initCurrentRound() bool { "round index", sr.RoundHandler().Index(), "error", err.Error()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } @@ -164,7 +164,7 @@ func (sr *subroundStartRound) initCurrentRound() bool { if err != nil { log.Debug("initCurrentRound.GetLeader", "error", err.Error()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } @@ -209,19 +209,19 @@ func (sr *subroundStartRound) initCurrentRound() bool { if err != nil { 
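doStartRoundJob above now copies the round index and timestamp into the consensus state through SetRoundIndex and SetRoundTimeStamp instead of writing the fields directly. A small sketch of that reset step against stand-in interfaces; roundStateSetter and roundInfoProvider are invented for the example, the real types being spos.ConsensusStateHandler and the round handler used by the subround:

package sketch

import "time"

// roundStateSetter lists the two setters the start-round job needs.
type roundStateSetter interface {
	SetRoundIndex(int64)
	SetRoundTimeStamp(time.Time)
}

// roundInfoProvider stands in for the RoundHandler queried above.
type roundInfoProvider interface {
	Index() int64
	TimeStamp() time.Time
}

// resetForNewRound mirrors the first lines of doStartRoundJob: copy the current
// round index and timestamp into the consensus state through its setters.
func resetForNewRound(state roundStateSetter, round roundInfoProvider) {
	state.SetRoundIndex(round.Index())
	state.SetRoundTimeStamp(round.TimeStamp())
}
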
log.Debug("initCurrentRound.Reset", "error", err.Error()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } - startTime := sr.RoundTimeStamp + startTime := sr.GetRoundTimeStamp() maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 if sr.RoundHandler().RemainingTime(startTime, maxTime) < 0 { log.Debug("canceled round, time is out", "round", sr.SyncTimer().FormattedCurrentTime(), sr.RoundHandler().Index(), "subround", sr.Name()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } @@ -298,7 +298,7 @@ func (sr *subroundStartRound) indexRoundIfNeeded(pubKeys []string) { BlockWasProposed: false, ShardId: shardId, Epoch: epoch, - Timestamp: uint64(sr.RoundTimeStamp.Unix()), + Timestamp: uint64(sr.GetRoundTimeStamp().Unix()), } roundsInfo := &outportcore.RoundsInfo{ ShardID: shardId, @@ -325,7 +325,7 @@ func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error leader, nextConsensusGroup, err := sr.GetNextConsensusGroup( randomSeed, - uint64(sr.RoundIndex), + uint64(sr.GetRoundIndex()), shardId, sr.NodesCoordinator(), currentHeader.GetEpoch(), diff --git a/consensus/spos/bls/v2/subroundSignature.go b/consensus/spos/bls/v2/subroundSignature.go index 0e280aee8dc..77c0b5a05eb 100644 --- a/consensus/spos/bls/v2/subroundSignature.go +++ b/consensus/spos/bls/v2/subroundSignature.go @@ -73,7 +73,7 @@ func checkNewSubroundSignatureParams( if baseSubround == nil { return spos.ErrNilSubround } - if baseSubround.ConsensusState == nil { + if check.IfNil(baseSubround.ConsensusStateHandler) { return spos.ErrNilConsensusState } @@ -87,13 +87,13 @@ func (sr *subroundSignature) doSignatureJob(ctx context.Context) bool { if !sr.CanDoSubroundJob(sr.Current()) { return false } - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { log.Error("doSignatureJob", "error", spos.ErrNilHeader) return false } isSelfSingleKeyLeader := sr.IsNodeLeaderInCurrentRound(sr.SelfPubKey()) && sr.ShouldConsiderSelfKeyInConsensus() - isFlagActive := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) + isFlagActive := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) isSelfSingleKeyInConsensusGroup := sr.IsNodeInConsensusGroup(sr.SelfPubKey()) && sr.ShouldConsiderSelfKeyInConsensus() if isSelfSingleKeyLeader || isSelfSingleKeyInConsensusGroup { if !sr.doSignatureJobForSingleKey(isSelfSingleKeyLeader, isFlagActive) { @@ -169,7 +169,7 @@ func (sr *subroundSignature) completeSignatureSubRound(pk string, shouldWaitForA // is set on true for the subround Signature func (sr *subroundSignature) receivedSignature(_ context.Context, cnsDta *consensus.Message) bool { // TODO[cleanup cns finality]: remove this method, received signatures will be handled on subroundEndRound - if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return true } @@ -239,7 +239,7 @@ func (sr *subroundSignature) receivedSignature(_ context.Context, cnsDta *consen // doSignatureConsensusCheck method checks if the consensus in the subround Signature is achieved func (sr *subroundSignature) doSignatureConsensusCheck() bool { - if sr.RoundCanceled { + if sr.GetRoundCanceled() { return false } @@ -247,7 +247,7 @@ func (sr *subroundSignature) doSignatureConsensusCheck() bool { return true } - if 
check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false } @@ -261,14 +261,14 @@ func (sr *subroundSignature) doSignatureConsensusCheck() bool { } // TODO[cleanup cns finality]: simply return false and remove the rest of the method. This will be handled by subroundEndRound - if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return false } isSelfLeader := sr.IsSelfLeader() threshold := sr.Threshold(sr.Current()) - if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.Header) { + if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.GetHeader()) { threshold = sr.FallbackThreshold(sr.Current()) log.Warn("subroundSignature.doSignatureConsensusCheck: fallback validation has been applied", "minimum number of signatures required", threshold, @@ -279,7 +279,7 @@ func (sr *subroundSignature) doSignatureConsensusCheck() bool { areSignaturesCollected, numSigs := sr.areSignaturesCollected(threshold) areAllSignaturesCollected := numSigs == sr.ConsensusGroupSize() - isSignatureCollectionDone := areAllSignaturesCollected || (areSignaturesCollected && sr.WaitingAllSignaturesTimeOut) + isSignatureCollectionDone := areAllSignaturesCollected || (areSignaturesCollected && sr.GetWaitingAllSignaturesTimeOut()) isJobDoneByLeader := isSelfLeader && isSignatureCollectionDone isSelfJobDone := sr.IsSelfJobDone(sr.Current()) @@ -347,7 +347,7 @@ func (sr *subroundSignature) waitAllSignatures() { return } - sr.WaitingAllSignaturesTimeOut = true + sr.SetWaitingAllSignaturesTimeOut(true) select { case sr.ConsensusChannel() <- true: @@ -413,14 +413,14 @@ func (sr *subroundSignature) doSignatureJobForManagedKeys(ctx context.Context) b func (sr *subroundSignature) sendSignatureForManagedKey(idx int, pk string) bool { isCurrentNodeMultiKeyLeader := sr.IsMultiKeyLeaderInCurrentRound() - isFlagActive := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) + isFlagActive := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) pkBytes := []byte(pk) signatureShare, err := sr.SigningHandler().CreateSignatureShareForPublicKey( sr.GetData(), uint16(idx), - sr.Header.GetEpoch(), + sr.GetHeader().GetEpoch(), pkBytes, ) if err != nil { @@ -480,7 +480,7 @@ func (sr *subroundSignature) doSignatureJobForSingleKey(isSelfLeader bool, isFla signatureShare, err := sr.SigningHandler().CreateSignatureShareForPublicKey( sr.GetData(), uint16(selfIndex), - sr.Header.GetEpoch(), + sr.GetHeader().GetEpoch(), []byte(sr.SelfPubKey()), ) if err != nil { diff --git a/consensus/spos/bls/v2/subroundStartRound.go b/consensus/spos/bls/v2/subroundStartRound.go index 7e7c7b71808..887532c02fa 100644 --- a/consensus/spos/bls/v2/subroundStartRound.go +++ b/consensus/spos/bls/v2/subroundStartRound.go @@ -71,7 +71,7 @@ func checkNewSubroundStartRoundParams( if baseSubround == nil { return spos.ErrNilSubround } - if baseSubround.ConsensusState == nil { + if check.IfNil(baseSubround.ConsensusStateHandler) { return spos.ErrNilConsensusState } @@ -96,8 +96,8 @@ func (sr *subroundStartRound) SetOutportHandler(outportHandler outport.OutportHa // doStartRoundJob method does the job of the subround StartRound func (sr *subroundStartRound) doStartRoundJob(_ context.Context) bool { sr.ResetConsensusState() - sr.RoundIndex = sr.RoundHandler().Index() - sr.RoundTimeStamp = 
sr.RoundHandler().TimeStamp() + sr.SetRoundIndex(sr.RoundHandler().Index()) + sr.SetRoundTimeStamp(sr.RoundHandler().TimeStamp()) topic := spos.GetConsensusTopicID(sr.ShardCoordinator()) sr.GetAntiFloodHandler().ResetForTopic(topic) sr.worker.ResetConsensusMessages() @@ -107,7 +107,7 @@ func (sr *subroundStartRound) doStartRoundJob(_ context.Context) bool { // doStartRoundConsensusCheck method checks if the consensus is achieved in the subround StartRound func (sr *subroundStartRound) doStartRoundConsensusCheck() bool { - if sr.RoundCanceled { + if sr.GetRoundCanceled() { return false } @@ -136,7 +136,7 @@ func (sr *subroundStartRound) initCurrentRound() bool { "round index", sr.RoundHandler().Index(), "error", err.Error()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } @@ -155,7 +155,7 @@ func (sr *subroundStartRound) initCurrentRound() bool { if err != nil { log.Debug("initCurrentRound.GetLeader", "error", err.Error()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } @@ -194,19 +194,19 @@ func (sr *subroundStartRound) initCurrentRound() bool { if err != nil { log.Debug("initCurrentRound.Reset", "error", err.Error()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } - startTime := sr.RoundTimeStamp + startTime := sr.GetRoundTimeStamp() maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 if sr.RoundHandler().RemainingTime(startTime, maxTime) < 0 { log.Debug("canceled round, time is out", "round", sr.SyncTimer().FormattedCurrentTime(), sr.RoundHandler().Index(), "subround", sr.Name()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } @@ -279,7 +279,7 @@ func (sr *subroundStartRound) indexRoundIfNeeded(pubKeys []string) { BlockWasProposed: false, ShardId: shardId, Epoch: epoch, - Timestamp: uint64(sr.RoundTimeStamp.Unix()), + Timestamp: uint64(sr.GetRoundTimeStamp().Unix()), } roundsInfo := &outportcore.RoundsInfo{ ShardID: shardId, @@ -306,7 +306,7 @@ func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error leader, nextConsensusGroup, err := sr.GetNextConsensusGroup( randomSeed, - uint64(sr.RoundIndex), + uint64(sr.GetRoundIndex()), shardId, sr.NodesCoordinator(), currentHeader.GetEpoch(), diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index 6a913634788..a7a8ee3de65 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -405,11 +405,21 @@ func (cns *ConsensusState) GetRoundIndex() int64 { return cns.RoundIndex } +// SetRoundIndex sets the index of the current round +func (cns *ConsensusState) SetRoundIndex(roundIndex int64) { + cns.RoundIndex = roundIndex +} + // GetRoundTimeStamp returns the time stamp of the current round func (cns *ConsensusState) GetRoundTimeStamp() time.Time { return cns.RoundTimeStamp } +// SetRoundTimeStamp sets the time stamp of the current round +func (cns *ConsensusState) SetRoundTimeStamp(roundTimeStamp time.Time) { + cns.RoundTimeStamp = roundTimeStamp +} + // GetExtendedCalled returns the state of the extended called func (cns *ConsensusState) GetExtendedCalled() bool { return cns.ExtendedCalled diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 9d2fb77a380..e294ca96212 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -210,7 +210,9 @@ type ConsensusStateHandler interface { GetRoundCanceled() bool SetRoundCanceled(state bool) GetRoundIndex() int64 + SetRoundIndex(roundIndex int64) 
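Both start-round versions above cancel the round once the time elapsed since GetRoundTimeStamp() exceeds a percentage of the round duration, and they now record that decision through SetRoundCanceled(true). A self-contained sketch of the threshold arithmetic used in those checks; the function names are illustrative and the percentage is supplied by the caller (ProcessingThresholdPercent in the real code):

package sketch

import "time"

// maxProcessingTime reproduces the threshold arithmetic from initCurrentRound and
// isOutOfTime: a round of duration d may be processed for d * thresholdPercent / 100.
func maxProcessingTime(roundDuration time.Duration, thresholdPercent int) time.Duration {
	return roundDuration * time.Duration(thresholdPercent) / 100
}

// shouldCancelRound mirrors the RemainingTime(...) < 0 check: once the time spent
// since the round timestamp exceeds the allowed window, the caller is expected to
// invoke SetRoundCanceled(true) on the consensus state.
func shouldCancelRound(roundTimeStamp time.Time, roundDuration time.Duration, thresholdPercent int, now time.Time) bool {
	allowed := maxProcessingTime(roundDuration, thresholdPercent)
	return now.Sub(roundTimeStamp) > allowed
}
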
GetRoundTimeStamp() time.Time + SetRoundTimeStamp(roundTimeStamp time.Time) GetExtendedCalled() bool GetBody() data.BodyHandler SetBody(body data.BodyHandler) From 1b856218d492f6b70935351b41beb03943d88635 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 27 Sep 2024 16:16:36 +0300 Subject: [PATCH 21/30] fixes unit tests --- consensus/spos/bls/v1/subroundBlock_test.go | 56 +++++++-------- .../spos/bls/v1/subroundEndRound_test.go | 72 +++++++++---------- consensus/spos/bls/v2/benchmark_test.go | 2 +- consensus/spos/bls/v2/export_test.go | 2 +- consensus/spos/bls/v2/subroundBlock_test.go | 2 +- 5 files changed, 67 insertions(+), 67 deletions(-) diff --git a/consensus/spos/bls/v1/subroundBlock_test.go b/consensus/spos/bls/v1/subroundBlock_test.go index 16dbc95aebb..e0d4690021d 100644 --- a/consensus/spos/bls/v1/subroundBlock_test.go +++ b/consensus/spos/bls/v1/subroundBlock_test.go @@ -199,7 +199,7 @@ func TestSubroundBlock_NewSubroundBlockNilConsensusStateShouldFail(t *testing.T) ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) - sr.ConsensusState = nil + sr.ConsensusStateHandler = nil srBlock, err := defaultSubroundBlockFromSubround(sr) assert.Nil(t, srBlock) @@ -348,7 +348,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { }) r = sr.DoBlockJob() assert.True(t, r) - assert.Equal(t, uint64(1), sr.Header.GetNonce()) + assert.Equal(t, uint64(1), sr.GetHeader().GetNonce()) } func TestSubroundBlock_ReceivedBlockBodyAndHeaderDataAlreadySet(t *testing.T) { @@ -362,7 +362,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderDataAlreadySet(t *testing.T) { cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) - sr.Data = []byte("some data") + sr.SetData([]byte("some data")) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) } @@ -378,7 +378,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderNodeNotLeaderInCurrentRound(t * cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[1]), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) } @@ -394,7 +394,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderCannotProcessJobDone(t *testing cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) _ = sr.SetJobDone(sr.Leader(), bls.SrBlock, true) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) @@ -419,7 +419,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderErrorDecoding(t *testing.T) { cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) @@ -436,8 +436,8 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderBodyAlreadyReceived(t *testing. 
cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) - sr.Data = nil - sr.Body = &block.Body{} + sr.SetData(nil) + sr.SetBody(&block.Body{}) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) @@ -454,8 +454,8 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderHeaderAlreadyReceived(t *testin cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) - sr.Data = nil - sr.Header = &block.Header{Nonce: 1} + sr.SetData(nil) + sr.SetHeader(&block.Header{Nonce: 1}) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) } @@ -472,7 +472,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { leader, err := sr.GetLeader() require.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.True(t, r) }) @@ -484,7 +484,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { leader, err := sr.GetLeader() require.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) }) @@ -541,11 +541,11 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { currentPid, nil, ) - sr.Body = &block.Body{} + sr.SetBody(&block.Body{}) r := sr.ReceivedBlockBody(cnsMsg) assert.False(t, r) - sr.Body = nil + sr.SetBody(nil) cnsMsg.PubKey = []byte(sr.ConsensusGroup()[1]) r = sr.ReceivedBlockBody(cnsMsg) assert.False(t, r) @@ -582,12 +582,12 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { r = sr.ReceivedBlockHeader(cnsMsg) assert.False(t, r) - sr.Data = nil - sr.Header = hdr + sr.SetData(nil) + sr.SetHeader(hdr) r = sr.ReceivedBlockHeader(cnsMsg) assert.False(t, r) - sr.Header = nil + sr.SetHeader(nil) cnsMsg.PubKey = []byte(sr.ConsensusGroup()[1]) r = sr.ReceivedBlockHeader(cnsMsg) assert.False(t, r) @@ -599,8 +599,8 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { sr.SetStatus(bls.SrBlock, spos.SsNotFinished) container.SetBlockProcessor(blockProcessorMock) - sr.Data = nil - sr.Header = nil + sr.SetData(nil) + sr.SetHeader(nil) hdr = createDefaultHeader() hdr.Nonce = 1 hdrStr, _ = marshallerMock.MarshalizerMock{}.Marshal(hdr) @@ -665,8 +665,8 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFail currentPid, nil, ) - sr.Header = hdr - sr.Body = blkBody + sr.SetHeader(hdr) + sr.SetBody(blkBody) assert.False(t, sr.ProcessReceivedBlock(cnsMsg)) } @@ -694,8 +694,8 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockRetu currentPid, nil, ) - sr.Header = hdr - sr.Body = blkBody + sr.SetHeader(hdr) + sr.SetBody(blkBody) blockProcessorMock := consensusMock.InitBlockProcessorMock(container.Marshalizer()) blockProcessorMock.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errors.New("error") @@ -732,8 +732,8 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnTrue(t *testing.T) { currentPid, nil, ) - sr.Header = hdr - sr.Body = blkBody + sr.SetHeader(hdr) + sr.SetBody(blkBody) assert.True(t, sr.ProcessReceivedBlock(cnsMsg)) } } @@ -776,7 +776,7 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenRoundIsCanceled t.Parallel() container := consensusMock.InitConsensusCore() sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) - sr.RoundCanceled = true + 
sr.SetRoundCanceled(true) assert.False(t, sr.DoBlockConsensusCheck()) } @@ -1107,8 +1107,8 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDuration(t *testing.T) { currentPid, nil, ) - sr.Header = hdr - sr.Body = blkBody + sr.SetHeader(hdr) + sr.SetBody(blkBody) minimumExpectedValue := uint64(delay * 100 / srDuration) _ = sr.ProcessReceivedBlock(cnsMsg) diff --git a/consensus/spos/bls/v1/subroundEndRound_test.go b/consensus/spos/bls/v1/subroundEndRound_test.go index d1d2e920fdc..c3388302557 100644 --- a/consensus/spos/bls/v1/subroundEndRound_test.go +++ b/consensus/spos/bls/v1/subroundEndRound_test.go @@ -248,7 +248,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *test &statusHandler.AppStatusHandlerStub{}, ) - sr.ConsensusState = nil + sr.ConsensusStateHandler = nil srEndRound, err := v1.NewSubroundEndRound( sr, extend, @@ -418,7 +418,7 @@ func TestSubroundEndRound_DoEndRoundJobErrAggregatingSigShouldFail(t *testing.T) } container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) sr.SetSelfPubKey("A") sr.SetLeader("A") @@ -445,7 +445,7 @@ func TestSubroundEndRound_DoEndRoundJobErrCommitBlockShouldFail(t *testing.T) { } container.SetBlockProcessor(blProcMock) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.False(t, r) @@ -467,7 +467,7 @@ func TestSubroundEndRound_DoEndRoundJobErrTimeIsOutShouldFail(t *testing.T) { } container.SetRoundHandler(roundHandlerMock) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -492,7 +492,7 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastBlockOK(t *testing.T) { sr.SetSelfPubKey("A") sr.SetLeader("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -527,7 +527,7 @@ func TestSubroundEndRound_DoEndRoundJobErrMarshalizedDataToBroadcastOK(t *testin sr.SetSelfPubKey("A") sr.SetLeader("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -563,7 +563,7 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastMiniBlocksOK(t *testing.T) { sr.SetSelfPubKey("A") sr.SetLeader("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -600,7 +600,7 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastTransactionsOK(t *testing.T) sr.SetSelfPubKey("A") sr.SetLeader("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -622,7 +622,7 @@ func TestSubroundEndRound_DoEndRoundJobAllOK(t *testing.T) { sr.SetSelfPubKey("A") sr.SetLeader("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -651,18 +651,18 @@ func TestSubroundEndRound_CheckIfSignatureIsFilled(t *testing.T) { sr.SetSelfPubKey("A") sr.SetLeader("A") - sr.Header = &block.Header{Nonce: 5} + sr.SetHeader(&block.Header{Nonce: 5}) r := sr.DoEndRoundJob() assert.True(t, r) - assert.Equal(t, expectedSignature, sr.Header.GetLeaderSignature()) + assert.Equal(t, expectedSignature, sr.GetHeader().GetLeaderSignature()) } func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { t.Parallel() sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) ok := sr.DoEndRoundConsensusCheck() assert.False(t, ok) @@ -711,7 +711,7 @@ func 
TestSubroundEndRound_DoEndRoundJobByParticipant_RoundCanceledShouldReturnFa t.Parallel() sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) cnsData := consensus.Message{} res := sr.DoEndRoundJobByParticipant(&cnsData) @@ -722,7 +722,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_ConsensusDataNotSetShouldRe t.Parallel() sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Data = nil + sr.SetData(nil) cnsData := consensus.Message{} res := sr.DoEndRoundJobByParticipant(&cnsData) @@ -776,7 +776,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_ShouldReturnTrue(t *testing hdr := &block.Header{Nonce: 37} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = hdr + sr.SetHeader(hdr) sr.AddReceivedHeader(hdr) // set previous as finished @@ -795,7 +795,7 @@ func TestSubroundEndRound_IsConsensusHeaderReceived_NoReceivedHeadersShouldRetur hdr := &block.Header{Nonce: 37} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = hdr + sr.SetHeader(hdr) res, retHdr := sr.IsConsensusHeaderReceived() assert.False(t, res) @@ -809,7 +809,7 @@ func TestSubroundEndRound_IsConsensusHeaderReceived_HeaderNotReceivedShouldRetur hdrToSearchFor := &block.Header{Nonce: 38} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) sr.AddReceivedHeader(hdr) - sr.Header = hdrToSearchFor + sr.SetHeader(hdrToSearchFor) res, retHdr := sr.IsConsensusHeaderReceived() assert.False(t, res) @@ -821,7 +821,7 @@ func TestSubroundEndRound_IsConsensusHeaderReceivedShouldReturnTrue(t *testing.T hdr := &block.Header{Nonce: 37} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = hdr + sr.SetHeader(hdr) sr.AddReceivedHeader(hdr) res, retHdr := sr.IsConsensusHeaderReceived() @@ -856,7 +856,7 @@ func TestSubroundEndRound_HaveConsensusHeaderWithFullInfoShouldWork(t *testing.T LeaderSignature: originalLeaderSig, } sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = &hdr + sr.SetHeader(&hdr) cnsData := consensus.Message{ PubKeysBitmap: newPubKeyBitMap, @@ -886,7 +886,7 @@ func TestSubroundEndRound_CreateAndBroadcastHeaderFinalInfoBroadcastShouldBeCall } container.SetBroadcastMessenger(messenger) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = &block.Header{LeaderSignature: leaderSigInHdr} + sr.SetHeader(&block.Header{LeaderSignature: leaderSigInHdr}) sr.CreateAndBroadcastHeaderFinalInfo() @@ -902,7 +902,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldWork(t *testing.T) { hdr := &block.Header{Nonce: 37} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = hdr + sr.SetHeader(hdr) sr.AddReceivedHeader(hdr) sr.SetStatus(2, spos.SsFinished) @@ -938,7 +938,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldReturnFalseWhenFinal BlockHeaderHash: []byte("X"), PubKey: []byte("A"), } - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) res := sr.ReceivedBlockHeaderFinalInfo(&cnsData) assert.False(t, res) } @@ -967,7 +967,7 @@ func TestSubroundEndRound_IsOutOfTimeShouldReturnTrue(t *testing.T) { container.SetRoundHandler(&roundHandler) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.RoundTimeStamp = time.Now().AddDate(0, 0, -1) + sr.SetRoundTimeStamp(time.Now().AddDate(0, 0, -1)) res := sr.IsOutOfTime() assert.True(t, res) @@ -990,7 +990,7 @@ func 
TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerify container.SetHeaderSigVerifier(headerSigVerifier) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsDta := &consensus.Message{} - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) assert.False(t, isValid) } @@ -1012,7 +1012,7 @@ func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerify container.SetHeaderSigVerifier(headerSigVerifier) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsDta := &consensus.Message{} - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) assert.False(t, isValid) } @@ -1034,7 +1034,7 @@ func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnTrue(t *testing container.SetHeaderSigVerifier(headerSigVerifier) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsDta := &consensus.Message{} - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) assert.True(t, isValid) } @@ -1057,7 +1057,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) _, err := sr.VerifyNodesOnAggSigFail() @@ -1080,7 +1080,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { }, } - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) container.SetSigningHandler(signingHandler) @@ -1110,7 +1110,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { } container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, true) @@ -1128,7 +1128,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) sr.SetThreshold(bls.SrEndRound, 2) _, _, err := sr.ComputeAggSigOnValidNodes() @@ -1149,7 +1149,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { } container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) _, _, err := sr.ComputeAggSigOnValidNodes() @@ -1169,7 +1169,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { }, } container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) _, _, err := sr.ComputeAggSigOnValidNodes() @@ -1181,7 +1181,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) bitmap, sig, err := sr.ComputeAggSigOnValidNodes() @@ -1232,7 +1232,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeaderVerificationFail(t *testing.T) { _ = sr.SetJobDone(sr.ConsensusGroup()[0], 
bls.SrSignature, true) _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, true) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJobByLeader() require.False(t, r) @@ -1280,7 +1280,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeaderVerificationFail(t *testing.T) { _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, true) _ = sr.SetJobDone(sr.ConsensusGroup()[2], bls.SrSignature, true) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJobByLeader() require.True(t, r) @@ -1299,7 +1299,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.ConsensusState.Data = nil + sr.ConsensusStateHandler.SetData(nil) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), diff --git a/consensus/spos/bls/v2/benchmark_test.go b/consensus/spos/bls/v2/benchmark_test.go index 5b0492be6b5..37d217e0aa8 100644 --- a/consensus/spos/bls/v2/benchmark_test.go +++ b/consensus/spos/bls/v2/benchmark_test.go @@ -122,7 +122,7 @@ func benchmarkSubroundSignatureDoSignatureJobForManagedKeys(b *testing.B, number &nodeMock.ThrottlerStub{}, ) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) sr.SetSelfPubKey("OTHER") b.ResetTimer() diff --git a/consensus/spos/bls/v2/export_test.go b/consensus/spos/bls/v2/export_test.go index 72bdfb1790d..696fec6a98c 100644 --- a/consensus/spos/bls/v2/export_test.go +++ b/consensus/spos/bls/v2/export_test.go @@ -47,7 +47,7 @@ func (fct *factory) ChronologyHandler() consensus.ChronologyHandler { } // ConsensusState gets the consensus state struct pointer -func (fct *factory) ConsensusState() *spos.ConsensusState { +func (fct *factory) ConsensusState() spos.ConsensusStateHandler { return fct.consensusState } diff --git a/consensus/spos/bls/v2/subroundBlock_test.go b/consensus/spos/bls/v2/subroundBlock_test.go index d75b526b477..d68b8fb012f 100644 --- a/consensus/spos/bls/v2/subroundBlock_test.go +++ b/consensus/spos/bls/v2/subroundBlock_test.go @@ -204,7 +204,7 @@ func TestSubroundBlock_NewSubroundBlockNilConsensusStateShouldFail(t *testing.T) ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) - sr.ConsensusState = nil + sr.ConsensusStateHandler = nil srBlock, err := defaultSubroundBlockFromSubround(sr) assert.Nil(t, srBlock) From 30df93e6c61fc083b21de7c71c6a16b90c521742 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 27 Sep 2024 16:35:44 +0300 Subject: [PATCH 22/30] fixes unit tests - part 2 --- .../spos/bls/v1/subroundSignature_test.go | 56 ++++----- consensus/spos/bls/v2/subroundBlock_test.go | 68 +++++----- .../spos/bls/v2/subroundEndRound_test.go | 118 +++++++++--------- 3 files changed, 121 insertions(+), 121 deletions(-) diff --git a/consensus/spos/bls/v1/subroundSignature_test.go b/consensus/spos/bls/v1/subroundSignature_test.go index d9eb9260f46..73d765cb67b 100644 --- a/consensus/spos/bls/v1/subroundSignature_test.go +++ b/consensus/spos/bls/v1/subroundSignature_test.go @@ -154,7 +154,7 @@ func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *te &statusHandler.AppStatusHandlerStub{}, ) - sr.ConsensusState = nil + sr.ConsensusStateHandler = nil srSignature, err := v1.NewSubroundSignature( sr, extend, @@ -343,12 +343,12 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { container := consensusMocks.InitConsensusCore() sr 
:= initSubroundSignatureWithContainer(container) - sr.Header = &block.Header{} - sr.Data = nil + sr.SetHeader(&block.Header{}) + sr.SetData(nil) r := sr.DoSignatureJob() assert.False(t, r) - sr.Data = []byte("X") + sr.SetData([]byte("X")) err := errors.New("create signature share error") signingHandler := &consensusMocks.SigningHandlerStub{ @@ -372,14 +372,14 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { assert.True(t, r) _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrSignature, false) - sr.RoundCanceled = false + sr.SetRoundCanceled(false) leader, err := sr.GetLeader() assert.Nil(t, err) sr.SetSelfPubKey(leader) r = sr.DoSignatureJob() assert.True(t, r) - assert.False(t, sr.RoundCanceled) + assert.False(t, sr.GetRoundCanceled()) } func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { @@ -423,12 +423,12 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { }, ) - srSignature.Header = &block.Header{} - srSignature.Data = nil + srSignature.SetHeader(&block.Header{}) + srSignature.SetData(nil) r := srSignature.DoSignatureJob() assert.False(t, r) - sr.Data = []byte("X") + sr.SetData([]byte("X")) err := errors.New("create signature share error") signingHandler := &consensusMocks.SigningHandlerStub{ @@ -452,7 +452,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { assert.True(t, r) _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrSignature, false) - sr.RoundCanceled = false + sr.SetRoundCanceled(false) leader, err := sr.GetLeader() assert.Nil(t, err) @@ -460,7 +460,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { sr.SetSelfPubKey(leader) r = srSignature.DoSignatureJob() assert.True(t, r) - assert.False(t, sr.RoundCanceled) + assert.False(t, sr.GetRoundCanceled()) expectedMap := map[string]struct{}{ "A": {}, "B": {}, @@ -481,7 +481,7 @@ func TestSubroundSignature_ReceivedSignature(t *testing.T) { sr := initSubroundSignature() signature := []byte("signature") cnsMsg := consensus.NewConsensusMessage( - sr.Data, + sr.GetData(), signature, nil, nil, @@ -497,16 +497,16 @@ func TestSubroundSignature_ReceivedSignature(t *testing.T) { nil, ) - sr.Header = &block.Header{} - sr.Data = nil + sr.SetHeader(&block.Header{}) + sr.SetData(nil) r := sr.ReceivedSignature(cnsMsg) assert.False(t, r) - sr.Data = []byte("Y") + sr.SetData([]byte("Y")) r = sr.ReceivedSignature(cnsMsg) assert.False(t, r) - sr.Data = []byte("X") + sr.SetData([]byte("X")) r = sr.ReceivedSignature(cnsMsg) assert.False(t, r) leader, err := sr.GetLeader() @@ -552,11 +552,11 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { container := consensusMocks.InitConsensusCore() container.SetSigningHandler(signingHandler) sr := initSubroundSignatureWithContainer(container) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) signature := []byte("signature") cnsMsg := consensus.NewConsensusMessage( - sr.Data, + sr.GetData(), signature, nil, nil, @@ -572,15 +572,15 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { nil, ) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedSignature(cnsMsg) assert.False(t, r) - sr.Data = []byte("Y") + sr.SetData([]byte("Y")) r = sr.ReceivedSignature(cnsMsg) assert.False(t, r) - sr.Data = []byte("X") + sr.SetData([]byte("X")) r = sr.ReceivedSignature(cnsMsg) assert.False(t, r) @@ -642,7 +642,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenRoundIs t.Parallel() sr := initSubroundSignature() - sr.RoundCanceled = true + sr.SetRoundCanceled(true) 
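The test hunks above all share the same arrange step: rather than assigning sr.Data, sr.RoundCanceled or sr.WaitingAllSignaturesTimeOut directly, they drive the state through the new setters. A minimal sketch of that arrange helper against a stand-in interface; consensusStateArranger and arrangeSignatureSubroundTest are names invented for the example, not taken from the repository:

package sketch

// consensusStateArranger lists only the setters these signature tests exercise;
// the real spos.ConsensusStateHandler exposes the full accessor set.
type consensusStateArranger interface {
	SetData([]byte)
	SetRoundCanceled(bool)
	SetWaitingAllSignaturesTimeOut(bool)
}

// arrangeSignatureSubroundTest mirrors the typical setup above: seed the consensus
// data, start from a non-canceled round and pin the "waiting for all signatures"
// timeout flag to a known value before running the assertion.
func arrangeSignatureSubroundTest(sr consensusStateArranger, data []byte, waitingTimedOut bool) {
	sr.SetData(data)
	sr.SetRoundCanceled(false)
	sr.SetWaitingAllSignaturesTimeOut(waitingTimedOut)
}
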
assert.False(t, sr.DoSignatureConsensusCheck()) } @@ -678,7 +678,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenNotAllS container := consensusMocks.InitConsensusCore() sr := initSubroundSignatureWithContainer(container) - sr.WaitingAllSignaturesTimeOut = false + sr.SetWaitingAllSignaturesTimeOut(false) leader, err := sr.GetLeader() assert.Nil(t, err) @@ -696,7 +696,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenAllSigna container := consensusMocks.InitConsensusCore() sr := initSubroundSignatureWithContainer(container) - sr.WaitingAllSignaturesTimeOut = false + sr.SetWaitingAllSignaturesTimeOut(false) leader, err := sr.GetLeader() assert.Nil(t, err) @@ -714,7 +714,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenEnoughBu container := consensusMocks.InitConsensusCore() sr := initSubroundSignatureWithContainer(container) - sr.WaitingAllSignaturesTimeOut = true + sr.SetWaitingAllSignaturesTimeOut(true) leader, err := sr.GetLeader() assert.Nil(t, err) @@ -737,7 +737,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenFallbac }, }) sr := initSubroundSignatureWithContainer(container) - sr.WaitingAllSignaturesTimeOut = false + sr.SetWaitingAllSignaturesTimeOut(false) sr.SetSelfPubKey(sr.ConsensusGroup()[0]) @@ -758,7 +758,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenFallback }, }) sr := initSubroundSignatureWithContainer(container) - sr.WaitingAllSignaturesTimeOut = true + sr.SetWaitingAllSignaturesTimeOut(true) leader, err := sr.GetLeader() assert.Nil(t, err) @@ -779,7 +779,7 @@ func TestSubroundSignature_ReceivedSignatureReturnFalseWhenConsensusDataIsNotEqu leader, err := sr.GetLeader() assert.Nil(t, err) cnsMsg := consensus.NewConsensusMessage( - append(sr.Data, []byte("X")...), + append(sr.GetData(), []byte("X")...), []byte("signature"), nil, nil, diff --git a/consensus/spos/bls/v2/subroundBlock_test.go b/consensus/spos/bls/v2/subroundBlock_test.go index d68b8fb012f..94b1bd1060c 100644 --- a/consensus/spos/bls/v2/subroundBlock_test.go +++ b/consensus/spos/bls/v2/subroundBlock_test.go @@ -549,9 +549,9 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { r := sr.DoBlockJob() assert.True(t, r) - assert.Equal(t, uint64(1), sr.Header.GetNonce()) + assert.Equal(t, uint64(1), sr.GetHeader().GetNonce()) - proof := sr.Header.GetPreviousProof() + proof := sr.GetHeader().GetPreviousProof() assert.Equal(t, providedSignature, proof.GetAggregatedSignature()) assert.Equal(t, providedBitmap, proof.GetPubKeysBitmap()) }) @@ -582,7 +582,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { }) r := sr.DoBlockJob() assert.True(t, r) - assert.Equal(t, uint64(1), sr.Header.GetNonce()) + assert.Equal(t, uint64(1), sr.GetHeader().GetNonce()) }) } @@ -600,7 +600,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderDataAlreadySet(t *testing.T) { assert.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) - sr.Data = []byte("some data") + sr.SetData([]byte("some data")) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) } @@ -616,7 +616,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderNodeNotLeaderInCurrentRound(t * cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[1]), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) } @@ -634,7 +634,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderCannotProcessJobDone(t 
*testing assert.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) _ = sr.SetJobDone(leader, bls.SrBlock, true) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) @@ -661,7 +661,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderErrorDecoding(t *testing.T) { assert.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) @@ -680,8 +680,8 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderBodyAlreadyReceived(t *testing. assert.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) - sr.Data = nil - sr.Body = &block.Body{} + sr.SetData(nil) + sr.SetBody(&block.Body{}) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) @@ -700,8 +700,8 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderHeaderAlreadyReceived(t *testin assert.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) - sr.Data = nil - sr.Header = &block.Header{Nonce: 1} + sr.SetData(nil) + sr.SetHeader(&block.Header{Nonce: 1}) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) } @@ -719,7 +719,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { leader, err := sr.GetLeader() assert.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.True(t, r) }) @@ -733,7 +733,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { } blkBody := &block.Body{} cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) }) @@ -776,7 +776,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { assert.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) cnsMsg.SignatureShare = []byte("signature") - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.True(t, r) }) @@ -833,11 +833,11 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { currentPid, nil, ) - sr.Body = &block.Body{} + sr.SetBody(&block.Body{}) r := sr.ReceivedBlockBody(cnsMsg) assert.False(t, r) - sr.Body = nil + sr.SetBody(nil) cnsMsg.PubKey = []byte(sr.ConsensusGroup()[1]) r = sr.ReceivedBlockBody(cnsMsg) assert.False(t, r) @@ -875,12 +875,12 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { r = sr.ReceivedBlockHeaderBeforeEquivalentProofs(cnsMsg) assert.False(t, r) - sr.Data = nil - sr.Header = hdr + sr.SetData(nil) + sr.SetHeader(hdr) r = sr.ReceivedBlockHeaderBeforeEquivalentProofs(cnsMsg) assert.False(t, r) - sr.Header = nil + sr.SetHeader(nil) cnsMsg.PubKey = []byte(sr.ConsensusGroup()[1]) r = sr.ReceivedBlockHeaderBeforeEquivalentProofs(cnsMsg) assert.False(t, r) @@ -892,8 +892,8 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { sr.SetStatus(bls.SrBlock, spos.SsNotFinished) container.SetBlockProcessor(blockProcessorMock) - sr.Data = nil - sr.Header = nil + sr.SetData(nil) + sr.SetHeader(nil) hdr = createDefaultHeader() hdr.Nonce = 1 hdrStr, _ = mock.MarshalizerMock{}.Marshal(hdr) @@ -958,8 +958,8 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFail currentPid, nil, ) - sr.Header = hdr - sr.Body = blkBody + 
sr.SetHeader(hdr) + sr.SetBody(blkBody) assert.False(t, sr.ProcessReceivedBlock(cnsMsg)) } @@ -987,8 +987,8 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockRetu currentPid, nil, ) - sr.Header = hdr - sr.Body = blkBody + sr.SetHeader(hdr) + sr.SetBody(blkBody) blockProcessorMock := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) blockProcessorMock.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return expectedErr @@ -1025,8 +1025,8 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnTrue(t *testing.T) { currentPid, nil, ) - sr.Header = hdr - sr.Body = blkBody + sr.SetHeader(hdr) + sr.SetBody(blkBody) assert.True(t, sr.ProcessReceivedBlock(cnsMsg)) } } @@ -1069,7 +1069,7 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenRoundIsCanceled t.Parallel() container := consensusMocks.InitConsensusCore() sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) assert.False(t, sr.DoBlockConsensusCheck()) } @@ -1400,8 +1400,8 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDuration(t *testing.T) { currentPid, nil, ) - sr.Header = hdr - sr.Body = blkBody + sr.SetHeader(hdr) + sr.SetBody(blkBody) minimumExpectedValue := uint64(delay * 100 / srDuration) _ = sr.ProcessReceivedBlock(cnsMsg) @@ -1465,14 +1465,14 @@ func TestSubroundBlock_ReceivedBlockHeader(t *testing.T) { sr.SetLeader(defaultLeader) // consensus data already set - sr.Data = []byte("some data") + sr.SetData([]byte("some data")) sr.ReceivedBlockHeader(&testscommon.HeaderHandlerStub{}) - sr.Data = nil + sr.SetData(nil) // header already received - sr.Header = &testscommon.HeaderHandlerStub{} + sr.SetHeader(&testscommon.HeaderHandlerStub{}) sr.ReceivedBlockHeader(&testscommon.HeaderHandlerStub{}) - sr.Header = nil + sr.SetHeader(nil) // self job already done _ = sr.SetJobDone(sr.SelfPubKey(), sr.Current(), true) diff --git a/consensus/spos/bls/v2/subroundEndRound_test.go b/consensus/spos/bls/v2/subroundEndRound_test.go index 705f830ee22..f43d0e6024a 100644 --- a/consensus/spos/bls/v2/subroundEndRound_test.go +++ b/consensus/spos/bls/v2/subroundEndRound_test.go @@ -57,9 +57,9 @@ func initSubroundEndRoundWithContainer( currentPid, appStatusHandler, ) - sr.Header = &block.HeaderV2{ + sr.SetHeader(&block.HeaderV2{ Header: createDefaultHeader(), - } + }) srEndRound, _ := v2.NewSubroundEndRound( sr, @@ -95,9 +95,9 @@ func initSubroundEndRoundWithContainerAndConsensusState( currentPid, appStatusHandler, ) - sr.Header = &block.HeaderV2{ + sr.SetHeader(&block.HeaderV2{ Header: createDefaultHeader(), - } + }) srEndRound, _ := v2.NewSubroundEndRound( sr, @@ -114,9 +114,9 @@ func initSubroundEndRoundWithContainerAndConsensusState( func initSubroundEndRound(appStatusHandler core.AppStatusHandler) v2.SubroundEndRound { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, appStatusHandler) - sr.Header = &block.HeaderV2{ + sr.SetHeader(&block.HeaderV2{ Header: createDefaultHeader(), - } + }) return sr } @@ -299,7 +299,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *test &statusHandler.AppStatusHandlerStub{}, ) - sr.ConsensusState = nil + sr.ConsensusStateHandler = nil srEndRound, err := v2.NewSubroundEndRound( sr, v2.ProcessingThresholdPercent, @@ -498,7 +498,7 @@ func TestSubroundEndRound_DoEndRoundJobNilHeaderShouldFail(t *testing.T) { container := 
consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = nil + sr.SetHeader(nil) r := sr.DoEndRoundJob() assert.False(t, r) @@ -516,7 +516,7 @@ func TestSubroundEndRound_DoEndRoundJobErrAggregatingSigShouldFail(t *testing.T) } container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) sr.SetSelfPubKey("A") @@ -541,7 +541,7 @@ func TestSubroundEndRound_DoEndRoundJobErrCommitBlockShouldFail(t *testing.T) { } container.SetBlockProcessor(blProcMock) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.False(t, r) @@ -562,7 +562,7 @@ func TestSubroundEndRound_DoEndRoundJobErrTimeIsOutShouldFail(t *testing.T) { } container.SetRoundHandler(roundHandlerMock) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -586,7 +586,7 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastBlockOK(t *testing.T) { sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -620,7 +620,7 @@ func TestSubroundEndRound_DoEndRoundJobErrMarshalizedDataToBroadcastOK(t *testin sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -655,7 +655,7 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastMiniBlocksOK(t *testing.T) { sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -691,7 +691,7 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastTransactionsOK(t *testing.T) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -712,7 +712,7 @@ func TestSubroundEndRound_DoEndRoundJobAllOK(t *testing.T) { sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -740,18 +740,18 @@ func TestSubroundEndRound_CheckIfSignatureIsFilled(t *testing.T) { sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") - sr.Header = &block.Header{Nonce: 5} + sr.SetHeader(&block.Header{Nonce: 5}) r := sr.DoEndRoundJob() assert.True(t, r) - assert.Equal(t, expectedSignature, sr.Header.GetLeaderSignature()) + assert.Equal(t, expectedSignature, sr.GetHeader().GetLeaderSignature()) } func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { t.Parallel() sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) ok := sr.DoEndRoundConsensusCheck() assert.False(t, ok) @@ -800,7 +800,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_RoundCanceledShouldReturnFa t.Parallel() sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) cnsData := consensus.Message{} res := 
sr.DoEndRoundJobByParticipant(&cnsData) @@ -811,7 +811,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_ConsensusDataNotSetShouldRe t.Parallel() sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Data = nil + sr.SetData(nil) cnsData := consensus.Message{} res := sr.DoEndRoundJobByParticipant(&cnsData) @@ -848,7 +848,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_ConsensusHeaderNotReceivedS t.Parallel() sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = nil + sr.SetHeader(nil) // set previous as finished sr.SetStatus(2, spos.SsFinished) @@ -866,7 +866,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_ShouldReturnTrue(t *testing hdr := &block.Header{Nonce: 37} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = hdr + sr.SetHeader(hdr) sr.AddReceivedHeader(hdr) // set previous as finished @@ -885,7 +885,7 @@ func TestSubroundEndRound_IsConsensusHeaderReceived_NoReceivedHeadersShouldRetur hdr := &block.Header{Nonce: 37} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = hdr + sr.SetHeader(hdr) res, retHdr := sr.IsConsensusHeaderReceived() assert.False(t, res) @@ -899,7 +899,7 @@ func TestSubroundEndRound_IsConsensusHeaderReceived_HeaderNotReceivedShouldRetur hdrToSearchFor := &block.Header{Nonce: 38} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) sr.AddReceivedHeader(hdr) - sr.Header = hdrToSearchFor + sr.SetHeader(hdrToSearchFor) res, retHdr := sr.IsConsensusHeaderReceived() assert.False(t, res) @@ -911,7 +911,7 @@ func TestSubroundEndRound_IsConsensusHeaderReceivedShouldReturnTrue(t *testing.T hdr := &block.Header{Nonce: 37} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = hdr + sr.SetHeader(hdr) sr.AddReceivedHeader(hdr) res, retHdr := sr.IsConsensusHeaderReceived() @@ -923,7 +923,7 @@ func TestSubroundEndRound_HaveConsensusHeaderWithFullInfoNilHdrShouldNotWork(t * t.Parallel() sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = nil + sr.SetHeader(nil) cnsData := consensus.Message{} @@ -947,7 +947,7 @@ func TestSubroundEndRound_HaveConsensusHeaderWithFullInfoShouldWork(t *testing.T LeaderSignature: originalLeaderSig, } sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = &hdr + sr.SetHeader(&hdr) cnsData := consensus.Message{ PubKeysBitmap: newPubKeyBitMap, @@ -977,7 +977,7 @@ func TestSubroundEndRound_CreateAndBroadcastHeaderFinalInfoBroadcastShouldBeCall } container.SetBroadcastMessenger(messenger) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = &block.Header{LeaderSignature: leaderSigInHdr} + sr.SetHeader(&block.Header{LeaderSignature: leaderSigInHdr}) leader, err := sr.GetLeader() assert.Nil(t, err) @@ -999,7 +999,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { hdr := &block.Header{Nonce: 37} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = hdr + sr.SetHeader(hdr) sr.AddReceivedHeader(hdr) sr.SetStatus(2, spos.SsFinished) @@ -1083,7 +1083,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { &dataRetrieverMocks.ThrottlerStub{}, ) - srEndRound.Header = hdr + srEndRound.SetHeader(hdr) srEndRound.AddReceivedHeader(hdr) srEndRound.SetStatus(2, spos.SsFinished) @@ -1096,7 +1096,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { t.Parallel() sr := 
initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = nil + sr.SetHeader(nil) cnsData := consensus.Message{ // apply the data which is mocked in consensus state so the checks will pass @@ -1127,7 +1127,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { BlockHeaderHash: []byte("X"), PubKey: []byte("A"), } - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) res := sr.ReceivedBlockHeaderFinalInfo(&cnsData) assert.False(t, res) }) @@ -1136,7 +1136,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Data = nil + sr.SetData(nil) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), PubKey: []byte("A"), @@ -1174,7 +1174,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Data = []byte("Y") + sr.SetData([]byte("Y")) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), PubKey: []byte("A"), @@ -1216,9 +1216,9 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { currentPid, &statusHandler.AppStatusHandlerStub{}, ) - sr.Header = &block.HeaderV2{ + sr.SetHeader(&block.HeaderV2{ Header: createDefaultHeader(), - } + }) srEndRound, _ := v2.NewSubroundEndRound( sr, @@ -1264,7 +1264,7 @@ func TestSubroundEndRound_IsOutOfTimeShouldReturnTrue(t *testing.T) { container.SetRoundHandler(&roundHandler) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.RoundTimeStamp = time.Now().AddDate(0, 0, -1) + sr.SetRoundTimeStamp(time.Now().AddDate(0, 0, -1)) res := sr.IsOutOfTime() assert.True(t, res) @@ -1287,7 +1287,7 @@ func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerify container.SetHeaderSigVerifier(headerSigVerifier) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsDta := &consensus.Message{} - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) assert.False(t, isValid) } @@ -1309,7 +1309,7 @@ func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerify container.SetHeaderSigVerifier(headerSigVerifier) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsDta := &consensus.Message{} - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) assert.False(t, isValid) } @@ -1331,7 +1331,7 @@ func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnTrue(t *testing container.SetHeaderSigVerifier(headerSigVerifier) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsDta := &consensus.Message{} - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) assert.True(t, isValid) } @@ -1353,7 +1353,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) leader, err := sr.GetLeader() require.Nil(t, err) _ = sr.SetJobDone(leader, bls.SrSignature, true) @@ -1377,7 +1377,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { }, } - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) leader, err := 
sr.GetLeader() require.Nil(t, err) _ = sr.SetJobDone(leader, bls.SrSignature, true) @@ -1411,7 +1411,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { } container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, true) _ = sr.SetJobDone(sr.ConsensusGroup()[2], bls.SrSignature, true) @@ -1454,7 +1454,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { } container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, true) invalidSigners, err := sr.VerifyNodesOnAggSigFail(context.TODO()) @@ -1471,7 +1471,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) sr.SetThreshold(bls.SrEndRound, 2) _, _, err := sr.ComputeAggSigOnValidNodes() @@ -1491,7 +1491,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { } container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) for _, participant := range sr.ConsensusGroup() { _ = sr.SetJobDone(participant, bls.SrSignature, true) } @@ -1512,7 +1512,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { }, } container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) for _, participant := range sr.ConsensusGroup() { _ = sr.SetJobDone(participant, bls.SrSignature, true) } @@ -1526,7 +1526,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) for _, participant := range sr.ConsensusGroup() { _ = sr.SetJobDone(participant, bls.SrSignature, true) } @@ -1577,9 +1577,9 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { currentPid, &statusHandler.AppStatusHandlerStub{}, ) - sr.Header = &block.HeaderV2{ + sr.SetHeader(&block.HeaderV2{ Header: createDefaultHeader(), - } + }) srEndRound, _ := v2.NewSubroundEndRound( sr, @@ -1642,7 +1642,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, true) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJobByLeader() require.False(t, r) @@ -1695,7 +1695,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { _ = sr.SetJobDone(participant, bls.SrSignature, true) } - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJobByLeader() require.True(t, r) @@ -1764,13 +1764,13 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { _ = srEndRound.SetJobDone(participant, bls.SrSignature, true) } - srEndRound.Header = &block.HeaderV2{ + srEndRound.SetHeader(&block.HeaderV2{ Header: createDefaultHeader(), ScheduledRootHash: []byte("sch root hash"), ScheduledAccumulatedFees: big.NewInt(0), ScheduledDeveloperFees: big.NewInt(0), PreviousHeaderProof: nil, - } + }) r := srEndRound.DoEndRoundJobByLeader() require.True(t, r) @@ -1787,7 +1787,7 @@ func 
TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.ConsensusState.Data = nil + sr.ConsensusStateHandler.SetData(nil) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), @@ -1803,7 +1803,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = nil + sr.SetHeader(nil) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), @@ -1967,9 +1967,9 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = &block.HeaderV2{ + sr.SetHeader(&block.HeaderV2{ Header: createDefaultHeader(), - } + }) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), PubKey: []byte("A"), From 07c6afe2ca9e54e0a00994a9063d2caee64963b8 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 27 Sep 2024 17:16:08 +0300 Subject: [PATCH 23/30] fixes unit tests - part 3 --- .../spos/bls/v1/subroundStartRound_test.go | 4 +- .../spos/bls/v2/subroundSignature_test.go | 84 +++++++++---------- .../spos/bls/v2/subroundStartRound_test.go | 4 +- 3 files changed, 46 insertions(+), 46 deletions(-) diff --git a/consensus/spos/bls/v1/subroundStartRound_test.go b/consensus/spos/bls/v1/subroundStartRound_test.go index d343cf75266..5ab4523bf94 100644 --- a/consensus/spos/bls/v1/subroundStartRound_test.go +++ b/consensus/spos/bls/v1/subroundStartRound_test.go @@ -234,7 +234,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilConsensusStateShouldFail(t * sr, _ := defaultSubround(consensusState, ch, container) - sr.ConsensusState = nil + sr.ConsensusStateHandler = nil srStartRound, err := defaultSubroundStartRoundFromSubround(sr) assert.Nil(t, srStartRound) @@ -342,7 +342,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenRound sr := initSubroundStartRound() - sr.RoundCanceled = true + sr.SetRoundCanceled(true) ok := sr.DoStartRoundConsensusCheck() assert.False(t, ok) diff --git a/consensus/spos/bls/v2/subroundSignature_test.go b/consensus/spos/bls/v2/subroundSignature_test.go index 36811e4c62b..bedacbcf163 100644 --- a/consensus/spos/bls/v2/subroundSignature_test.go +++ b/consensus/spos/bls/v2/subroundSignature_test.go @@ -184,7 +184,7 @@ func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *te &statusHandler.AppStatusHandlerStub{}, ) - sr.ConsensusState = nil + sr.ConsensusStateHandler = nil srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, @@ -417,18 +417,18 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundSignatureWithContainer(container) - sr.Header = &block.Header{} - sr.Data = nil + sr.SetHeader(&block.Header{}) + sr.SetData(nil) r := sr.DoSignatureJob() assert.False(t, r) - sr.Data = []byte("X") + sr.SetData([]byte("X")) - sr.Header = nil + sr.SetHeader(nil) r = sr.DoSignatureJob() assert.False(t, r) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) err := errors.New("create signature share error") signingHandler := &consensusMocks.SigningHandlerStub{ @@ -470,14 +470,14 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { }, }) _ = 
sr.SetJobDone(sr.SelfPubKey(), bls.SrSignature, false) - sr.RoundCanceled = false + sr.SetRoundCanceled(false) leader, err := sr.GetLeader() assert.Nil(t, err) sr.SetSelfPubKey(leader) r = sr.DoSignatureJob() assert.True(t, r) - assert.False(t, sr.RoundCanceled) + assert.False(t, sr.GetRoundCanceled()) }) t.Run("with equivalent messages flag active should work", func(t *testing.T) { t.Parallel() @@ -491,7 +491,7 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { container.SetEnableEpochsHandler(enableEpochsHandler) sr := initSubroundSignatureWithContainer(container) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) leader, err := sr.GetLeader() assert.Nil(t, err) sr.SetSelfPubKey(leader) @@ -504,7 +504,7 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { r := sr.DoSignatureJob() assert.True(t, r) - assert.False(t, sr.RoundCanceled) + assert.False(t, sr.GetRoundCanceled()) assert.Nil(t, err) leaderJobDone, err := sr.JobDone(leader, bls.SrSignature) assert.NoError(t, err) @@ -561,12 +561,12 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { &dataRetrieverMock.ThrottlerStub{}, ) - srSignature.Header = &block.Header{} - srSignature.Data = nil + srSignature.SetHeader(&block.Header{}) + srSignature.SetData(nil) r := srSignature.DoSignatureJob() assert.False(t, r) - sr.Data = []byte("X") + sr.SetData([]byte("X")) err := errors.New("create signature share error") signingHandler := &consensusMocks.SigningHandlerStub{ @@ -590,13 +590,13 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { assert.True(t, r) _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrSignature, false) - sr.RoundCanceled = false + sr.SetRoundCanceled(false) leader, err := sr.GetLeader() assert.Nil(t, err) sr.SetSelfPubKey(leader) r = srSignature.DoSignatureJob() assert.True(t, r) - assert.False(t, sr.RoundCanceled) + assert.False(t, sr.GetRoundCanceled()) expectedMap := map[string]struct{}{ "A": {}, "B": {}, @@ -668,7 +668,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { &dataRetrieverMock.ThrottlerStub{}, ) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) signaturesBroadcast := make(map[string]int) container.SetBroadcastMessenger(&consensusMocks.BroadcastMessengerMock{ BroadcastConsensusMessageCalled: func(message *consensus.Message) error { @@ -684,7 +684,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { r := srSignature.DoSignatureJob() assert.True(t, r) - assert.False(t, sr.RoundCanceled) + assert.False(t, sr.GetRoundCanceled()) assert.True(t, sr.IsSubroundFinished(bls.SrSignature)) for _, pk := range sr.ConsensusGroup() { @@ -759,7 +759,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { currentPid, &statusHandler.AppStatusHandlerStub{}, ) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) signatureSentForPks := make(map[string]struct{}) srSignature, _ := v2.NewSubroundSignature( @@ -826,7 +826,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { currentPid, &statusHandler.AppStatusHandlerStub{}, ) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) signatureSentForPks := make(map[string]struct{}) srSignature, _ := v2.NewSubroundSignature( @@ -893,7 +893,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { currentPid, &statusHandler.AppStatusHandlerStub{}, ) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) signatureSentForPks := make(map[string]struct{}) varCalled := false @@ -977,7 +977,7 @@ func 
TestSubroundSignature_DoSignatureJobForManagedKeys(t *testing.T) { &dataRetrieverMock.ThrottlerStub{}, ) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) signaturesBroadcast := make(map[string]int) container.SetBroadcastMessenger(&consensusMocks.BroadcastMessengerMock{ BroadcastConsensusMessageCalled: func(message *consensus.Message) error { @@ -1073,7 +1073,7 @@ func TestSubroundSignature_DoSignatureJobForManagedKeys(t *testing.T) { }, ) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) ctx, cancel := context.WithCancel(context.TODO()) cancel() r := srSignature.DoSignatureJobForManagedKeys(ctx) @@ -1090,7 +1090,7 @@ func TestSubroundSignature_ReceivedSignature(t *testing.T) { sr := initSubroundSignatureWithContainer(container) signature := []byte("signature") cnsMsg := consensus.NewConsensusMessage( - sr.Data, + sr.GetData(), signature, nil, nil, @@ -1106,16 +1106,16 @@ func TestSubroundSignature_ReceivedSignature(t *testing.T) { nil, ) - sr.Header = &block.Header{} - sr.Data = nil + sr.SetHeader(&block.Header{}) + sr.SetData(nil) r := sr.ReceivedSignature(cnsMsg) assert.False(t, r) - sr.Data = []byte("Y") + sr.SetData([]byte("Y")) r = sr.ReceivedSignature(cnsMsg) assert.False(t, r) - sr.Data = []byte("X") + sr.SetData([]byte("X")) r = sr.ReceivedSignature(cnsMsg) assert.False(t, r) @@ -1170,11 +1170,11 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { container := consensusMocks.InitConsensusCore() container.SetSigningHandler(signingHandler) sr := initSubroundSignatureWithContainer(container) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) signature := []byte("signature") cnsMsg := consensus.NewConsensusMessage( - sr.Data, + sr.GetData(), signature, nil, nil, @@ -1190,15 +1190,15 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { nil, ) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedSignature(cnsMsg) assert.False(t, r) - sr.Data = []byte("Y") + sr.SetData([]byte("Y")) r = sr.ReceivedSignature(cnsMsg) assert.False(t, r) - sr.Data = []byte("X") + sr.SetData([]byte("X")) r = sr.ReceivedSignature(cnsMsg) assert.False(t, r) @@ -1260,7 +1260,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenRoundIs t.Parallel() sr := initSubroundSignature() - sr.RoundCanceled = true + sr.SetRoundCanceled(true) assert.False(t, sr.DoSignatureConsensusCheck()) } @@ -1281,7 +1281,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSignatur _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) } - sr.Header = &block.HeaderV2{} + sr.SetHeader(&block.HeaderV2{}) assert.True(t, sr.DoSignatureConsensusCheck()) } @@ -1289,7 +1289,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenSignatu t.Parallel() sr := initSubroundSignature() - sr.Header = &block.HeaderV2{Header: createDefaultHeader()} + sr.SetHeader(&block.HeaderV2{Header: createDefaultHeader()}) assert.False(t, sr.DoSignatureConsensusCheck()) } @@ -1364,7 +1364,7 @@ func testSubroundSignatureDoSignatureConsensusCheck(args argTestSubroundSignatur }, }) sr := initSubroundSignatureWithContainer(container) - sr.WaitingAllSignaturesTimeOut = args.waitingAllSignaturesTimeOut + sr.SetWaitingAllSignaturesTimeOut(args.waitingAllSignaturesTimeOut) if !args.flagActive { leader, err := sr.GetLeader() @@ -1380,7 +1380,7 @@ func testSubroundSignatureDoSignatureConsensusCheck(args argTestSubroundSignatur _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) } - sr.Header = 
&block.HeaderV2{} + sr.SetHeader(&block.HeaderV2{}) assert.Equal(t, args.expectedResult, sr.DoSignatureConsensusCheck()) } } @@ -1395,7 +1395,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenFallbac }, }) sr := initSubroundSignatureWithContainer(container) - sr.WaitingAllSignaturesTimeOut = false + sr.SetWaitingAllSignaturesTimeOut(false) leader, err := sr.GetLeader() assert.Nil(t, err) @@ -1418,7 +1418,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenFallback }, }) sr := initSubroundSignatureWithContainer(container) - sr.WaitingAllSignaturesTimeOut = true + sr.SetWaitingAllSignaturesTimeOut(true) leader, err := sr.GetLeader() assert.Nil(t, err) @@ -1428,7 +1428,7 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenFallback _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) } - sr.Header = &block.HeaderV2{} + sr.SetHeader(&block.HeaderV2{}) assert.True(t, sr.DoSignatureConsensusCheck()) } @@ -1441,7 +1441,7 @@ func TestSubroundSignature_ReceivedSignatureReturnFalseWhenConsensusDataIsNotEqu require.Nil(t, err) cnsMsg := consensus.NewConsensusMessage( - append(sr.Data, []byte("X")...), + append(sr.GetData(), []byte("X")...), []byte("signature"), nil, nil, @@ -1457,6 +1457,6 @@ func TestSubroundSignature_ReceivedSignatureReturnFalseWhenConsensusDataIsNotEqu nil, ) - sr.Header = &block.HeaderV2{} + sr.SetHeader(&block.HeaderV2{}) assert.False(t, sr.ReceivedSignature(cnsMsg)) } diff --git a/consensus/spos/bls/v2/subroundStartRound_test.go b/consensus/spos/bls/v2/subroundStartRound_test.go index 6fd4ff7488f..28f063277c0 100644 --- a/consensus/spos/bls/v2/subroundStartRound_test.go +++ b/consensus/spos/bls/v2/subroundStartRound_test.go @@ -198,7 +198,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilConsensusStateShouldFail(t * sr, _ := defaultSubround(consensusState, ch, container) - sr.ConsensusState = nil + sr.ConsensusStateHandler = nil srStartRound, err := defaultSubroundStartRoundFromSubround(sr) assert.Nil(t, srStartRound) @@ -306,7 +306,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenRound sr := initSubroundStartRound() - sr.RoundCanceled = true + sr.SetRoundCanceled(true) ok := sr.DoStartRoundConsensusCheck() assert.False(t, ok) From c7b4ef7bca574972964d42e40b73045cd1bfe470 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 27 Sep 2024 18:06:03 +0300 Subject: [PATCH 24/30] add constructor tests --- consensus/spos/bls/{ => proxy}/errors.go | 2 +- consensus/spos/bls/proxy/subroundsHandler.go | 23 +- .../spos/bls/proxy/subroundsHandler_test.go | 148 ++++++ consensus/spos/bls/v2/benchmark_test.go | 2 +- .../common}/throttlerStub.go | 2 +- testscommon/consensus/consensusStateMock.go | 431 ++++++++++++++++-- 6 files changed, 556 insertions(+), 52 deletions(-) rename consensus/spos/bls/{ => proxy}/errors.go (99%) create mode 100644 consensus/spos/bls/proxy/subroundsHandler_test.go rename {node/mock => testscommon/common}/throttlerStub.go (98%) diff --git a/consensus/spos/bls/errors.go b/consensus/spos/bls/proxy/errors.go similarity index 99% rename from consensus/spos/bls/errors.go rename to consensus/spos/bls/proxy/errors.go index 9f889ed50f0..4036ecf1c63 100644 --- a/consensus/spos/bls/errors.go +++ b/consensus/spos/bls/proxy/errors.go @@ -1,4 +1,4 @@ -package bls +package proxy import ( "errors" diff --git a/consensus/spos/bls/proxy/subroundsHandler.go b/consensus/spos/bls/proxy/subroundsHandler.go index 19ff56357d9..63991781911 100644 --- 
a/consensus/spos/bls/proxy/subroundsHandler.go +++ b/consensus/spos/bls/proxy/subroundsHandler.go @@ -9,7 +9,6 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" - "github.com/multiversx/mx-chain-go/consensus/spos/bls" v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" "github.com/multiversx/mx-chain-go/factory" @@ -66,37 +65,37 @@ const ( func NewSubroundsHandler(args *SubroundsHandlerArgs) (*SubroundsHandler, error) { if check.IfNil(args.Chronology) { - return nil, bls.ErrNilChronologyHandler + return nil, ErrNilChronologyHandler } if check.IfNil(args.ConsensusCoreHandler) { - return nil, bls.ErrNilConsensusCoreHandler + return nil, ErrNilConsensusCoreHandler } if check.IfNil(args.ConsensusState) { - return nil, bls.ErrNilConsensusState + return nil, ErrNilConsensusState } if check.IfNil(args.Worker) { - return nil, bls.ErrNilWorker + return nil, ErrNilWorker } if check.IfNil(args.SignatureThrottler) { - return nil, bls.ErrNilSignatureThrottler + return nil, ErrNilSignatureThrottler } if check.IfNil(args.AppStatusHandler) { - return nil, bls.ErrNilAppStatusHandler + return nil, ErrNilAppStatusHandler } if check.IfNil(args.OutportHandler) { - return nil, bls.ErrNilOutportHandler + return nil, ErrNilOutportHandler } if check.IfNil(args.SentSignatureTracker) { - return nil, bls.ErrNilSentSignatureTracker + return nil, ErrNilSentSignatureTracker } if check.IfNil(args.EnableEpochsHandler) { - return nil, bls.ErrNilEnableEpochsHandler + return nil, ErrNilEnableEpochsHandler } if args.ChainID == nil { - return nil, bls.ErrNilChainID + return nil, ErrNilChainID } if len(args.CurrentPid) == 0 { - return nil, bls.ErrNilCurrentPid + return nil, ErrNilCurrentPid } subroundHandler := &SubroundsHandler{ diff --git a/consensus/spos/bls/proxy/subroundsHandler_test.go b/consensus/spos/bls/proxy/subroundsHandler_test.go new file mode 100644 index 00000000000..21711c6d30d --- /dev/null +++ b/consensus/spos/bls/proxy/subroundsHandler_test.go @@ -0,0 +1,148 @@ +package proxy + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/common" + "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + mock "github.com/multiversx/mx-chain-go/testscommon/epochstartmock" + outportStub "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +func getDefaultArgumentsSubroundHandler() *SubroundsHandlerArgs { + handlerArgs := &SubroundsHandlerArgs{ + Chronology: &consensus.ChronologyHandlerMock{}, + ConsensusState: &consensus.ConsensusStateMock{}, + Worker: &consensus.SposWorkerMock{}, + SignatureThrottler: &common.ThrottlerStub{}, + AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, + OutportHandler: &outportStub.OutportStub{}, + SentSignatureTracker: &testscommon.SentSignatureTrackerStub{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ChainID: []byte("chainID"), + CurrentPid: "peerID", + } + + consensusCore := &consensus.ConsensusCoreMock{} + consensusCore.SetEpochStartNotifier(&mock.EpochStartNotifierStub{}) + handlerArgs.ConsensusCoreHandler = consensusCore + + return handlerArgs +} + +func TestNewSubroundsHandler(t *testing.T) { + t.Parallel() + + t.Run("nil 
chronology should error", func(t *testing.T) { + t.Parallel() + + handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs.Chronology = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilChronologyHandler, err) + require.Nil(t, sh) + }) + t.Run("nil consensus core should error", func(t *testing.T) { + t.Parallel() + + handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs.ConsensusCoreHandler = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilConsensusCoreHandler, err) + require.Nil(t, sh) + }) + t.Run("nil consensus state should error", func(t *testing.T) { + t.Parallel() + + handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs.ConsensusState = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilConsensusState, err) + require.Nil(t, sh) + }) + t.Run("nil worker should error", func(t *testing.T) { + t.Parallel() + + handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs.Worker = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilWorker, err) + require.Nil(t, sh) + }) + t.Run("nil signature throttler should error", func(t *testing.T) { + t.Parallel() + + handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs.SignatureThrottler = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilSignatureThrottler, err) + require.Nil(t, sh) + }) + t.Run("nil app status handler should error", func(t *testing.T) { + t.Parallel() + + handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs.AppStatusHandler = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilAppStatusHandler, err) + require.Nil(t, sh) + }) + t.Run("nil outport handler should error", func(t *testing.T) { + t.Parallel() + + handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs.OutportHandler = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilOutportHandler, err) + require.Nil(t, sh) + }) + t.Run("nil sent signature tracker should error", func(t *testing.T) { + t.Parallel() + + handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs.SentSignatureTracker = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilSentSignatureTracker, err) + require.Nil(t, sh) + }) + t.Run("nil enable epochs handler should error", func(t *testing.T) { + t.Parallel() + + handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs.EnableEpochsHandler = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilEnableEpochsHandler, err) + require.Nil(t, sh) + }) + t.Run("nil chain ID should error", func(t *testing.T) { + t.Parallel() + + handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs.ChainID = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilChainID, err) + require.Nil(t, sh) + }) + t.Run("empty current PID should error", func(t *testing.T) { + t.Parallel() + + handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs.CurrentPid = "" + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilCurrentPid, err) + require.Nil(t, sh) + }) + t.Run("OK", func(t *testing.T) { + t.Parallel() + + handlerArgs := getDefaultArgumentsSubroundHandler() + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + }) +} diff --git a/consensus/spos/bls/v2/benchmark_test.go b/consensus/spos/bls/v2/benchmark_test.go index 37d217e0aa8..b7c4b962071 100644 --- a/consensus/spos/bls/v2/benchmark_test.go 
+++ b/consensus/spos/bls/v2/benchmark_test.go @@ -19,8 +19,8 @@ import ( "github.com/multiversx/mx-chain-go/consensus/spos/bls" v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" cryptoFactory "github.com/multiversx/mx-chain-go/factory/crypto" - nodeMock "github.com/multiversx/mx-chain-go/node/mock" "github.com/multiversx/mx-chain-go/testscommon" + nodeMock "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" diff --git a/node/mock/throttlerStub.go b/testscommon/common/throttlerStub.go similarity index 98% rename from node/mock/throttlerStub.go rename to testscommon/common/throttlerStub.go index 24ab94c45c3..f4f5e0a34d0 100644 --- a/node/mock/throttlerStub.go +++ b/testscommon/common/throttlerStub.go @@ -1,4 +1,4 @@ -package mock +package common // ThrottlerStub - type ThrottlerStub struct { diff --git a/testscommon/consensus/consensusStateMock.go b/testscommon/consensus/consensusStateMock.go index 943b0f5b5b4..d43adc4c769 100644 --- a/testscommon/consensus/consensusStateMock.go +++ b/testscommon/consensus/consensusStateMock.go @@ -1,32 +1,351 @@ package consensus import ( + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) // ConsensusStateMock - type ConsensusStateMock struct { - ResetConsensusStateCalled func() - IsNodeLeaderInCurrentRoundCalled func(node string) bool - IsSelfLeaderInCurrentRoundCalled func() bool - GetLeaderCalled func() (string, error) - GetNextConsensusGroupCalled func(randomSource string, vgs nodesCoordinator.NodesCoordinator) ([]string, error) - IsConsensusDataSetCalled func() bool - IsConsensusDataEqualCalled func(data []byte) bool - IsJobDoneCalled func(node string, currentSubroundId int) bool - IsSelfJobDoneCalled func(currentSubroundId int) bool - IsCurrentSubroundFinishedCalled func(currentSubroundId int) bool - IsNodeSelfCalled func(node string) bool - IsBlockBodyAlreadyReceivedCalled func() bool - IsHeaderAlreadyReceivedCalled func() bool - CanDoSubroundJobCalled func(currentSubroundId int) bool - CanProcessReceivedMessageCalled func(cnsDta consensus.Message, currentRoundIndex int32, currentSubroundId int) bool - GenerateBitmapCalled func(subroundId int) []byte - ProcessingBlockCalled func() bool - SetProcessingBlockCalled func(processingBlock bool) - ConsensusGroupSizeCalled func() int - SetThresholdCalled func(subroundId int, threshold int) + ResetConsensusStateCalled func() + IsNodeLeaderInCurrentRoundCalled func(node string) bool + IsSelfLeaderInCurrentRoundCalled func() bool + GetLeaderCalled func() (string, error) + GetNextConsensusGroupCalled func(randomSource []byte, round uint64, shardId uint32, nodesCoordinator nodesCoordinator.NodesCoordinator, epoch uint32) (string, []string, error) + IsConsensusDataSetCalled func() bool + IsConsensusDataEqualCalled func(data []byte) bool + IsJobDoneCalled func(node string, currentSubroundId int) bool + IsSelfJobDoneCalled func(currentSubroundId int) bool + IsCurrentSubroundFinishedCalled func(currentSubroundId int) bool + IsNodeSelfCalled func(node string) bool + IsBlockBodyAlreadyReceivedCalled func() bool + IsHeaderAlreadyReceivedCalled func() bool + 
CanDoSubroundJobCalled func(currentSubroundId int) bool + CanProcessReceivedMessageCalled func(cnsDta *consensus.Message, currentRoundIndex int64, currentSubroundId int) bool + GenerateBitmapCalled func(subroundId int) []byte + ProcessingBlockCalled func() bool + SetProcessingBlockCalled func(processingBlock bool) + ConsensusGroupSizeCalled func() int + SetThresholdCalled func(subroundId int, threshold int) + AddReceivedHeaderCalled func(headerHandler data.HeaderHandler) + GetReceivedHeadersCalled func() []data.HeaderHandler + AddMessageWithSignatureCalled func(key string, message p2p.MessageP2P) + GetMessageWithSignatureCalled func(key string) (p2p.MessageP2P, bool) + IsSubroundFinishedCalled func(subroundID int) bool + GetDataCalled func() []byte + SetDataCalled func(data []byte) + IsMultiKeyLeaderInCurrentRoundCalled func() bool + IsLeaderJobDoneCalled func(currentSubroundId int) bool + IsMultiKeyJobDoneCalled func(currentSubroundId int) bool + GetMultikeyRedundancyStepInReasonCalled func() string + ResetRoundsWithoutReceivedMessagesCalled func(pkBytes []byte, pid core.PeerID) + GetRoundCanceledCalled func() bool + SetRoundCanceledCalled func(state bool) + GetRoundIndexCalled func() int64 + SetRoundIndexCalled func(roundIndex int64) + GetRoundTimeStampCalled func() time.Time + SetRoundTimeStampCalled func(roundTimeStamp time.Time) + GetExtendedCalledCalled func() bool + GetBodyCalled func() data.BodyHandler + SetBodyCalled func(body data.BodyHandler) + GetHeaderCalled func() data.HeaderHandler + SetHeaderCalled func(header data.HeaderHandler) + GetWaitingAllSignaturesTimeOutCalled func() bool + SetWaitingAllSignaturesTimeOutCalled func(b bool) + ConsensusGroupIndexCalled func(pubKey string) (int, error) + SelfConsensusGroupIndexCalled func() (int, error) + SetEligibleListCalled func(eligibleList map[string]struct{}) + ConsensusGroupCalled func() []string + SetConsensusGroupCalled func(consensusGroup []string) + SetLeaderCalled func(leader string) + SetConsensusGroupSizeCalled func(consensusGroupSize int) + SelfPubKeyCalled func() string + SetSelfPubKeyCalled func(selfPubKey string) + JobDoneCalled func(key string, subroundId int) (bool, error) + SetJobDoneCalled func(key string, subroundId int, value bool) error + SelfJobDoneCalled func(subroundId int) (bool, error) + IsNodeInConsensusGroupCalled func(node string) bool + IsNodeInEligibleListCalled func(node string) bool + ComputeSizeCalled func(subroundId int) int + ResetRoundStateCalled func() + IsMultiKeyInConsensusGroupCalled func() bool + IsKeyManagedBySelfCalled func(pkBytes []byte) bool + IncrementRoundsWithoutReceivedMessagesCalled func(pkBytes []byte) + GetKeysHandlerCalled func() consensus.KeysHandler + LeaderCalled func() string + StatusCalled func(subroundId int) spos.SubroundStatus + SetStatusCalled func(subroundId int, subroundStatus spos.SubroundStatus) + ResetRoundStatusCalled func() + ThresholdCalled func(subroundId int) int + FallbackThresholdCalled func(subroundId int) int + SetFallbackThresholdCalled func(subroundId int, threshold int) +} + +func (cnsm *ConsensusStateMock) AddReceivedHeader(headerHandler data.HeaderHandler) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) GetReceivedHeaders() []data.HeaderHandler { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) AddMessageWithSignature(key string, message p2p.MessageP2P) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) GetMessageWithSignature(key string) 
(p2p.MessageP2P, bool) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) IsSubroundFinished(subroundID int) bool { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) GetData() []byte { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetData(data []byte) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) IsMultiKeyLeaderInCurrentRound() bool { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) IsLeaderJobDone(currentSubroundId int) bool { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) IsMultiKeyJobDone(currentSubroundId int) bool { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) GetMultikeyRedundancyStepInReason() string { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) GetRoundCanceled() bool { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetRoundCanceled(state bool) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) GetRoundIndex() int64 { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetRoundIndex(roundIndex int64) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) GetRoundTimeStamp() time.Time { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetRoundTimeStamp(roundTimeStamp time.Time) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) GetExtendedCalled() bool { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) GetBody() data.BodyHandler { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetBody(body data.BodyHandler) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) GetHeader() data.HeaderHandler { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetHeader(header data.HeaderHandler) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) GetWaitingAllSignaturesTimeOut() bool { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetWaitingAllSignaturesTimeOut(b bool) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) ConsensusGroupIndex(pubKey string) (int, error) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SelfConsensusGroupIndex() (int, error) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetEligibleList(eligibleList map[string]struct{}) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) ConsensusGroup() []string { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetConsensusGroup(consensusGroup []string) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetLeader(leader string) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetConsensusGroupSize(consensusGroupSize int) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) 
SelfPubKey() string { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetSelfPubKey(selfPubKey string) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) JobDone(key string, subroundId int) (bool, error) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetJobDone(key string, subroundId int, value bool) error { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SelfJobDone(subroundId int) (bool, error) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) IsNodeInConsensusGroup(node string) bool { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) IsNodeInEligibleList(node string) bool { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) ComputeSize(subroundId int) int { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) ResetRoundState() { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) IsMultiKeyInConsensusGroup() bool { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) IsKeyManagedBySelf(pkBytes []byte) bool { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) IncrementRoundsWithoutReceivedMessages(pkBytes []byte) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) GetKeysHandler() consensus.KeysHandler { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) Leader() string { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) Status(subroundId int) spos.SubroundStatus { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetStatus(subroundId int, subroundStatus spos.SubroundStatus) { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) ResetRoundStatus() { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) Threshold(subroundId int) int { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) FallbackThreshold(subroundId int) int { + // TODO implement me + panic("implement me") +} + +func (cnsm *ConsensusStateMock) SetFallbackThreshold(subroundId int, threshold int) { + // TODO implement me + panic("implement me") } // ResetConsensusState - @@ -51,10 +370,13 @@ func (cnsm *ConsensusStateMock) GetLeader() (string, error) { // GetNextConsensusGroup - func (cnsm *ConsensusStateMock) GetNextConsensusGroup( - randomSource string, - vgs nodesCoordinator.NodesCoordinator, -) ([]string, error) { - return cnsm.GetNextConsensusGroupCalled(randomSource, vgs) + randomSource []byte, + round uint64, + shardId uint32, + nodesCoordinator nodesCoordinator.NodesCoordinator, + epoch uint32, +) (string, []string, error) { + return cnsm.GetNextConsensusGroupCalled(randomSource, round, shardId, nodesCoordinator, epoch) } // IsConsensusDataSet - @@ -74,38 +396,56 @@ func (cnsm *ConsensusStateMock) IsJobDone(node string, currentSubroundId int) bo // IsSelfJobDone - func (cnsm *ConsensusStateMock) IsSelfJobDone(currentSubroundId int) bool { - return cnsm.IsSelfJobDoneCalled(currentSubroundId) + if cnsm.IsSelfJobDoneCalled != nil { + return cnsm.IsSelfJobDoneCalled(currentSubroundId) + } + return false } // IsCurrentSubroundFinished - func (cnsm *ConsensusStateMock) IsCurrentSubroundFinished(currentSubroundId 
int) bool { - return cnsm.IsCurrentSubroundFinishedCalled(currentSubroundId) + if cnsm.IsCurrentSubroundFinishedCalled != nil { + return cnsm.IsCurrentSubroundFinishedCalled(currentSubroundId) + } + return false } // IsNodeSelf - func (cnsm *ConsensusStateMock) IsNodeSelf(node string) bool { - return cnsm.IsNodeSelfCalled(node) + if cnsm.IsNodeSelfCalled != nil { + return cnsm.IsNodeSelfCalled(node) + } + return false } // IsBlockBodyAlreadyReceived - func (cnsm *ConsensusStateMock) IsBlockBodyAlreadyReceived() bool { - return cnsm.IsBlockBodyAlreadyReceivedCalled() + if cnsm.IsBlockBodyAlreadyReceivedCalled != nil { + return cnsm.IsBlockBodyAlreadyReceivedCalled() + } + return false } // IsHeaderAlreadyReceived - func (cnsm *ConsensusStateMock) IsHeaderAlreadyReceived() bool { - return cnsm.IsHeaderAlreadyReceivedCalled() + if cnsm.IsHeaderAlreadyReceivedCalled != nil { + return cnsm.IsHeaderAlreadyReceivedCalled() + } + return false } // CanDoSubroundJob - func (cnsm *ConsensusStateMock) CanDoSubroundJob(currentSubroundId int) bool { - return cnsm.CanDoSubroundJobCalled(currentSubroundId) + if cnsm.CanDoSubroundJobCalled != nil { + return cnsm.CanDoSubroundJobCalled(currentSubroundId) + } + return false } // CanProcessReceivedMessage - func (cnsm *ConsensusStateMock) CanProcessReceivedMessage( - cnsDta consensus.Message, - currentRoundIndex int32, + cnsDta *consensus.Message, + currentRoundIndex int64, currentSubroundId int, ) bool { return cnsm.CanProcessReceivedMessageCalled(cnsDta, currentRoundIndex, currentSubroundId) @@ -113,25 +453,42 @@ func (cnsm *ConsensusStateMock) CanProcessReceivedMessage( // GenerateBitmap - func (cnsm *ConsensusStateMock) GenerateBitmap(subroundId int) []byte { - return cnsm.GenerateBitmapCalled(subroundId) + if cnsm.GenerateBitmapCalled != nil { + return cnsm.GenerateBitmapCalled(subroundId) + } + return nil } // ProcessingBlock - func (cnsm *ConsensusStateMock) ProcessingBlock() bool { - return cnsm.ProcessingBlockCalled() + if cnsm.ProcessingBlockCalled != nil { + return cnsm.ProcessingBlockCalled() + } + return false } // SetProcessingBlock - func (cnsm *ConsensusStateMock) SetProcessingBlock(processingBlock bool) { - cnsm.SetProcessingBlockCalled(processingBlock) + if cnsm.SetProcessingBlockCalled != nil { + cnsm.SetProcessingBlockCalled(processingBlock) + } } // ConsensusGroupSize - func (cnsm *ConsensusStateMock) ConsensusGroupSize() int { - return cnsm.ConsensusGroupSizeCalled() + if cnsm.ConsensusGroupSizeCalled != nil { + return cnsm.ConsensusGroupSizeCalled() + } + return 0 } // SetThreshold - func (cnsm *ConsensusStateMock) SetThreshold(subroundId int, threshold int) { - cnsm.SetThresholdCalled(subroundId, threshold) + if cnsm.SetThresholdCalled != nil { + cnsm.SetThresholdCalled(subroundId, threshold) + } +} + +func (cnsm *ConsensusStateMock) IsInterfaceNil() bool { + return cnsm == nil } From 351c118eea7155cd5efc9b562b34b8dad9c9dae8 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Fri, 27 Sep 2024 18:17:37 +0300 Subject: [PATCH 25/30] fix cyclic import --- consensus/spos/roundStatus.go | 2 +- testscommon/consensus/consensusStateMock.go | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/consensus/spos/roundStatus.go b/consensus/spos/roundStatus.go index 8517396904a..7d3b67fdc15 100644 --- a/consensus/spos/roundStatus.go +++ b/consensus/spos/roundStatus.go @@ -5,7 +5,7 @@ import ( ) // SubroundStatus defines the type used to refer the state of the current subround -type SubroundStatus int +type SubroundStatus = int 
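Aside on the `type SubroundStatus = int` change above: declaring an alias (rather than a new defined type) makes spos.SubroundStatus and int identical, so packages such as the consensus state mock below can declare their callbacks with plain int and drop their import of the spos package, which is what removes the cyclic import. A minimal, hypothetical sketch of the mechanism (the identifiers below are illustrative only and are not part of this patch):

	package main

	import "fmt"

	// SubroundStatus mirrors the alias introduced above: it is identical to int,
	// so code that only handles int values never has to import the declaring package.
	type SubroundStatus = int

	// statusCallback stands in for a mock field declared as func(subroundId int) int.
	func statusCallback(subroundId int) int { return subroundId }

	func main() {
		var status SubroundStatus = statusCallback(1) // assignable with no conversion, because SubroundStatus is an alias
		fmt.Println(status)
	}

Had SubroundStatus remained a defined type (type SubroundStatus int), the mock signatures would have needed either the spos import or explicit conversions at every call site.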
const ( // SsNotFinished defines the un-finished state of the subround diff --git a/testscommon/consensus/consensusStateMock.go b/testscommon/consensus/consensusStateMock.go index d43adc4c769..7eade4e70ea 100644 --- a/testscommon/consensus/consensusStateMock.go +++ b/testscommon/consensus/consensusStateMock.go @@ -7,7 +7,6 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/consensus" - "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) @@ -80,8 +79,8 @@ type ConsensusStateMock struct { IncrementRoundsWithoutReceivedMessagesCalled func(pkBytes []byte) GetKeysHandlerCalled func() consensus.KeysHandler LeaderCalled func() string - StatusCalled func(subroundId int) spos.SubroundStatus - SetStatusCalled func(subroundId int, subroundStatus spos.SubroundStatus) + StatusCalled func(subroundId int) int + SetStatusCalled func(subroundId int, subroundStatus int) ResetRoundStatusCalled func() ThresholdCalled func(subroundId int) int FallbackThresholdCalled func(subroundId int) int @@ -318,12 +317,12 @@ func (cnsm *ConsensusStateMock) Leader() string { panic("implement me") } -func (cnsm *ConsensusStateMock) Status(subroundId int) spos.SubroundStatus { +func (cnsm *ConsensusStateMock) Status(subroundId int) int { // TODO implement me panic("implement me") } -func (cnsm *ConsensusStateMock) SetStatus(subroundId int, subroundStatus spos.SubroundStatus) { +func (cnsm *ConsensusStateMock) SetStatus(subroundId int, subroundStatus int) { // TODO implement me panic("implement me") } @@ -489,6 +488,7 @@ func (cnsm *ConsensusStateMock) SetThreshold(subroundId int, threshold int) { } } +// IsInterfaceNil returns true if there is no value under the interface func (cnsm *ConsensusStateMock) IsInterfaceNil() bool { return cnsm == nil } From 7052ecb62e7141da08264a5e2d837e61524d1834 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Mon, 30 Sep 2024 14:10:25 +0300 Subject: [PATCH 26/30] update consensusStateMock --- testscommon/consensus/consensusStateMock.go | 382 ++++++++++++++------ 1 file changed, 270 insertions(+), 112 deletions(-) diff --git a/testscommon/consensus/consensusStateMock.go b/testscommon/consensus/consensusStateMock.go index 7eade4e70ea..dae02a0323c 100644 --- a/testscommon/consensus/consensusStateMock.go +++ b/testscommon/consensus/consensusStateMock.go @@ -87,284 +87,430 @@ type ConsensusStateMock struct { SetFallbackThresholdCalled func(subroundId int, threshold int) } +// AddReceivedHeader - func (cnsm *ConsensusStateMock) AddReceivedHeader(headerHandler data.HeaderHandler) { - // TODO implement me - panic("implement me") + if cnsm.AddReceivedHeaderCalled != nil { + cnsm.AddReceivedHeaderCalled(headerHandler) + } } +// GetReceivedHeaders - func (cnsm *ConsensusStateMock) GetReceivedHeaders() []data.HeaderHandler { - // TODO implement me - panic("implement me") + if cnsm.GetReceivedHeadersCalled != nil { + return cnsm.GetReceivedHeadersCalled() + } + return nil } +// AddMessageWithSignature - func (cnsm *ConsensusStateMock) AddMessageWithSignature(key string, message p2p.MessageP2P) { - // TODO implement me - panic("implement me") + if cnsm.AddMessageWithSignatureCalled != nil { + cnsm.AddMessageWithSignatureCalled(key, message) + } } +// GetMessageWithSignature - func (cnsm *ConsensusStateMock) GetMessageWithSignature(key string) (p2p.MessageP2P, bool) { - // TODO implement me - panic("implement me") + if cnsm.GetMessageWithSignatureCalled != nil { + 
return cnsm.GetMessageWithSignatureCalled(key) + } + return nil, false } +// IsSubroundFinished - func (cnsm *ConsensusStateMock) IsSubroundFinished(subroundID int) bool { - // TODO implement me - panic("implement me") + if cnsm.IsSubroundFinishedCalled != nil { + return cnsm.IsSubroundFinishedCalled(subroundID) + } + return false } +// GetData - func (cnsm *ConsensusStateMock) GetData() []byte { - // TODO implement me - panic("implement me") + if cnsm.GetDataCalled != nil { + return cnsm.GetDataCalled() + } + return nil } +// SetData - func (cnsm *ConsensusStateMock) SetData(data []byte) { - // TODO implement me - panic("implement me") + if cnsm.SetDataCalled != nil { + cnsm.SetDataCalled(data) + } } +// IsMultiKeyLeaderInCurrentRound - func (cnsm *ConsensusStateMock) IsMultiKeyLeaderInCurrentRound() bool { - // TODO implement me - panic("implement me") + if cnsm.IsMultiKeyLeaderInCurrentRoundCalled != nil { + return cnsm.IsMultiKeyLeaderInCurrentRoundCalled() + } + return false } +// IsLeaderJobDone - func (cnsm *ConsensusStateMock) IsLeaderJobDone(currentSubroundId int) bool { - // TODO implement me - panic("implement me") + if cnsm.IsLeaderJobDoneCalled != nil { + return cnsm.IsLeaderJobDoneCalled(currentSubroundId) + } + return false } +// IsMultiKeyJobDone - func (cnsm *ConsensusStateMock) IsMultiKeyJobDone(currentSubroundId int) bool { - // TODO implement me - panic("implement me") + if cnsm.IsMultiKeyJobDoneCalled != nil { + return cnsm.IsMultiKeyJobDoneCalled(currentSubroundId) + } + return false } +// GetMultikeyRedundancyStepInReason - func (cnsm *ConsensusStateMock) GetMultikeyRedundancyStepInReason() string { - // TODO implement me - panic("implement me") + if cnsm.GetMultikeyRedundancyStepInReasonCalled != nil { + return cnsm.GetMultikeyRedundancyStepInReasonCalled() + } + return "" } +// ResetRoundsWithoutReceivedMessages - func (cnsm *ConsensusStateMock) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) { - // TODO implement me - panic("implement me") + if cnsm.ResetRoundsWithoutReceivedMessagesCalled != nil { + cnsm.ResetRoundsWithoutReceivedMessagesCalled(pkBytes, pid) + } } +// GetRoundCanceled - func (cnsm *ConsensusStateMock) GetRoundCanceled() bool { - // TODO implement me - panic("implement me") + if cnsm.GetRoundCanceledCalled != nil { + return cnsm.GetRoundCanceledCalled() + } + return false } +// SetRoundCanceled - func (cnsm *ConsensusStateMock) SetRoundCanceled(state bool) { - // TODO implement me - panic("implement me") + if cnsm.SetRoundCanceledCalled != nil { + cnsm.SetRoundCanceledCalled(state) + } } +// GetRoundIndex - func (cnsm *ConsensusStateMock) GetRoundIndex() int64 { - // TODO implement me - panic("implement me") + if cnsm.GetRoundIndexCalled != nil { + return cnsm.GetRoundIndexCalled() + } + return 0 } +// SetRoundIndex - func (cnsm *ConsensusStateMock) SetRoundIndex(roundIndex int64) { - // TODO implement me - panic("implement me") + if cnsm.SetRoundIndexCalled != nil { + cnsm.SetRoundIndexCalled(roundIndex) + } } +// GetRoundTimeStamp - func (cnsm *ConsensusStateMock) GetRoundTimeStamp() time.Time { - // TODO implement me - panic("implement me") + if cnsm.GetRoundTimeStampCalled != nil { + return cnsm.GetRoundTimeStampCalled() + } + return time.Time{} } +// SetRoundTimeStamp - func (cnsm *ConsensusStateMock) SetRoundTimeStamp(roundTimeStamp time.Time) { - // TODO implement me - panic("implement me") + if cnsm.SetRoundTimeStampCalled != nil { + cnsm.SetRoundTimeStampCalled(roundTimeStamp) + } } +// GetExtendedCalled - func (cnsm 
*ConsensusStateMock) GetExtendedCalled() bool { - // TODO implement me - panic("implement me") + if cnsm.GetExtendedCalledCalled != nil { + return cnsm.GetExtendedCalledCalled() + } + return false } +// GetBody - func (cnsm *ConsensusStateMock) GetBody() data.BodyHandler { - // TODO implement me - panic("implement me") + if cnsm.GetBodyCalled != nil { + return cnsm.GetBodyCalled() + } + return nil } +// SetBody - func (cnsm *ConsensusStateMock) SetBody(body data.BodyHandler) { - // TODO implement me - panic("implement me") + if cnsm.SetBodyCalled != nil { + cnsm.SetBodyCalled(body) + } } +// GetHeader - func (cnsm *ConsensusStateMock) GetHeader() data.HeaderHandler { - // TODO implement me - panic("implement me") + if cnsm.GetHeaderCalled != nil { + return cnsm.GetHeaderCalled() + } + return nil } +// SetHeader - func (cnsm *ConsensusStateMock) SetHeader(header data.HeaderHandler) { - // TODO implement me - panic("implement me") + if cnsm.SetHeaderCalled != nil { + cnsm.SetHeaderCalled(header) + } } +// GetWaitingAllSignaturesTimeOut - func (cnsm *ConsensusStateMock) GetWaitingAllSignaturesTimeOut() bool { - // TODO implement me - panic("implement me") + if cnsm.GetWaitingAllSignaturesTimeOutCalled != nil { + return cnsm.GetWaitingAllSignaturesTimeOutCalled() + } + return false } +// SetWaitingAllSignaturesTimeOut - func (cnsm *ConsensusStateMock) SetWaitingAllSignaturesTimeOut(b bool) { - // TODO implement me - panic("implement me") + if cnsm.SetWaitingAllSignaturesTimeOutCalled != nil { + cnsm.SetWaitingAllSignaturesTimeOutCalled(b) + } } +// ConsensusGroupIndex - func (cnsm *ConsensusStateMock) ConsensusGroupIndex(pubKey string) (int, error) { - // TODO implement me - panic("implement me") + if cnsm.ConsensusGroupIndexCalled != nil { + return cnsm.ConsensusGroupIndexCalled(pubKey) + } + return 0, nil } +// SelfConsensusGroupIndex - func (cnsm *ConsensusStateMock) SelfConsensusGroupIndex() (int, error) { - // TODO implement me - panic("implement me") + if cnsm.SelfConsensusGroupIndexCalled != nil { + return cnsm.SelfConsensusGroupIndexCalled() + } + return 0, nil } +// SetEligibleList - func (cnsm *ConsensusStateMock) SetEligibleList(eligibleList map[string]struct{}) { - // TODO implement me - panic("implement me") + if cnsm.SetEligibleListCalled != nil { + cnsm.SetEligibleListCalled(eligibleList) + } } +// ConsensusGroup - func (cnsm *ConsensusStateMock) ConsensusGroup() []string { - // TODO implement me - panic("implement me") + if cnsm.ConsensusGroupCalled != nil { + return cnsm.ConsensusGroupCalled() + } + return nil } +// SetConsensusGroup - func (cnsm *ConsensusStateMock) SetConsensusGroup(consensusGroup []string) { - // TODO implement me - panic("implement me") + if cnsm.SetConsensusGroupCalled != nil { + cnsm.SetConsensusGroupCalled(consensusGroup) + } } +// SetLeader - func (cnsm *ConsensusStateMock) SetLeader(leader string) { - // TODO implement me - panic("implement me") + if cnsm.SetLeaderCalled != nil { + cnsm.SetLeaderCalled(leader) + } } +// SetConsensusGroupSize - func (cnsm *ConsensusStateMock) SetConsensusGroupSize(consensusGroupSize int) { - // TODO implement me - panic("implement me") + if cnsm.SetConsensusGroupSizeCalled != nil { + cnsm.SetConsensusGroupSizeCalled(consensusGroupSize) + } } +// SelfPubKey - func (cnsm *ConsensusStateMock) SelfPubKey() string { - // TODO implement me - panic("implement me") + if cnsm.SelfPubKeyCalled != nil { + return cnsm.SelfPubKeyCalled() + } + return "" } +// SetSelfPubKey - func (cnsm *ConsensusStateMock) SetSelfPubKey(selfPubKey 
string) { - // TODO implement me - panic("implement me") + if cnsm.SetSelfPubKeyCalled != nil { + cnsm.SetSelfPubKeyCalled(selfPubKey) + } } +// JobDone - func (cnsm *ConsensusStateMock) JobDone(key string, subroundId int) (bool, error) { - // TODO implement me - panic("implement me") + if cnsm.JobDoneCalled != nil { + return cnsm.JobDoneCalled(key, subroundId) + } + return false, nil } +// SetJobDone - func (cnsm *ConsensusStateMock) SetJobDone(key string, subroundId int, value bool) error { - // TODO implement me - panic("implement me") + if cnsm.SetJobDoneCalled != nil { + return cnsm.SetJobDoneCalled(key, subroundId, value) + } + return nil } +// SelfJobDone - func (cnsm *ConsensusStateMock) SelfJobDone(subroundId int) (bool, error) { - // TODO implement me - panic("implement me") + if cnsm.SelfJobDoneCalled != nil { + return cnsm.SelfJobDoneCalled(subroundId) + } + return false, nil } +// IsNodeInConsensusGroup - func (cnsm *ConsensusStateMock) IsNodeInConsensusGroup(node string) bool { - // TODO implement me - panic("implement me") + if cnsm.IsNodeInConsensusGroupCalled != nil { + return cnsm.IsNodeInConsensusGroupCalled(node) + } + return false } +// IsNodeInEligibleList - func (cnsm *ConsensusStateMock) IsNodeInEligibleList(node string) bool { - // TODO implement me - panic("implement me") + if cnsm.IsNodeInEligibleListCalled != nil { + return cnsm.IsNodeInEligibleListCalled(node) + } + return false } +// ComputeSize - func (cnsm *ConsensusStateMock) ComputeSize(subroundId int) int { - // TODO implement me - panic("implement me") + if cnsm.ComputeSizeCalled != nil { + return cnsm.ComputeSizeCalled(subroundId) + } + return 0 } +// ResetRoundState - func (cnsm *ConsensusStateMock) ResetRoundState() { - // TODO implement me - panic("implement me") + if cnsm.ResetRoundStateCalled != nil { + cnsm.ResetRoundStateCalled() + } } +// IsMultiKeyInConsensusGroup - func (cnsm *ConsensusStateMock) IsMultiKeyInConsensusGroup() bool { - // TODO implement me - panic("implement me") + if cnsm.IsMultiKeyInConsensusGroupCalled != nil { + return cnsm.IsMultiKeyInConsensusGroupCalled() + } + return false } +// IsKeyManagedBySelf - func (cnsm *ConsensusStateMock) IsKeyManagedBySelf(pkBytes []byte) bool { - // TODO implement me - panic("implement me") + if cnsm.IsKeyManagedBySelfCalled != nil { + return cnsm.IsKeyManagedBySelfCalled(pkBytes) + } + return false } +// IncrementRoundsWithoutReceivedMessages - func (cnsm *ConsensusStateMock) IncrementRoundsWithoutReceivedMessages(pkBytes []byte) { - // TODO implement me - panic("implement me") + if cnsm.IncrementRoundsWithoutReceivedMessagesCalled != nil { + cnsm.IncrementRoundsWithoutReceivedMessagesCalled(pkBytes) + } } +// GetKeysHandler - func (cnsm *ConsensusStateMock) GetKeysHandler() consensus.KeysHandler { - // TODO implement me - panic("implement me") + if cnsm.GetKeysHandlerCalled != nil { + return cnsm.GetKeysHandlerCalled() + } + return nil } +// Leader - func (cnsm *ConsensusStateMock) Leader() string { - // TODO implement me - panic("implement me") + if cnsm.LeaderCalled != nil { + return cnsm.LeaderCalled() + } + return "" } +// Status - func (cnsm *ConsensusStateMock) Status(subroundId int) int { - // TODO implement me - panic("implement me") + if cnsm.StatusCalled != nil { + return cnsm.StatusCalled(subroundId) + } + return 0 } +// SetStatus - func (cnsm *ConsensusStateMock) SetStatus(subroundId int, subroundStatus int) { - // TODO implement me - panic("implement me") + if cnsm.SetStatusCalled != nil { + cnsm.SetStatusCalled(subroundId, 
subroundStatus) + } } +// ResetRoundStatus - func (cnsm *ConsensusStateMock) ResetRoundStatus() { - // TODO implement me - panic("implement me") + if cnsm.ResetRoundStatusCalled != nil { + cnsm.ResetRoundStatusCalled() + } } +// Threshold - func (cnsm *ConsensusStateMock) Threshold(subroundId int) int { - // TODO implement me - panic("implement me") + if cnsm.ThresholdCalled != nil { + return cnsm.ThresholdCalled(subroundId) + } + return 0 } +// FallbackThreshold - func (cnsm *ConsensusStateMock) FallbackThreshold(subroundId int) int { - // TODO implement me - panic("implement me") + if cnsm.FallbackThresholdCalled != nil { + return cnsm.FallbackThresholdCalled(subroundId) + } + return 0 } func (cnsm *ConsensusStateMock) SetFallbackThreshold(subroundId int, threshold int) { - // TODO implement me - panic("implement me") + if cnsm.SetFallbackThresholdCalled != nil { + cnsm.SetFallbackThresholdCalled(subroundId, threshold) + } } // ResetConsensusState - func (cnsm *ConsensusStateMock) ResetConsensusState() { - cnsm.ResetConsensusStateCalled() + if cnsm.ResetConsensusStateCalled != nil { + cnsm.ResetConsensusStateCalled() + } } // IsNodeLeaderInCurrentRound - func (cnsm *ConsensusStateMock) IsNodeLeaderInCurrentRound(node string) bool { - return cnsm.IsNodeLeaderInCurrentRoundCalled(node) + if cnsm.IsNodeLeaderInCurrentRoundCalled != nil { + return cnsm.IsNodeLeaderInCurrentRoundCalled(node) + } + return false } // IsSelfLeaderInCurrentRound - func (cnsm *ConsensusStateMock) IsSelfLeaderInCurrentRound() bool { - return cnsm.IsSelfLeaderInCurrentRoundCalled() + if cnsm.IsSelfLeaderInCurrentRoundCalled != nil { + return cnsm.IsSelfLeaderInCurrentRoundCalled() + } + return false } // GetLeader - func (cnsm *ConsensusStateMock) GetLeader() (string, error) { - return cnsm.GetLeaderCalled() + if cnsm.GetLeaderCalled != nil { + return cnsm.GetLeaderCalled() + } + return "", nil } // GetNextConsensusGroup - @@ -375,22 +521,34 @@ func (cnsm *ConsensusStateMock) GetNextConsensusGroup( nodesCoordinator nodesCoordinator.NodesCoordinator, epoch uint32, ) (string, []string, error) { - return cnsm.GetNextConsensusGroupCalled(randomSource, round, shardId, nodesCoordinator, epoch) + if cnsm.GetNextConsensusGroupCalled != nil { + return cnsm.GetNextConsensusGroupCalled(randomSource, round, shardId, nodesCoordinator, epoch) + } + return "", nil, nil } // IsConsensusDataSet - func (cnsm *ConsensusStateMock) IsConsensusDataSet() bool { - return cnsm.IsConsensusDataSetCalled() + if cnsm.IsConsensusDataSetCalled != nil { + return cnsm.IsConsensusDataSetCalled() + } + return false } // IsConsensusDataEqual - func (cnsm *ConsensusStateMock) IsConsensusDataEqual(data []byte) bool { - return cnsm.IsConsensusDataEqualCalled(data) + if cnsm.IsConsensusDataEqualCalled != nil { + return cnsm.IsConsensusDataEqualCalled(data) + } + return false } // IsJobDone - func (cnsm *ConsensusStateMock) IsJobDone(node string, currentSubroundId int) bool { - return cnsm.IsJobDoneCalled(node, currentSubroundId) + if cnsm.IsJobDoneCalled != nil { + return cnsm.IsJobDoneCalled(node, currentSubroundId) + } + return false } // IsSelfJobDone - From 9edc7c70efdb4313d736f43d2ea2828fbd2f2d97 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Mon, 30 Sep 2024 18:02:31 +0300 Subject: [PATCH 27/30] add initSubroundsForEpoch tests --- .../spos/bls/proxy/subroundsHandler_test.go | 209 ++++++++++++++++-- testscommon/consensus/sposWorkerMock.go | 33 ++- 2 files changed, 216 insertions(+), 26 deletions(-) diff --git 
a/consensus/spos/bls/proxy/subroundsHandler_test.go b/consensus/spos/bls/proxy/subroundsHandler_test.go index 21711c6d30d..25118cfc45c 100644 --- a/consensus/spos/bls/proxy/subroundsHandler_test.go +++ b/consensus/spos/bls/proxy/subroundsHandler_test.go @@ -1,38 +1,85 @@ package proxy import ( + "sync/atomic" "testing" + "github.com/multiversx/mx-chain-core-go/core" + crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/stretchr/testify/require" + mock2 "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/bootstrapperStubs" "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" mock "github.com/multiversx/mx-chain-go/testscommon/epochstartmock" outportStub "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) -func getDefaultArgumentsSubroundHandler() *SubroundsHandlerArgs { +func getDefaultArgumentsSubroundHandler() (*SubroundsHandlerArgs, *consensus.ConsensusCoreMock) { + x := make(chan bool) + chronology := &consensus.ChronologyHandlerMock{} + epochsEnable := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + epochStartNotifier := &mock.EpochStartNotifierStub{} + consensusState := &consensus.ConsensusStateMock{} + worker := &consensus.SposWorkerMock{ + RemoveAllReceivedMessagesCallsCalled: func() {}, + GetConsensusStateChangedChannelsCalled: func() chan bool { + return x + }, + } + antiFloodHandler := &mock2.P2PAntifloodHandlerStub{} handlerArgs := &SubroundsHandlerArgs{ - Chronology: &consensus.ChronologyHandlerMock{}, - ConsensusState: &consensus.ConsensusStateMock{}, - Worker: &consensus.SposWorkerMock{}, + Chronology: chronology, + ConsensusState: consensusState, + Worker: worker, SignatureThrottler: &common.ThrottlerStub{}, AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, OutportHandler: &outportStub.OutportStub{}, SentSignatureTracker: &testscommon.SentSignatureTrackerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: epochsEnable, ChainID: []byte("chainID"), CurrentPid: "peerID", } consensusCore := &consensus.ConsensusCoreMock{} - consensusCore.SetEpochStartNotifier(&mock.EpochStartNotifierStub{}) + consensusCore.SetEpochStartNotifier(epochStartNotifier) + consensusCore.SetBlockchain(&testscommon.ChainHandlerStub{}) + consensusCore.SetBlockProcessor(&testscommon.BlockProcessorStub{}) + consensusCore.SetBootStrapper(&bootstrapperStubs.BootstrapperStub{}) + consensusCore.SetBroadcastMessenger(&consensus.BroadcastMessengerMock{}) + consensusCore.SetChronology(chronology) + consensusCore.SetAntifloodHandler(antiFloodHandler) + consensusCore.SetHasher(&testscommon.HasherStub{}) + consensusCore.SetMarshalizer(&testscommon.MarshallerStub{}) + consensusCore.SetMultiSignerContainer(&cryptoMocks.MultiSignerContainerStub{ + GetMultiSignerCalled: func(epoch uint32) (crypto.MultiSigner, error) { + return &cryptoMocks.MultisignerMock{}, nil + }, + }) + consensusCore.SetRoundHandler(&consensus.RoundHandlerMock{}) + consensusCore.SetShardCoordinator(&testscommon.ShardsCoordinatorMock{}) + consensusCore.SetSyncTimer(&testscommon.SyncTimerStub{}) + 
consensusCore.SetValidatorGroupSelector(&shardingMocks.NodesCoordinatorMock{}) + consensusCore.SetPeerHonestyHandler(&testscommon.PeerHonestyHandlerStub{}) + consensusCore.SetHeaderSigVerifier(&consensus.HeaderSigVerifierMock{}) + consensusCore.SetFallbackHeaderValidator(&testscommon.FallBackHeaderValidatorStub{}) + consensusCore.SetNodeRedundancyHandler(&mock2.NodeRedundancyHandlerStub{}) + consensusCore.SetScheduledProcessor(&consensus.ScheduledProcessorStub{}) + consensusCore.SetMessageSigningHandler(&mock2.MessageSigningHandlerStub{}) + consensusCore.SetPeerBlacklistHandler(&mock2.PeerBlacklistHandlerStub{}) + consensusCore.SetSigningHandler(&consensus.SigningHandlerStub{}) + consensusCore.SetEnableEpochsHandler(epochsEnable) + consensusCore.SetEquivalentProofsPool(&dataRetriever.ProofsPoolMock{}) handlerArgs.ConsensusCoreHandler = consensusCore - return handlerArgs + return handlerArgs, consensusCore } func TestNewSubroundsHandler(t *testing.T) { @@ -41,7 +88,7 @@ func TestNewSubroundsHandler(t *testing.T) { t.Run("nil chronology should error", func(t *testing.T) { t.Parallel() - handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs, _ := getDefaultArgumentsSubroundHandler() handlerArgs.Chronology = nil sh, err := NewSubroundsHandler(handlerArgs) require.Equal(t, ErrNilChronologyHandler, err) @@ -50,7 +97,7 @@ func TestNewSubroundsHandler(t *testing.T) { t.Run("nil consensus core should error", func(t *testing.T) { t.Parallel() - handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs, _ := getDefaultArgumentsSubroundHandler() handlerArgs.ConsensusCoreHandler = nil sh, err := NewSubroundsHandler(handlerArgs) require.Equal(t, ErrNilConsensusCoreHandler, err) @@ -59,7 +106,7 @@ func TestNewSubroundsHandler(t *testing.T) { t.Run("nil consensus state should error", func(t *testing.T) { t.Parallel() - handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs, _ := getDefaultArgumentsSubroundHandler() handlerArgs.ConsensusState = nil sh, err := NewSubroundsHandler(handlerArgs) require.Equal(t, ErrNilConsensusState, err) @@ -68,7 +115,7 @@ func TestNewSubroundsHandler(t *testing.T) { t.Run("nil worker should error", func(t *testing.T) { t.Parallel() - handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs, _ := getDefaultArgumentsSubroundHandler() handlerArgs.Worker = nil sh, err := NewSubroundsHandler(handlerArgs) require.Equal(t, ErrNilWorker, err) @@ -77,7 +124,7 @@ func TestNewSubroundsHandler(t *testing.T) { t.Run("nil signature throttler should error", func(t *testing.T) { t.Parallel() - handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs, _ := getDefaultArgumentsSubroundHandler() handlerArgs.SignatureThrottler = nil sh, err := NewSubroundsHandler(handlerArgs) require.Equal(t, ErrNilSignatureThrottler, err) @@ -86,7 +133,7 @@ func TestNewSubroundsHandler(t *testing.T) { t.Run("nil app status handler should error", func(t *testing.T) { t.Parallel() - handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs, _ := getDefaultArgumentsSubroundHandler() handlerArgs.AppStatusHandler = nil sh, err := NewSubroundsHandler(handlerArgs) require.Equal(t, ErrNilAppStatusHandler, err) @@ -95,7 +142,7 @@ func TestNewSubroundsHandler(t *testing.T) { t.Run("nil outport handler should error", func(t *testing.T) { t.Parallel() - handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs, _ := getDefaultArgumentsSubroundHandler() handlerArgs.OutportHandler = nil sh, err := NewSubroundsHandler(handlerArgs) require.Equal(t, 
ErrNilOutportHandler, err) @@ -104,7 +151,7 @@ func TestNewSubroundsHandler(t *testing.T) { t.Run("nil sent signature tracker should error", func(t *testing.T) { t.Parallel() - handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs, _ := getDefaultArgumentsSubroundHandler() handlerArgs.SentSignatureTracker = nil sh, err := NewSubroundsHandler(handlerArgs) require.Equal(t, ErrNilSentSignatureTracker, err) @@ -113,7 +160,7 @@ func TestNewSubroundsHandler(t *testing.T) { t.Run("nil enable epochs handler should error", func(t *testing.T) { t.Parallel() - handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs, _ := getDefaultArgumentsSubroundHandler() handlerArgs.EnableEpochsHandler = nil sh, err := NewSubroundsHandler(handlerArgs) require.Equal(t, ErrNilEnableEpochsHandler, err) @@ -122,7 +169,7 @@ func TestNewSubroundsHandler(t *testing.T) { t.Run("nil chain ID should error", func(t *testing.T) { t.Parallel() - handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs, _ := getDefaultArgumentsSubroundHandler() handlerArgs.ChainID = nil sh, err := NewSubroundsHandler(handlerArgs) require.Equal(t, ErrNilChainID, err) @@ -131,7 +178,7 @@ func TestNewSubroundsHandler(t *testing.T) { t.Run("empty current PID should error", func(t *testing.T) { t.Parallel() - handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs, _ := getDefaultArgumentsSubroundHandler() handlerArgs.CurrentPid = "" sh, err := NewSubroundsHandler(handlerArgs) require.Equal(t, ErrNilCurrentPid, err) @@ -140,9 +187,133 @@ func TestNewSubroundsHandler(t *testing.T) { t.Run("OK", func(t *testing.T) { t.Parallel() - handlerArgs := getDefaultArgumentsSubroundHandler() + handlerArgs, _ := getDefaultArgumentsSubroundHandler() sh, err := NewSubroundsHandler(handlerArgs) require.Nil(t, err) require.NotNil(t, sh) }) } + +func TestSubroundsHandler_initSubroundsForEpoch(t *testing.T) { + t.Parallel() + + t.Run("equivalent messages not enabled, with previous consensus type not ConsensusV1", func(t *testing.T) { + t.Parallel() + + startCalled := atomic.Int32{} + handlerArgs, consensusCore := getDefaultArgumentsSubroundHandler() + chronology := &consensus.ChronologyHandlerMock{ + StartRoundCalled: func() { + startCalled.Add(1) + }, + } + enableEpoch := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return false + }, + } + handlerArgs.Chronology = chronology + handlerArgs.EnableEpochsHandler = enableEpoch + consensusCore.SetEnableEpochsHandler(enableEpoch) + consensusCore.SetChronology(chronology) + + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + sh.currentConsensusType = ConsensusNone + + err = sh.initSubroundsForEpoch(0) + require.Nil(t, err) + require.Equal(t, ConsensusV1, sh.currentConsensusType) + require.Equal(t, int32(1), startCalled.Load()) + }) + t.Run("equivalent messages not enabled, with previous consensus type ConsensusV1", func(t *testing.T) { + t.Parallel() + + startCalled := atomic.Int32{} + handlerArgs, consensusCore := getDefaultArgumentsSubroundHandler() + chronology := &consensus.ChronologyHandlerMock{ + StartRoundCalled: func() { + startCalled.Add(1) + }, + } + enableEpoch := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return false + }, + } + handlerArgs.Chronology = chronology + handlerArgs.EnableEpochsHandler = enableEpoch + 
consensusCore.SetEnableEpochsHandler(enableEpoch) + consensusCore.SetChronology(chronology) + + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + sh.currentConsensusType = ConsensusV1 + + err = sh.initSubroundsForEpoch(0) + require.Nil(t, err) + require.Equal(t, ConsensusV1, sh.currentConsensusType) + require.Equal(t, int32(0), startCalled.Load()) + }) + t.Run("equivalent messages enabled, with previous consensus type not ConsensusV2", func(t *testing.T) { + t.Parallel() + startCalled := atomic.Int32{} + handlerArgs, consensusCore := getDefaultArgumentsSubroundHandler() + chronology := &consensus.ChronologyHandlerMock{ + StartRoundCalled: func() { + startCalled.Add(1) + }, + } + enableEpoch := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return true + }, + } + handlerArgs.Chronology = chronology + handlerArgs.EnableEpochsHandler = enableEpoch + consensusCore.SetEnableEpochsHandler(enableEpoch) + consensusCore.SetChronology(chronology) + + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + sh.currentConsensusType = ConsensusNone + + err = sh.initSubroundsForEpoch(0) + require.Nil(t, err) + require.Equal(t, ConsensusV2, sh.currentConsensusType) + require.Equal(t, int32(1), startCalled.Load()) + }) + t.Run("equivalent messages enabled, with previous consensus type ConsensusV2", func(t *testing.T) { + t.Parallel() + + startCalled := atomic.Int32{} + handlerArgs, consensusCore := getDefaultArgumentsSubroundHandler() + chronology := &consensus.ChronologyHandlerMock{ + StartRoundCalled: func() { + startCalled.Add(1) + }, + } + enableEpoch := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return true + }, + } + handlerArgs.Chronology = chronology + handlerArgs.EnableEpochsHandler = enableEpoch + consensusCore.SetEnableEpochsHandler(enableEpoch) + consensusCore.SetChronology(chronology) + + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + sh.currentConsensusType = ConsensusV2 + + err = sh.initSubroundsForEpoch(0) + require.Nil(t, err) + require.Equal(t, ConsensusV2, sh.currentConsensusType) + require.Equal(t, int32(0), startCalled.Load()) + }) +} diff --git a/testscommon/consensus/sposWorkerMock.go b/testscommon/consensus/sposWorkerMock.go index c34eeebcc8e..3aa127287de 100644 --- a/testscommon/consensus/sposWorkerMock.go +++ b/testscommon/consensus/sposWorkerMock.go @@ -34,7 +34,9 @@ type SposWorkerMock struct { // AddReceivedMessageCall - func (sposWorkerMock *SposWorkerMock) AddReceivedMessageCall(messageType consensus.MessageType, receivedMessageCall func(ctx context.Context, cnsDta *consensus.Message) bool) { - sposWorkerMock.AddReceivedMessageCallCalled(messageType, receivedMessageCall) + if sposWorkerMock.AddReceivedMessageCallCalled != nil { + sposWorkerMock.AddReceivedMessageCallCalled(messageType, receivedMessageCall) + } } // AddReceivedHeaderHandler - @@ -46,32 +48,49 @@ func (sposWorkerMock *SposWorkerMock) AddReceivedHeaderHandler(handler func(data // RemoveAllReceivedMessagesCalls - func (sposWorkerMock *SposWorkerMock) RemoveAllReceivedMessagesCalls() { - sposWorkerMock.RemoveAllReceivedMessagesCallsCalled() + if sposWorkerMock.RemoveAllReceivedMessagesCallsCalled != nil { + sposWorkerMock.RemoveAllReceivedMessagesCallsCalled() + } } // ProcessReceivedMessage - func (sposWorkerMock 
*SposWorkerMock) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { - return sposWorkerMock.ProcessReceivedMessageCalled(message) + if sposWorkerMock.ProcessReceivedMessageCalled != nil { + return sposWorkerMock.ProcessReceivedMessageCalled(message) + } + return nil } // SendConsensusMessage - func (sposWorkerMock *SposWorkerMock) SendConsensusMessage(cnsDta *consensus.Message) bool { - return sposWorkerMock.SendConsensusMessageCalled(cnsDta) + if sposWorkerMock.SendConsensusMessageCalled != nil { + return sposWorkerMock.SendConsensusMessageCalled(cnsDta) + } + return false } // Extend - func (sposWorkerMock *SposWorkerMock) Extend(subroundId int) { - sposWorkerMock.ExtendCalled(subroundId) + if sposWorkerMock.ExtendCalled != nil { + sposWorkerMock.ExtendCalled(subroundId) + } } // GetConsensusStateChangedChannel - func (sposWorkerMock *SposWorkerMock) GetConsensusStateChangedChannel() chan bool { - return sposWorkerMock.GetConsensusStateChangedChannelsCalled() + if sposWorkerMock.GetConsensusStateChangedChannelsCalled != nil { + return sposWorkerMock.GetConsensusStateChangedChannelsCalled() + } + + return nil } // BroadcastBlock - func (sposWorkerMock *SposWorkerMock) BroadcastBlock(body data.BodyHandler, header data.HeaderHandler) error { - return sposWorkerMock.GetBroadcastBlockCalled(body, header) + if sposWorkerMock.GetBroadcastBlockCalled != nil { + return sposWorkerMock.GetBroadcastBlockCalled(body, header) + } + return nil } // ExecuteStoredMessages - From c8648e7bc97bb1df6c1d4a0b00a6a03430615c0e Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Mon, 30 Sep 2024 18:21:57 +0300 Subject: [PATCH 28/30] add more unit tests for the consensus proxy --- consensus/spos/bls/proxy/subroundsHandler.go | 5 + .../spos/bls/proxy/subroundsHandler_test.go | 124 ++++++++++++++++++ 2 files changed, 129 insertions(+) diff --git a/consensus/spos/bls/proxy/subroundsHandler.go b/consensus/spos/bls/proxy/subroundsHandler.go index 63991781911..79924ccc12d 100644 --- a/consensus/spos/bls/proxy/subroundsHandler.go +++ b/consensus/spos/bls/proxy/subroundsHandler.go @@ -179,6 +179,11 @@ func (s *SubroundsHandler) initSubroundsForEpoch(epoch uint32) error { // EpochStartAction is called when the epoch starts func (s *SubroundsHandler) EpochStartAction(hdr data.HeaderHandler) { + if check.IfNil(hdr) { + log.Error("SubroundsHandler.EpochStartAction: nil header") + return + } + + err := s.initSubroundsForEpoch(hdr.GetEpoch()) if err != nil { log.Error("SubroundsHandler.EpochStartAction: cannot initialize subrounds", "error", err) diff --git a/consensus/spos/bls/proxy/subroundsHandler_test.go b/consensus/spos/bls/proxy/subroundsHandler_test.go index 25118cfc45c..d9e3af58096 100644 --- a/consensus/spos/bls/proxy/subroundsHandler_test.go +++ b/consensus/spos/bls/proxy/subroundsHandler_test.go @@ -8,6 +8,7 @@ import ( crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/stretchr/testify/require" + chainCommon "github.com/multiversx/mx-chain-go/common" mock2 "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/bootstrapperStubs" @@ -317,3 +318,126 @@ func TestSubroundsHandler_initSubroundsForEpoch(t *testing.T) { require.Equal(t, int32(0), startCalled.Load()) }) } + +func TestSubroundsHandler_Start(t *testing.T) { + t.Parallel() + + // the Start is tested via initSubroundsForEpoch, adding one of the test cases here as well + t.Run("equivalent messages not enabled, with previous
consensus type not ConsensusV1", func(t *testing.T) { + t.Parallel() + + startCalled := atomic.Int32{} + handlerArgs, consensusCore := getDefaultArgumentsSubroundHandler() + chronology := &consensus.ChronologyHandlerMock{ + StartRoundCalled: func() { + startCalled.Add(1) + }, + } + enableEpoch := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return false + }, + } + handlerArgs.Chronology = chronology + handlerArgs.EnableEpochsHandler = enableEpoch + consensusCore.SetEnableEpochsHandler(enableEpoch) + consensusCore.SetChronology(chronology) + + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + sh.currentConsensusType = ConsensusNone + + err = sh.Start(0) + require.Nil(t, err) + require.Equal(t, ConsensusV1, sh.currentConsensusType) + require.Equal(t, int32(1), startCalled.Load()) + }) +} + +func TestSubroundsHandler_NotifyOrder(t *testing.T) { + t.Parallel() + + t.Run("OK", func(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + + order := sh.NotifyOrder() + require.Equal(t, uint32(chainCommon.ConsensusHandlerOrder), order) + }) +} + +func TestSubroundsHandler_IsInterfaceNil(t *testing.T) { + t.Parallel() + + t.Run("nil handler", func(t *testing.T) { + t.Parallel() + + var sh *SubroundsHandler + require.True(t, sh.IsInterfaceNil()) + }) + t.Run("not nil handler", func(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + + require.False(t, sh.IsInterfaceNil()) + }) +} + +func TestSubroundsHandler_EpochStartAction(t *testing.T) { + t.Parallel() + + t.Run("nil handler does not panic", func(t *testing.T) { + t.Parallel() + + defer func() { + if r := recover(); r != nil { + t.Errorf("The code panicked") + } + }() + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + sh.EpochStartAction(nil) + }) + + // tested through initSubroundsForEpoch + t.Run("OK", func(t *testing.T) { + t.Parallel() + + startCalled := atomic.Int32{} + handlerArgs, consensusCore := getDefaultArgumentsSubroundHandler() + chronology := &consensus.ChronologyHandlerMock{ + StartRoundCalled: func() { + startCalled.Add(1) + }, + } + enableEpoch := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return false + }, + } + handlerArgs.Chronology = chronology + handlerArgs.EnableEpochsHandler = enableEpoch + consensusCore.SetEnableEpochsHandler(enableEpoch) + consensusCore.SetChronology(chronology) + + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + + sh.currentConsensusType = ConsensusNone + sh.EpochStartAction(&testscommon.HeaderHandlerStub{}) + require.Nil(t, err) + require.Equal(t, ConsensusV1, sh.currentConsensusType) + require.Equal(t, int32(1), startCalled.Load()) + }) +} From 492041c93b8d64106f4339ab1b303ac3fd202b4e Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Tue, 1 Oct 2024 18:37:38 +0300 Subject: [PATCH 29/30] fixes after review --- consensus/spos/bls/constants.go | 1 + consensus/spos/bls/proxy/subroundsHandler.go | 93 ++++++++++--------- .../spos/bls/proxy/subroundsHandler_test.go | 50 +++++-----
consensus/spos/bls/v1/export_test.go | 1 + .../consensus/initializers/initializers.go | 6 ++ 5 files changed, 82 insertions(+), 69 deletions(-) diff --git a/consensus/spos/bls/constants.go b/consensus/spos/bls/constants.go index 4b93cae65be..88667da3003 100644 --- a/consensus/spos/bls/constants.go +++ b/consensus/spos/bls/constants.go @@ -56,6 +56,7 @@ const ( BlockDefaultStringValue = "Undefined message type" ) +// GetStringValue returns the string value of a given MessageType func GetStringValue(msgType consensus.MessageType) string { switch msgType { case MtBlockBodyAndHeader: diff --git a/consensus/spos/bls/proxy/subroundsHandler.go b/consensus/spos/bls/proxy/subroundsHandler.go index 79924ccc12d..b15f53b03c9 100644 --- a/consensus/spos/bls/proxy/subroundsHandler.go +++ b/consensus/spos/bls/proxy/subroundsHandler.go @@ -32,14 +32,14 @@ type SubroundsHandlerArgs struct { CurrentPid core.PeerID } -// SubroundsFactory defines the methods needed to generate the subrounds -type SubroundsFactory interface { +// subroundsFactory defines the methods needed to generate the subrounds +type subroundsFactory interface { GenerateSubrounds() error SetOutportHandler(driver outport.OutportHandler) IsInterfaceNil() bool } -type ConsensusStateMachineType int +type consensusStateMachineType int // SubroundsHandler struct contains the needed data for the SubroundsHandler type SubroundsHandler struct { @@ -54,68 +54,77 @@ type SubroundsHandler struct { enableEpochsHandler core.EnableEpochsHandler chainID []byte currentPid core.PeerID - currentConsensusType ConsensusStateMachineType + currentConsensusType consensusStateMachineType } const ( - ConsensusNone ConsensusStateMachineType = iota - ConsensusV1 - ConsensusV2 + consensusNone consensusStateMachineType = iota + consensusV1 + consensusV2 ) +// NewSubroundsHandler creates a new SubroundsHandler object func NewSubroundsHandler(args *SubroundsHandlerArgs) (*SubroundsHandler, error) { + err := checkArgs(args) + if err != nil { + return nil, err + } + + subroundHandler := &SubroundsHandler{ + chronology: args.Chronology, + consensusCoreHandler: args.ConsensusCoreHandler, + consensusState: args.ConsensusState, + worker: args.Worker, + signatureThrottler: args.SignatureThrottler, + appStatusHandler: args.AppStatusHandler, + outportHandler: args.OutportHandler, + sentSignatureTracker: args.SentSignatureTracker, + enableEpochsHandler: args.EnableEpochsHandler, + chainID: args.ChainID, + currentPid: args.CurrentPid, + currentConsensusType: consensusNone, + } + + subroundHandler.consensusCoreHandler.EpochStartRegistrationHandler().RegisterHandler(subroundHandler) + + return subroundHandler, nil +} + +func checkArgs(args *SubroundsHandlerArgs) error { if check.IfNil(args.Chronology) { - return nil, ErrNilChronologyHandler + return ErrNilChronologyHandler } if check.IfNil(args.ConsensusCoreHandler) { - return nil, ErrNilConsensusCoreHandler + return ErrNilConsensusCoreHandler } if check.IfNil(args.ConsensusState) { - return nil, ErrNilConsensusState + return ErrNilConsensusState } if check.IfNil(args.Worker) { - return nil, ErrNilWorker + return ErrNilWorker } if check.IfNil(args.SignatureThrottler) { - return nil, ErrNilSignatureThrottler + return ErrNilSignatureThrottler } if check.IfNil(args.AppStatusHandler) { - return nil, ErrNilAppStatusHandler + return ErrNilAppStatusHandler } if check.IfNil(args.OutportHandler) { - return nil, ErrNilOutportHandler + return ErrNilOutportHandler } if check.IfNil(args.SentSignatureTracker) { - return nil, 
ErrNilSentSignatureTracker + return ErrNilSentSignatureTracker } if check.IfNil(args.EnableEpochsHandler) { - return nil, ErrNilEnableEpochsHandler + return ErrNilEnableEpochsHandler } if args.ChainID == nil { - return nil, ErrNilChainID + return ErrNilChainID } if len(args.CurrentPid) == 0 { - return nil, ErrNilCurrentPid - } - - subroundHandler := &SubroundsHandler{ - chronology: args.Chronology, - consensusCoreHandler: args.ConsensusCoreHandler, - consensusState: args.ConsensusState, - worker: args.Worker, - signatureThrottler: args.SignatureThrottler, - appStatusHandler: args.AppStatusHandler, - outportHandler: args.OutportHandler, - sentSignatureTracker: args.SentSignatureTracker, - enableEpochsHandler: args.EnableEpochsHandler, - chainID: args.ChainID, - currentPid: args.CurrentPid, - currentConsensusType: ConsensusNone, + return ErrNilCurrentPid } - - subroundHandler.consensusCoreHandler.EpochStartRegistrationHandler().RegisterHandler(subroundHandler) - - return subroundHandler, nil + return nil } // Start starts the sub-rounds handler @@ -125,13 +134,13 @@ func (s *SubroundsHandler) Start(epoch uint32) error { func (s *SubroundsHandler) initSubroundsForEpoch(epoch uint32) error { var err error - var fct SubroundsFactory + var fct subroundsFactory if s.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, epoch) { - if s.currentConsensusType == ConsensusV2 { + if s.currentConsensusType == consensusV2 { return nil } - s.currentConsensusType = ConsensusV2 + s.currentConsensusType = consensusV2 fct, err = v2.NewSubroundsFactory( s.consensusCoreHandler, s.consensusState, @@ -143,11 +152,11 @@ func (s *SubroundsHandler) initSubroundsForEpoch(epoch uint32) error { s.signatureThrottler, ) } else { - if s.currentConsensusType == ConsensusV1 { + if s.currentConsensusType == consensusV1 { return nil } - s.currentConsensusType = ConsensusV1 + s.currentConsensusType = consensusV1 fct, err = v1.NewSubroundsFactory( s.consensusCoreHandler, s.consensusState, diff --git a/consensus/spos/bls/proxy/subroundsHandler_test.go b/consensus/spos/bls/proxy/subroundsHandler_test.go index d9e3af58096..148e9bc2fd7 100644 --- a/consensus/spos/bls/proxy/subroundsHandler_test.go +++ b/consensus/spos/bls/proxy/subroundsHandler_test.go @@ -198,7 +198,7 @@ func TestNewSubroundsHandler(t *testing.T) { func TestSubroundsHandler_initSubroundsForEpoch(t *testing.T) { t.Parallel() - t.Run("equivalent messages not enabled, with previous consensus type not ConsensusV1", func(t *testing.T) { + t.Run("equivalent messages not enabled, with previous consensus type not consensusV1", func(t *testing.T) { t.Parallel() startCalled := atomic.Int32{} @@ -221,14 +221,14 @@ func TestSubroundsHandler_initSubroundsForEpoch(t *testing.T) { sh, err := NewSubroundsHandler(handlerArgs) require.Nil(t, err) require.NotNil(t, sh) - sh.currentConsensusType = ConsensusNone + sh.currentConsensusType = consensusNone err = sh.initSubroundsForEpoch(0) require.Nil(t, err) - require.Equal(t, ConsensusV1, sh.currentConsensusType) + require.Equal(t, consensusV1, sh.currentConsensusType) require.Equal(t, int32(1), startCalled.Load()) }) - t.Run("equivalent messages not enabled, with previous consensus type ConsensusV1", func(t *testing.T) { + t.Run("equivalent messages not enabled, with previous consensus type consensusV1", func(t *testing.T) { t.Parallel() startCalled := atomic.Int32{} @@ -251,14 +251,14 @@ func TestSubroundsHandler_initSubroundsForEpoch(t *testing.T) { sh, err := NewSubroundsHandler(handlerArgs) require.Nil(t, err) 
require.NotNil(t, sh) - sh.currentConsensusType = ConsensusV1 + sh.currentConsensusType = consensusV1 err = sh.initSubroundsForEpoch(0) require.Nil(t, err) - require.Equal(t, ConsensusV1, sh.currentConsensusType) + require.Equal(t, consensusV1, sh.currentConsensusType) require.Equal(t, int32(0), startCalled.Load()) }) - t.Run("equivalent messages enabled, with previous consensus type not ConsensusV2", func(t *testing.T) { + t.Run("equivalent messages enabled, with previous consensus type not consensusV2", func(t *testing.T) { t.Parallel() startCalled := atomic.Int32{} handlerArgs, consensusCore := getDefaultArgumentsSubroundHandler() @@ -280,14 +280,14 @@ func TestSubroundsHandler_initSubroundsForEpoch(t *testing.T) { sh, err := NewSubroundsHandler(handlerArgs) require.Nil(t, err) require.NotNil(t, sh) - sh.currentConsensusType = ConsensusNone + sh.currentConsensusType = consensusNone err = sh.initSubroundsForEpoch(0) require.Nil(t, err) - require.Equal(t, ConsensusV2, sh.currentConsensusType) + require.Equal(t, consensusV2, sh.currentConsensusType) require.Equal(t, int32(1), startCalled.Load()) }) - t.Run("equivalent messages enabled, with previous consensus type ConsensusV2", func(t *testing.T) { + t.Run("equivalent messages enabled, with previous consensus type consensusV2", func(t *testing.T) { t.Parallel() startCalled := atomic.Int32{} @@ -310,11 +310,11 @@ func TestSubroundsHandler_initSubroundsForEpoch(t *testing.T) { sh, err := NewSubroundsHandler(handlerArgs) require.Nil(t, err) require.NotNil(t, sh) - sh.currentConsensusType = ConsensusV2 + sh.currentConsensusType = consensusV2 err = sh.initSubroundsForEpoch(0) require.Nil(t, err) - require.Equal(t, ConsensusV2, sh.currentConsensusType) + require.Equal(t, consensusV2, sh.currentConsensusType) require.Equal(t, int32(0), startCalled.Load()) }) } @@ -323,7 +323,7 @@ func TestSubroundsHandler_Start(t *testing.T) { t.Parallel() // the Start is tested via initSubroundsForEpoch, adding one of the test cases here as well - t.Run("equivalent messages not enabled, with previous consensus type not ConsensusV1", func(t *testing.T) { + t.Run("equivalent messages not enabled, with previous consensus type not consensusV1", func(t *testing.T) { t.Parallel() startCalled := atomic.Int32{} @@ -346,11 +346,11 @@ func TestSubroundsHandler_Start(t *testing.T) { sh, err := NewSubroundsHandler(handlerArgs) require.Nil(t, err) require.NotNil(t, sh) - sh.currentConsensusType = ConsensusNone + sh.currentConsensusType = consensusNone err = sh.Start(0) require.Nil(t, err) - require.Equal(t, ConsensusV1, sh.currentConsensusType) + require.Equal(t, consensusV1, sh.currentConsensusType) require.Equal(t, int32(1), startCalled.Load()) }) } @@ -358,17 +358,13 @@ func TestSubroundsHandler_Start(t *testing.T) { func TestSubroundsHandler_NotifyOrder(t *testing.T) { t.Parallel() - t.Run("OK", func(t *testing.T) { - t.Parallel() - - handlerArgs, _ := getDefaultArgumentsSubroundHandler() - sh, err := NewSubroundsHandler(handlerArgs) - require.Nil(t, err) - require.NotNil(t, sh) + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) - order := sh.NotifyOrder() - require.Equal(t, uint32(chainCommon.ConsensusHandlerOrder), order) - }) + order := sh.NotifyOrder() + require.Equal(t, uint32(chainCommon.ConsensusHandlerOrder), order) } func TestSubroundsHandler_IsInterfaceNil(t *testing.T) { @@ -434,10 +430,10 @@ func TestSubroundsHandler_EpochStartAction(t *testing.T) { 
require.Nil(t, err) require.NotNil(t, sh) - sh.currentConsensusType = ConsensusNone + sh.currentConsensusType = consensusNone sh.EpochStartAction(&testscommon.HeaderHandlerStub{}) require.Nil(t, err) - require.Equal(t, ConsensusV1, sh.currentConsensusType) + require.Equal(t, consensusV1, sh.currentConsensusType) require.Equal(t, int32(1), startCalled.Load()) }) } diff --git a/consensus/spos/bls/v1/export_test.go b/consensus/spos/bls/v1/export_test.go index 3ef8b963d2e..4a386a57933 100644 --- a/consensus/spos/bls/v1/export_test.go +++ b/consensus/spos/bls/v1/export_test.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) +// ProcessingThresholdPercent exports the internal processingThresholdPercent const ProcessingThresholdPercent = processingThresholdPercent // factory diff --git a/testscommon/consensus/initializers/initializers.go b/testscommon/consensus/initializers/initializers.go index 30bb88b44cb..aa3381281de 100644 --- a/testscommon/consensus/initializers/initializers.go +++ b/testscommon/consensus/initializers/initializers.go @@ -18,6 +18,7 @@ func createEligibleList(size int) []string { return eligibleList } +// CreateEligibleListFromMap creates a list of eligible nodes from a map of private keys func CreateEligibleListFromMap(mapKeys map[string]crypto.PrivateKey) []string { eligibleList := make([]string, 0, len(mapKeys)) for key := range mapKeys { @@ -27,18 +28,22 @@ func CreateEligibleListFromMap(mapKeys map[string]crypto.PrivateKey) []string { return eligibleList } +// InitConsensusStateWithNodesCoordinator creates a consensus state with a nodes coordinator func InitConsensusStateWithNodesCoordinator(validatorsGroupSelector nodesCoordinator.NodesCoordinator) *spos.ConsensusState { return initConsensusStateWithKeysHandlerAndNodesCoordinator(&testscommon.KeysHandlerStub{}, validatorsGroupSelector) } +// InitConsensusState creates a consensus state func InitConsensusState() *spos.ConsensusState { return InitConsensusStateWithKeysHandler(&testscommon.KeysHandlerStub{}) } +// InitConsensusStateWithArgs creates a consensus state with the given arguments func InitConsensusStateWithArgs(keysHandler consensus.KeysHandler, mapKeys map[string]crypto.PrivateKey) *spos.ConsensusState { return initConsensusStateWithKeysHandlerWithGroupSizeWithRealKeys(keysHandler, mapKeys) } +// InitConsensusStateWithKeysHandler creates a consensus state with a keys handler func InitConsensusStateWithKeysHandler(keysHandler consensus.KeysHandler) *spos.ConsensusState { consensusGroupSize := 9 return initConsensusStateWithKeysHandlerWithGroupSize(keysHandler, consensusGroupSize) @@ -53,6 +58,7 @@ func initConsensusStateWithKeysHandlerAndNodesCoordinator(keysHandler consensus.
return createConsensusStateWithNodes(eligibleNodesPubKeys, consensusValidators, leader, keysHandler) } +// InitConsensusStateWithArgsVerifySignature creates a consensus state with the given arguments for signature verification func InitConsensusStateWithArgsVerifySignature(keysHandler consensus.KeysHandler, keys []string) *spos.ConsensusState { numberOfKeys := len(keys) eligibleNodesPubKeys := make(map[string]struct{}, numberOfKeys) From 316379955ecee64e9d7ebdb4f0504e06d94877d4 Mon Sep 17 00:00:00 2001 From: Adrian Dobrita Date: Wed, 2 Oct 2024 18:08:04 +0300 Subject: [PATCH 30/30] add outport handler on factory constructor --- consensus/spos/bls/proxy/subroundsHandler.go | 5 ++++- consensus/spos/bls/v1/blsSubroundsFactory.go | 3 +++ .../spos/bls/v1/blsSubroundsFactory_test.go | 18 ++++++++++++++++++ consensus/spos/bls/v2/blsSubroundsFactory.go | 3 +++ .../spos/bls/v2/blsSubroundsFactory_test.go | 19 +++++++++++++++++++ 5 files changed, 47 insertions(+), 1 deletion(-) diff --git a/consensus/spos/bls/proxy/subroundsHandler.go b/consensus/spos/bls/proxy/subroundsHandler.go index b15f53b03c9..2b284db5144 100644 --- a/consensus/spos/bls/proxy/subroundsHandler.go +++ b/consensus/spos/bls/proxy/subroundsHandler.go @@ -124,6 +124,8 @@ func checkArgs(args *SubroundsHandlerArgs) error { if len(args.CurrentPid) == 0 { return ErrNilCurrentPid } + // outport handler can be nil if not configured so no need to check it + return nil } @@ -150,6 +152,7 @@ func (s *SubroundsHandler) initSubroundsForEpoch(epoch uint32) error { s.appStatusHandler, s.sentSignatureTracker, s.signatureThrottler, + s.outportHandler, ) } else { if s.currentConsensusType == consensusV1 { @@ -165,6 +168,7 @@ func (s *SubroundsHandler) initSubroundsForEpoch(epoch uint32) error { s.currentPid, s.appStatusHandler, s.sentSignatureTracker, + s.outportHandler, ) } if err != nil { @@ -176,7 +180,6 @@ func (s *SubroundsHandler) initSubroundsForEpoch(epoch uint32) error { log.Warn("SubroundsHandler.initSubroundsForEpoch: cannot close the chronology", "error", err) } - fct.SetOutportHandler(s.outportHandler) err = fct.GenerateSubrounds() if err != nil { return err diff --git a/consensus/spos/bls/v1/blsSubroundsFactory.go b/consensus/spos/bls/v1/blsSubroundsFactory.go index 12cb0c59982..70915c5f30b 100644 --- a/consensus/spos/bls/v1/blsSubroundsFactory.go +++ b/consensus/spos/bls/v1/blsSubroundsFactory.go @@ -34,7 +34,9 @@ func NewSubroundsFactory( currentPid core.PeerID, appStatusHandler core.AppStatusHandler, sentSignaturesTracker spos.SentSignaturesTracker, + outportHandler outport.OutportHandler, ) (*factory, error) { + // no need to check the outportHandler, it can be nil err := checkNewFactoryParams( consensusDataContainer, consensusState, @@ -55,6 +57,7 @@ func NewSubroundsFactory( chainID: chainID, currentPid: currentPid, sentSignaturesTracker: sentSignaturesTracker, + outportHandler: outportHandler, } return &fct, nil diff --git a/consensus/spos/bls/v1/blsSubroundsFactory_test.go b/consensus/spos/bls/v1/blsSubroundsFactory_test.go index 280c0c74bf3..f057daae16f 100644 --- a/consensus/spos/bls/v1/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/v1/blsSubroundsFactory_test.go @@ -81,6 +81,7 @@ func initFactoryWithContainer(container *consensusMock.ConsensusCoreMock) v1.Fac currentPid, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, + nil, ) return fct @@ -130,6 +131,7 @@ func TestFactory_NewFactoryNilContainerShouldFail(t *testing.T) { currentPid, &statusHandler.AppStatusHandlerStub{}, 
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -150,6 +152,7 @@ func TestFactory_NewFactoryNilConsensusStateShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -172,6 +175,7 @@ func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -194,6 +198,7 @@ func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -216,6 +221,7 @@ func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -238,6 +244,7 @@ func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -260,6 +267,7 @@ func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -282,6 +290,7 @@ func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -304,6 +313,7 @@ func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -326,6 +336,7 @@ func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -348,6 +359,7 @@ func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -370,6 +382,7 @@ func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -392,6 +405,7 @@ func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -412,6 +426,7 @@ func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -433,6 +448,7 @@ func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) {
 		currentPid,
 		nil,
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -454,6 +470,7 @@ func TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		nil,
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -483,6 +500,7 @@ func TestFactory_NewFactoryEmptyChainIDShouldFail(t *testing.T) {
 		currentPid,
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
diff --git a/consensus/spos/bls/v2/blsSubroundsFactory.go b/consensus/spos/bls/v2/blsSubroundsFactory.go
index 756cf1956f7..52baeb375c2 100644
--- a/consensus/spos/bls/v2/blsSubroundsFactory.go
+++ b/consensus/spos/bls/v2/blsSubroundsFactory.go
@@ -36,7 +36,9 @@ func NewSubroundsFactory(
 	appStatusHandler core.AppStatusHandler,
 	sentSignaturesTracker spos.SentSignaturesTracker,
 	signatureThrottler core.Throttler,
+	outportHandler outport.OutportHandler,
 ) (*factory, error) {
+	// no need to check the outport handler, it can be nil
 	err := checkNewFactoryParams(
 		consensusDataContainer,
 		consensusState,
@@ -59,6 +61,7 @@ func NewSubroundsFactory(
 		currentPid:            currentPid,
 		sentSignaturesTracker: sentSignaturesTracker,
 		signatureThrottler:    signatureThrottler,
+		outportHandler:        outportHandler,
 	}
 
 	return &fct, nil
diff --git a/consensus/spos/bls/v2/blsSubroundsFactory_test.go b/consensus/spos/bls/v2/blsSubroundsFactory_test.go
index 89fd8406c7c..bfafd967169 100644
--- a/consensus/spos/bls/v2/blsSubroundsFactory_test.go
+++ b/consensus/spos/bls/v2/blsSubroundsFactory_test.go
@@ -71,6 +71,7 @@ func initFactoryWithContainer(container *testscommonConsensus.ConsensusCoreMock)
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	return fct
@@ -121,6 +122,7 @@ func TestFactory_NewFactoryNilContainerShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -142,6 +144,7 @@ func TestFactory_NewFactoryNilConsensusStateShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -165,6 +168,7 @@ func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -188,6 +192,7 @@ func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -211,6 +216,7 @@ func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -234,6 +240,7 @@ func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -257,6 +264,7 @@ func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -280,6 +288,7 @@ func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -303,6 +312,7 @@ func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -326,6 +336,7 @@ func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -349,6 +360,7 @@ func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -372,6 +384,7 @@ func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -395,6 +408,7 @@ func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -416,6 +430,7 @@ func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -438,6 +453,7 @@ func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) {
 		nil,
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -460,6 +476,7 @@ func TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		nil,
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -482,6 +499,7 @@ func TestFactory_NewFactoryNilThrottlerShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		nil,
+		nil,
 	)
 
 	assert.Nil(t, fct)
@@ -512,6 +530,7 @@ func TestFactory_NewFactoryEmptyChainIDShouldFail(t *testing.T) {
 		&statusHandler.AppStatusHandlerStub{},
 		&testscommon.SentSignatureTrackerStub{},
 		&dataRetrieverMocks.ThrottlerStub{},
+		nil,
 	)
 
 	assert.Nil(t, fct)
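
For reference, a minimal sketch of a caller using the v2 constructor after this patch; the leading arguments (container, consensusState, worker, chainID, currentPid) are assumed to keep the v1 ordering, and the stub types mirror the test fixtures above rather than production wiring:

// sketch only: argument order assumed from the v1 constructor and the v2 test fixtures
fct, err := v2.NewSubroundsFactory(
	container,      // spos.ConsensusCoreHandler
	consensusState, // consensus state for the current epoch
	worker,         // spos.WorkerHandler
	chainID,        // []byte
	currentPid,     // core.PeerID
	&statusHandler.AppStatusHandlerStub{},
	&testscommon.SentSignatureTrackerStub{},
	&dataRetrieverMocks.ThrottlerStub{},
	nil, // outport.OutportHandler; nil is accepted when no outport driver is configured
)
if err != nil {
	return err
}
// the outport handler is stored by the constructor, so no separate SetOutportHandler call is needed
err = fct.GenerateSubrounds()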