diff --git a/common/constants.go b/common/constants.go index 98473acfd9f..4f9ac681316 100644 --- a/common/constants.go +++ b/common/constants.go @@ -843,8 +843,10 @@ const ( ChainParametersOrder // NodesCoordinatorOrder defines the order in which NodesCoordinator is notified of a start of epoch event NodesCoordinatorOrder - // ConsensusOrder defines the order in which Consensus is notified of a start of epoch event - ConsensusOrder + // ConsensusHandlerOrder defines the order in which ConsensusHandler is notified of a start of epoch event + ConsensusHandlerOrder + // ConsensusStartRoundOrder defines the order in which Consensus StartRound subround is notified of a start of epoch event + ConsensusStartRoundOrder // NetworkShardingOrder defines the order in which the network sharding subsystem is notified of a start of epoch event NetworkShardingOrder // IndexerOrder defines the order in which indexer is notified of a start of epoch event diff --git a/consensus/chronology/chronology_test.go b/consensus/chronology/chronology_test.go index 3f57da37f9b..c14a5be13e5 100644 --- a/consensus/chronology/chronology_test.go +++ b/consensus/chronology/chronology_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/chronology" "github.com/multiversx/mx-chain-go/consensus/mock" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) @@ -117,7 +118,7 @@ func TestChronology_StartRoundShouldReturnWhenRoundIndexIsNegative(t *testing.T) t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} roundHandlerMock.IndexCalled = func() int64 { return -1 } @@ -151,7 +152,7 @@ func TestChronology_StartRoundShouldReturnWhenDoWorkReturnsFalse(t *testing.T) { t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} roundHandlerMock.UpdateRound(roundHandlerMock.TimeStamp(), roundHandlerMock.TimeStamp().Add(roundHandlerMock.TimeDuration())) arg.RoundHandler = roundHandlerMock chr, _ := chronology.NewChronology(arg) @@ -168,7 +169,7 @@ func TestChronology_StartRoundShouldWork(t *testing.T) { t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} roundHandlerMock.UpdateRound(roundHandlerMock.TimeStamp(), roundHandlerMock.TimeStamp().Add(roundHandlerMock.TimeDuration())) arg.RoundHandler = roundHandlerMock chr, _ := chronology.NewChronology(arg) @@ -221,7 +222,7 @@ func TestChronology_InitRoundShouldNotSetSubroundWhenRoundIndexIsNegative(t *tes t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} arg.RoundHandler = roundHandlerMock arg.GenesisTime = arg.SyncTimer.CurrentTime() chr, _ := chronology.NewChronology(arg) @@ -242,7 +243,7 @@ func TestChronology_InitRoundShouldSetSubroundWhenRoundIndexIsPositive(t *testin t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} roundHandlerMock.UpdateRound(roundHandlerMock.TimeStamp(), roundHandlerMock.TimeStamp().Add(roundHandlerMock.TimeDuration())) arg.RoundHandler = roundHandlerMock arg.GenesisTime = arg.SyncTimer.CurrentTime() @@ -259,7 +260,7 
@@ func TestChronology_StartRoundShouldNotUpdateRoundWhenCurrentRoundIsNotFinished( t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} arg.RoundHandler = roundHandlerMock arg.GenesisTime = arg.SyncTimer.CurrentTime() chr, _ := chronology.NewChronology(arg) @@ -273,7 +274,7 @@ func TestChronology_StartRoundShouldNotUpdateRoundWhenCurrentRoundIsNotFinished( func TestChronology_StartRoundShouldUpdateRoundWhenCurrentRoundIsFinished(t *testing.T) { t.Parallel() arg := getDefaultChronologyArg() - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} arg.RoundHandler = roundHandlerMock arg.GenesisTime = arg.SyncTimer.CurrentTime() chr, _ := chronology.NewChronology(arg) @@ -317,8 +318,8 @@ func TestChronology_CheckIfStatusHandlerWorks(t *testing.T) { func getDefaultChronologyArg() chronology.ArgChronology { return chronology.ArgChronology{ GenesisTime: time.Now(), - RoundHandler: &mock.RoundHandlerMock{}, - SyncTimer: &mock.SyncTimerMock{}, + RoundHandler: &consensusMocks.RoundHandlerMock{}, + SyncTimer: &consensusMocks.SyncTimerMock{}, AppStatusHandler: statusHandlerMock.NewAppStatusHandlerMock(), Watchdog: &mock.WatchdogMock{}, } diff --git a/consensus/interface.go b/consensus/interface.go index cd05efeadaa..95df29736ed 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/p2p" ) diff --git a/consensus/round/round_test.go b/consensus/round/round_test.go index ede509d7176..ec1f08ec82d 100644 --- a/consensus/round/round_test.go +++ b/consensus/round/round_test.go @@ -5,8 +5,10 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/consensus/round" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/stretchr/testify/assert" ) @@ -28,7 +30,7 @@ func TestRound_NewRoundShouldWork(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} rnd, err := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) @@ -41,7 +43,7 @@ func TestRound_UpdateRoundShouldNotChangeAnything(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) oldIndex := rnd.Index() @@ -61,7 +63,7 @@ func TestRound_UpdateRoundShouldAdvanceOneRound(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) oldIndex := rnd.Index() @@ -76,7 +78,7 @@ func TestRound_IndexShouldReturnFirstIndex(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) rnd.UpdateRound(genesisTime, genesisTime.Add(roundTimeDuration/2)) @@ -90,7 +92,7 @@ func TestRound_TimeStampShouldReturnTimeStampOfTheNextRound(t *testing.T) { genesisTime := time.Now() - syncTimerMock := 
&mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) rnd.UpdateRound(genesisTime, genesisTime.Add(roundTimeDuration+roundTimeDuration/2)) @@ -104,7 +106,7 @@ func TestRound_TimeDurationShouldReturnTheDurationOfOneRound(t *testing.T) { genesisTime := time.Now() - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} rnd, _ := round.NewRound(genesisTime, genesisTime, roundTimeDuration, syncTimerMock, 0) timeDuration := rnd.TimeDuration() @@ -117,7 +119,7 @@ func TestRound_RemainingTimeInCurrentRoundShouldReturnPositiveValue(t *testing.T genesisTime := time.Unix(0, 0) - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} timeElapsed := int64(roundTimeDuration - 1) @@ -138,7 +140,7 @@ func TestRound_RemainingTimeInCurrentRoundShouldReturnNegativeValue(t *testing.T genesisTime := time.Unix(0, 0) - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} timeElapsed := int64(roundTimeDuration + 1) diff --git a/consensus/spos/bls/blsWorker.go b/consensus/spos/bls/blsWorker.go index 456d4e8b1d8..b8ceffe9122 100644 --- a/consensus/spos/bls/blsWorker.go +++ b/consensus/spos/bls/blsWorker.go @@ -5,7 +5,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/spos" ) -// peerMaxMessagesPerSec defines how many messages can be propagated by a pid in a round. The value was chosen by +// PeerMaxMessagesPerSec defines how many messages can be propagated by a pid in a round. The value was chosen by // following the next premises: // 1. a leader can propagate as maximum as 3 messages per round: proposed header block + proposed body + final info; // 2. due to the fact that a delayed signature of the proposer (from previous round) can be received in the current round @@ -16,15 +16,15 @@ import ( // // Validators only send one signature message in a round, treating the edge case of a delayed message, will need at most // 2 messages per round (which is ok as it is below the set value of 5) -const peerMaxMessagesPerSec = uint32(6) +const PeerMaxMessagesPerSec = uint32(6) -// defaultMaxNumOfMessageTypeAccepted represents the maximum number of the same message type accepted in one round to be +// DefaultMaxNumOfMessageTypeAccepted represents the maximum number of the same message type accepted in one round to be // received from the same public key for the default message types -const defaultMaxNumOfMessageTypeAccepted = uint32(1) +const DefaultMaxNumOfMessageTypeAccepted = uint32(1) -// maxNumOfMessageTypeSignatureAccepted represents the maximum number of the signature message type accepted in one round to be +// MaxNumOfMessageTypeSignatureAccepted represents the maximum number of the signature message type accepted in one round to be // received from the same public key -const maxNumOfMessageTypeSignatureAccepted = uint32(2) +const MaxNumOfMessageTypeSignatureAccepted = uint32(2) // worker defines the data needed by spos to communicate between nodes which are in the validators group type worker struct { @@ -52,17 +52,17 @@ func (wrk *worker) InitReceivedMessages() map[consensus.MessageType][]*consensus // GetMaxMessagesInARoundPerPeer returns the maximum number of messages a peer can send per round for BLS func (wrk *worker) GetMaxMessagesInARoundPerPeer() uint32 { - return peerMaxMessagesPerSec + return PeerMaxMessagesPerSec } // GetStringValue gets the name of the messageType func (wrk *worker) 
GetStringValue(messageType consensus.MessageType) string { - return getStringValue(messageType) + return GetStringValue(messageType) } // GetSubroundName gets the subround name for the subround id provided func (wrk *worker) GetSubroundName(subroundId int) string { - return getSubroundName(subroundId) + return GetSubroundName(subroundId) } // IsMessageWithBlockBodyAndHeader returns if the current messageType is about block body and header @@ -151,10 +151,10 @@ func (wrk *worker) CanProceed(consensusState *spos.ConsensusState, msgType conse // GetMaxNumOfMessageTypeAccepted returns the maximum number of accepted consensus message types per round, per public key func (wrk *worker) GetMaxNumOfMessageTypeAccepted(msgType consensus.MessageType) uint32 { if msgType == MtSignature { - return maxNumOfMessageTypeSignatureAccepted + return MaxNumOfMessageTypeSignatureAccepted } - return defaultMaxNumOfMessageTypeAccepted + return DefaultMaxNumOfMessageTypeAccepted } // IsInterfaceNil returns true if there is no value under the interface diff --git a/consensus/spos/bls/blsWorker_test.go b/consensus/spos/bls/blsWorker_test.go index 75cc8f3b412..8d39b02e5f1 100644 --- a/consensus/spos/bls/blsWorker_test.go +++ b/consensus/spos/bls/blsWorker_test.go @@ -4,156 +4,14 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core/check" - crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/stretchr/testify/assert" - "golang.org/x/exp/slices" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" - "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" - "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" ) -func createEligibleList(size int) []string { - eligibleList := make([]string, 0) - for i := 0; i < size; i++ { - eligibleList = append(eligibleList, string([]byte{byte(i + 65)})) - } - return eligibleList -} - -func createEligibleListFromMap(mapKeys map[string]crypto.PrivateKey) []string { - eligibleList := make([]string, 0, len(mapKeys)) - for key := range mapKeys { - eligibleList = append(eligibleList, key) - } - slices.Sort(eligibleList) - return eligibleList -} - -func initConsensusStateWithNodesCoordinator(validatorsGroupSelector nodesCoordinator.NodesCoordinator) *spos.ConsensusState { - return initConsensusStateWithKeysHandlerAndNodesCoordinator(&testscommon.KeysHandlerStub{}, validatorsGroupSelector) -} - -func initConsensusState() *spos.ConsensusState { - return initConsensusStateWithKeysHandler(&testscommon.KeysHandlerStub{}) -} - -func initConsensusStateWithArgs(keysHandler consensus.KeysHandler, mapKeys map[string]crypto.PrivateKey) *spos.ConsensusState { - return initConsensusStateWithKeysHandlerWithGroupSizeWithRealKeys(keysHandler, mapKeys) -} - -func initConsensusStateWithKeysHandler(keysHandler consensus.KeysHandler) *spos.ConsensusState { - consensusGroupSize := 9 - return initConsensusStateWithKeysHandlerWithGroupSize(keysHandler, consensusGroupSize) -} - -func initConsensusStateWithKeysHandlerAndNodesCoordinator(keysHandler consensus.KeysHandler, validatorsGroupSelector nodesCoordinator.NodesCoordinator) *spos.ConsensusState { - leader, consensusValidators, _ := validatorsGroupSelector.GetConsensusValidatorsPublicKeys([]byte("randomness"), 0, 0, 0) - eligibleNodesPubKeys := make(map[string]struct{}) - for _, key := range consensusValidators { - eligibleNodesPubKeys[key] = struct{}{} - } - return 
createConsensusStateWithNodes(eligibleNodesPubKeys, consensusValidators, leader, keysHandler) -} - -func initConsensusStateWithArgsVerifySignature(keysHandler consensus.KeysHandler, keys []string) *spos.ConsensusState { - numberOfKeys := len(keys) - eligibleNodesPubKeys := make(map[string]struct{}, numberOfKeys) - for _, key := range keys { - eligibleNodesPubKeys[key] = struct{}{} - } - - indexLeader := 1 - rcns, _ := spos.NewRoundConsensus( - eligibleNodesPubKeys, - numberOfKeys, - keys[indexLeader], - keysHandler, - ) - rcns.SetConsensusGroup(keys) - rcns.ResetRoundState() - - pBFTThreshold := numberOfKeys*2/3 + 1 - pBFTFallbackThreshold := numberOfKeys*1/2 + 1 - rthr := spos.NewRoundThreshold() - rthr.SetThreshold(1, 1) - rthr.SetThreshold(2, pBFTThreshold) - rthr.SetFallbackThreshold(1, 1) - rthr.SetFallbackThreshold(2, pBFTFallbackThreshold) - - rstatus := spos.NewRoundStatus() - rstatus.ResetRoundStatus() - cns := spos.NewConsensusState( - rcns, - rthr, - rstatus, - ) - cns.Data = []byte("X") - cns.RoundIndex = 0 - - return cns -} - -func initConsensusStateWithKeysHandlerWithGroupSize(keysHandler consensus.KeysHandler, consensusGroupSize int) *spos.ConsensusState { - eligibleList := createEligibleList(consensusGroupSize) - - eligibleNodesPubKeys := make(map[string]struct{}) - for _, key := range eligibleList { - eligibleNodesPubKeys[key] = struct{}{} - } - - return createConsensusStateWithNodes(eligibleNodesPubKeys, eligibleList, eligibleList[0], keysHandler) -} - -func initConsensusStateWithKeysHandlerWithGroupSizeWithRealKeys(keysHandler consensus.KeysHandler, mapKeys map[string]crypto.PrivateKey) *spos.ConsensusState { - eligibleList := createEligibleListFromMap(mapKeys) - - eligibleNodesPubKeys := make(map[string]struct{}, len(eligibleList)) - for _, key := range eligibleList { - eligibleNodesPubKeys[key] = struct{}{} - } - - return createConsensusStateWithNodes(eligibleNodesPubKeys, eligibleList, eligibleList[0], keysHandler) -} - -func createConsensusStateWithNodes(eligibleNodesPubKeys map[string]struct{}, consensusValidators []string, leader string, keysHandler consensus.KeysHandler) *spos.ConsensusState { - consensusGroupSize := len(consensusValidators) - rcns, _ := spos.NewRoundConsensus( - eligibleNodesPubKeys, - consensusGroupSize, - consensusValidators[1], - keysHandler, - ) - - rcns.SetConsensusGroup(consensusValidators) - rcns.SetLeader(leader) - rcns.ResetRoundState() - - pBFTThreshold := consensusGroupSize*2/3 + 1 - pBFTFallbackThreshold := consensusGroupSize*1/2 + 1 - - rthr := spos.NewRoundThreshold() - rthr.SetThreshold(1, 1) - rthr.SetThreshold(2, pBFTThreshold) - rthr.SetFallbackThreshold(1, 1) - rthr.SetFallbackThreshold(2, pBFTFallbackThreshold) - - rstatus := spos.NewRoundStatus() - rstatus.ResetRoundStatus() - - cns := spos.NewConsensusState( - rcns, - rthr, - rstatus, - ) - - cns.Data = []byte("X") - cns.RoundIndex = 0 - return cns -} - func TestWorker_NewConsensusServiceShouldWork(t *testing.T) { t.Parallel() @@ -209,7 +67,7 @@ func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyAndHeaderShouldW blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockBodyAndHeader) @@ -221,7 +79,7 @@ func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyAndHeaderShou blsService, _ := bls.NewConsensusService() - consensusState := 
initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockBodyAndHeader) @@ -233,7 +91,7 @@ func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockBodyShouldWork(t *te blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockBody) @@ -245,7 +103,7 @@ func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockBodyShouldNotWork blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockBody) @@ -257,7 +115,7 @@ func TestWorker_CanProceedWithSrStartRoundFinishedForMtBlockHeaderShouldWork(t * blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeader) @@ -269,7 +127,7 @@ func TestWorker_CanProceedWithSrStartRoundNotFinishedForMtBlockHeaderShouldNotWo blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrStartRound, spos.SsNotFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeader) @@ -281,7 +139,7 @@ func TestWorker_CanProceedWithSrBlockFinishedForMtBlockHeaderShouldWork(t *testi blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrBlock, spos.SsFinished) canProceed := blsService.CanProceed(consensusState, bls.MtSignature) @@ -293,7 +151,7 @@ func TestWorker_CanProceedWithSrBlockRoundNotFinishedForMtBlockHeaderShouldNotWo blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrBlock, spos.SsNotFinished) canProceed := blsService.CanProceed(consensusState, bls.MtSignature) @@ -305,7 +163,7 @@ func TestWorker_CanProceedWithSrSignatureFinishedForMtBlockHeaderFinalInfoShould blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrSignature, spos.SsFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeaderFinalInfo) @@ -317,7 +175,7 @@ func TestWorker_CanProceedWithSrSignatureRoundNotFinishedForMtBlockHeaderFinalIn blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetStatus(bls.SrSignature, spos.SsNotFinished) canProceed := blsService.CanProceed(consensusState, bls.MtBlockHeaderFinalInfo) @@ -328,7 +186,7 @@ func TestWorker_CanProceedWitUnkownMessageTypeShouldNotWork(t *testing.T) { t.Parallel() blsService, _ := bls.NewConsensusService() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() canProceed := blsService.CanProceed(consensusState, -1) assert.False(t, canProceed) diff --git 
a/consensus/spos/bls/constants.go b/consensus/spos/bls/constants.go index 166abe70b65..88667da3003 100644 --- a/consensus/spos/bls/constants.go +++ b/consensus/spos/bls/constants.go @@ -2,11 +2,8 @@ package bls import ( "github.com/multiversx/mx-chain-go/consensus" - logger "github.com/multiversx/mx-chain-logger-go" ) -var log = logger.GetOrCreate("consensus/spos/bls") - const ( // SrStartRound defines ID of Subround "Start round" SrStartRound = iota @@ -36,36 +33,6 @@ const ( MtInvalidSigners ) -// waitingAllSigsMaxTimeThreshold specifies the max allocated time for waiting all signatures from the total time of the subround signature -const waitingAllSigsMaxTimeThreshold = 0.5 - -// processingThresholdPercent specifies the max allocated time for processing the block as a percentage of the total time of the round -const processingThresholdPercent = 85 - -// srStartStartTime specifies the start time, from the total time of the round, of Subround Start -const srStartStartTime = 0.0 - -// srEndStartTime specifies the end time, from the total time of the round, of Subround Start -const srStartEndTime = 0.05 - -// srBlockStartTime specifies the start time, from the total time of the round, of Subround Block -const srBlockStartTime = 0.05 - -// srBlockEndTime specifies the end time, from the total time of the round, of Subround Block -const srBlockEndTime = 0.25 - -// srSignatureStartTime specifies the start time, from the total time of the round, of Subround Signature -const srSignatureStartTime = 0.25 - -// srSignatureEndTime specifies the end time, from the total time of the round, of Subround Signature -const srSignatureEndTime = 0.85 - -// srEndStartTime specifies the start time, from the total time of the round, of Subround End -const srEndStartTime = 0.85 - -// srEndEndTime specifies the end time, from the total time of the round, of Subround End -const srEndEndTime = 0.95 - const ( // BlockBodyAndHeaderStringValue represents the string to be used to identify a block body and a block header BlockBodyAndHeaderStringValue = "(BLOCK_BODY_AND_HEADER)" @@ -89,7 +56,8 @@ const ( BlockDefaultStringValue = "Undefined message type" ) -func getStringValue(msgType consensus.MessageType) string { +// GetStringValue returns the string value of a given MessageType +func GetStringValue(msgType consensus.MessageType) string { switch msgType { case MtBlockBodyAndHeader: return BlockBodyAndHeaderStringValue @@ -108,8 +76,8 @@ func getStringValue(msgType consensus.MessageType) string { } } -// getSubroundName returns the name of each Subround from a given Subround ID -func getSubroundName(subroundId int) string { +// GetSubroundName returns the name of each Subround from a given Subround ID +func GetSubroundName(subroundId int) string { switch subroundId { case SrStartRound: return "(START_ROUND)" diff --git a/consensus/spos/bls/proxy/errors.go b/consensus/spos/bls/proxy/errors.go new file mode 100644 index 00000000000..4036ecf1c63 --- /dev/null +++ b/consensus/spos/bls/proxy/errors.go @@ -0,0 +1,38 @@ +package proxy + +import ( + "errors" +) + +// ErrNilChronologyHandler is the error returned when the chronology handler is nil +var ErrNilChronologyHandler = errors.New("nil chronology handler") + +// ErrNilConsensusCoreHandler is the error returned when the consensus core handler is nil +var ErrNilConsensusCoreHandler = errors.New("nil consensus core handler") + +// ErrNilConsensusState is the error returned when the consensus state is nil +var ErrNilConsensusState = errors.New("nil consensus state") + +// 
ErrNilWorker is the error returned when the worker is nil +var ErrNilWorker = errors.New("nil worker") + +// ErrNilSignatureThrottler is the error returned when the signature throttler is nil +var ErrNilSignatureThrottler = errors.New("nil signature throttler") + +// ErrNilAppStatusHandler is the error returned when the app status handler is nil +var ErrNilAppStatusHandler = errors.New("nil app status handler") + +// ErrNilOutportHandler is the error returned when the outport handler is nil +var ErrNilOutportHandler = errors.New("nil outport handler") + +// ErrNilSentSignatureTracker is the error returned when the sent signature tracker is nil +var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") + +// ErrNilChainID is the error returned when the chain ID is nil +var ErrNilChainID = errors.New("nil chain ID") + +// ErrNilCurrentPid is the error returned when the current PID is nil +var ErrNilCurrentPid = errors.New("nil current PID") + +// ErrNilEnableEpochsHandler is the error returned when the enable epochs handler is nil +var ErrNilEnableEpochsHandler = errors.New("nil enable epochs handler") diff --git a/consensus/spos/bls/proxy/subroundsHandler.go b/consensus/spos/bls/proxy/subroundsHandler.go new file mode 100644 index 00000000000..2b284db5144 --- /dev/null +++ b/consensus/spos/bls/proxy/subroundsHandler.go @@ -0,0 +1,217 @@ +package proxy + +import ( + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + logger "github.com/multiversx/mx-chain-logger-go" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/outport" +) + +var log = logger.GetOrCreate("consensus/spos/bls/proxy") + +// SubroundsHandlerArgs struct contains the needed data for the SubroundsHandler +type SubroundsHandlerArgs struct { + Chronology consensus.ChronologyHandler + ConsensusCoreHandler spos.ConsensusCoreHandler + ConsensusState spos.ConsensusStateHandler + Worker factory.ConsensusWorker + SignatureThrottler core.Throttler + AppStatusHandler core.AppStatusHandler + OutportHandler outport.OutportHandler + SentSignatureTracker spos.SentSignaturesTracker + EnableEpochsHandler core.EnableEpochsHandler + ChainID []byte + CurrentPid core.PeerID +} + +// subroundsFactory defines the methods needed to generate the subrounds +type subroundsFactory interface { + GenerateSubrounds() error + SetOutportHandler(driver outport.OutportHandler) + IsInterfaceNil() bool +} + +type consensusStateMachineType int + +// SubroundsHandler struct contains the needed data for the SubroundsHandler +type SubroundsHandler struct { + chronology consensus.ChronologyHandler + consensusCoreHandler spos.ConsensusCoreHandler + consensusState spos.ConsensusStateHandler + worker factory.ConsensusWorker + signatureThrottler core.Throttler + appStatusHandler core.AppStatusHandler + outportHandler outport.OutportHandler + sentSignatureTracker spos.SentSignaturesTracker + enableEpochsHandler core.EnableEpochsHandler + chainID []byte + currentPid core.PeerID + currentConsensusType consensusStateMachineType +} + +const ( + consensusNone consensusStateMachineType = iota + consensusV1 + consensusV2 +) + +// NewSubroundsHandler creates a new 
SubroundsHandler object +func NewSubroundsHandler(args *SubroundsHandlerArgs) (*SubroundsHandler, error) { + err := checkArgs(args) + if err != nil { + return nil, err + } + + subroundHandler := &SubroundsHandler{ + chronology: args.Chronology, + consensusCoreHandler: args.ConsensusCoreHandler, + consensusState: args.ConsensusState, + worker: args.Worker, + signatureThrottler: args.SignatureThrottler, + appStatusHandler: args.AppStatusHandler, + outportHandler: args.OutportHandler, + sentSignatureTracker: args.SentSignatureTracker, + enableEpochsHandler: args.EnableEpochsHandler, + chainID: args.ChainID, + currentPid: args.CurrentPid, + currentConsensusType: consensusNone, + } + + subroundHandler.consensusCoreHandler.EpochStartRegistrationHandler().RegisterHandler(subroundHandler) + + return subroundHandler, nil +} + +func checkArgs(args *SubroundsHandlerArgs) error { + if check.IfNil(args.Chronology) { + return ErrNilChronologyHandler + } + if check.IfNil(args.ConsensusCoreHandler) { + return ErrNilConsensusCoreHandler + } + if check.IfNil(args.ConsensusState) { + return ErrNilConsensusState + } + if check.IfNil(args.Worker) { + return ErrNilWorker + } + if check.IfNil(args.SignatureThrottler) { + return ErrNilSignatureThrottler + } + if check.IfNil(args.AppStatusHandler) { + return ErrNilAppStatusHandler + } + if check.IfNil(args.OutportHandler) { + return ErrNilOutportHandler + } + if check.IfNil(args.SentSignatureTracker) { + return ErrNilSentSignatureTracker + } + if check.IfNil(args.EnableEpochsHandler) { + return ErrNilEnableEpochsHandler + } + if args.ChainID == nil { + return ErrNilChainID + } + if len(args.CurrentPid) == 0 { + return ErrNilCurrentPid + } + + return nil +} + +// Start starts the subrounds handler +func (s *SubroundsHandler) Start(epoch uint32) error { + return s.initSubroundsForEpoch(epoch) +} + +func (s *SubroundsHandler) initSubroundsForEpoch(epoch uint32) error { + var err error + var fct subroundsFactory + if s.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, epoch) { + if s.currentConsensusType == consensusV2 { + return nil + } + + s.currentConsensusType = consensusV2 + fct, err = v2.NewSubroundsFactory( + s.consensusCoreHandler, + s.consensusState, + s.worker, + s.chainID, + s.currentPid, + s.appStatusHandler, + s.sentSignatureTracker, + s.signatureThrottler, + s.outportHandler, + ) + } else { + if s.currentConsensusType == consensusV1 { + return nil + } + + s.currentConsensusType = consensusV1 + fct, err = v1.NewSubroundsFactory( + s.consensusCoreHandler, + s.consensusState, + s.worker, + s.chainID, + s.currentPid, + s.appStatusHandler, + s.sentSignatureTracker, + s.outportHandler, + ) + } + if err != nil { + return err + } + + err = s.chronology.Close() + if err != nil { + log.Warn("SubroundsHandler.initSubroundsForEpoch: cannot close the chronology", "error", err) + } + + err = fct.GenerateSubrounds() + if err != nil { + return err + } + + s.chronology.StartRounds() + return nil +} + +// EpochStartAction is called when the epoch starts +func (s *SubroundsHandler) EpochStartAction(hdr data.HeaderHandler) { + if check.IfNil(hdr) { + log.Error("SubroundsHandler.EpochStartAction: nil header") + return + } + + err := s.initSubroundsForEpoch(hdr.GetEpoch()) + if err != nil { + log.Error("SubroundsHandler.EpochStartAction: cannot initialize subrounds", "error", err) + } +} + +// EpochStartPrepare prepares the subrounds handler for the epoch start +func (s 
*SubroundsHandler) EpochStartPrepare(_ data.HeaderHandler, _ data.BodyHandler) { +} + +// NotifyOrder returns the order of the subrounds handler +func (s *SubroundsHandler) NotifyOrder() uint32 { + return common.ConsensusHandlerOrder +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *SubroundsHandler) IsInterfaceNil() bool { + return s == nil +} diff --git a/consensus/spos/bls/proxy/subroundsHandler_test.go b/consensus/spos/bls/proxy/subroundsHandler_test.go new file mode 100644 index 00000000000..148e9bc2fd7 --- /dev/null +++ b/consensus/spos/bls/proxy/subroundsHandler_test.go @@ -0,0 +1,439 @@ +package proxy + +import ( + "sync/atomic" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/stretchr/testify/require" + + chainCommon "github.com/multiversx/mx-chain-go/common" + mock2 "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/bootstrapperStubs" + "github.com/multiversx/mx-chain-go/testscommon/common" + "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + mock "github.com/multiversx/mx-chain-go/testscommon/epochstartmock" + outportStub "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +func getDefaultArgumentsSubroundHandler() (*SubroundsHandlerArgs, *consensus.ConsensusCoreMock) { + x := make(chan bool) + chronology := &consensus.ChronologyHandlerMock{} + epochsEnable := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + epochStartNotifier := &mock.EpochStartNotifierStub{} + consensusState := &consensus.ConsensusStateMock{} + worker := &consensus.SposWorkerMock{ + RemoveAllReceivedMessagesCallsCalled: func() {}, + GetConsensusStateChangedChannelsCalled: func() chan bool { + return x + }, + } + antiFloodHandler := &mock2.P2PAntifloodHandlerStub{} + handlerArgs := &SubroundsHandlerArgs{ + Chronology: chronology, + ConsensusState: consensusState, + Worker: worker, + SignatureThrottler: &common.ThrottlerStub{}, + AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, + OutportHandler: &outportStub.OutportStub{}, + SentSignatureTracker: &testscommon.SentSignatureTrackerStub{}, + EnableEpochsHandler: epochsEnable, + ChainID: []byte("chainID"), + CurrentPid: "peerID", + } + + consensusCore := &consensus.ConsensusCoreMock{} + consensusCore.SetEpochStartNotifier(epochStartNotifier) + consensusCore.SetBlockchain(&testscommon.ChainHandlerStub{}) + consensusCore.SetBlockProcessor(&testscommon.BlockProcessorStub{}) + consensusCore.SetBootStrapper(&bootstrapperStubs.BootstrapperStub{}) + consensusCore.SetBroadcastMessenger(&consensus.BroadcastMessengerMock{}) + consensusCore.SetChronology(chronology) + consensusCore.SetAntifloodHandler(antiFloodHandler) + consensusCore.SetHasher(&testscommon.HasherStub{}) + consensusCore.SetMarshalizer(&testscommon.MarshallerStub{}) + consensusCore.SetMultiSignerContainer(&cryptoMocks.MultiSignerContainerStub{ + GetMultiSignerCalled: func(epoch uint32) (crypto.MultiSigner, error) { + return &cryptoMocks.MultisignerMock{}, nil + }, + }) + consensusCore.SetRoundHandler(&consensus.RoundHandlerMock{}) + 
consensusCore.SetShardCoordinator(&testscommon.ShardsCoordinatorMock{}) + consensusCore.SetSyncTimer(&testscommon.SyncTimerStub{}) + consensusCore.SetValidatorGroupSelector(&shardingMocks.NodesCoordinatorMock{}) + consensusCore.SetPeerHonestyHandler(&testscommon.PeerHonestyHandlerStub{}) + consensusCore.SetHeaderSigVerifier(&consensus.HeaderSigVerifierMock{}) + consensusCore.SetFallbackHeaderValidator(&testscommon.FallBackHeaderValidatorStub{}) + consensusCore.SetNodeRedundancyHandler(&mock2.NodeRedundancyHandlerStub{}) + consensusCore.SetScheduledProcessor(&consensus.ScheduledProcessorStub{}) + consensusCore.SetMessageSigningHandler(&mock2.MessageSigningHandlerStub{}) + consensusCore.SetPeerBlacklistHandler(&mock2.PeerBlacklistHandlerStub{}) + consensusCore.SetSigningHandler(&consensus.SigningHandlerStub{}) + consensusCore.SetEnableEpochsHandler(epochsEnable) + consensusCore.SetEquivalentProofsPool(&dataRetriever.ProofsPoolMock{}) + handlerArgs.ConsensusCoreHandler = consensusCore + + return handlerArgs, consensusCore +} + +func TestNewSubroundsHandler(t *testing.T) { + t.Parallel() + + t.Run("nil chronology should error", func(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + handlerArgs.Chronology = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilChronologyHandler, err) + require.Nil(t, sh) + }) + t.Run("nil consensus core should error", func(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + handlerArgs.ConsensusCoreHandler = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilConsensusCoreHandler, err) + require.Nil(t, sh) + }) + t.Run("nil consensus state should error", func(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + handlerArgs.ConsensusState = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilConsensusState, err) + require.Nil(t, sh) + }) + t.Run("nil worker should error", func(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + handlerArgs.Worker = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilWorker, err) + require.Nil(t, sh) + }) + t.Run("nil signature throttler should error", func(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + handlerArgs.SignatureThrottler = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilSignatureThrottler, err) + require.Nil(t, sh) + }) + t.Run("nil app status handler should error", func(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + handlerArgs.AppStatusHandler = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilAppStatusHandler, err) + require.Nil(t, sh) + }) + t.Run("nil outport handler should error", func(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + handlerArgs.OutportHandler = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilOutportHandler, err) + require.Nil(t, sh) + }) + t.Run("nil sent signature tracker should error", func(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + handlerArgs.SentSignatureTracker = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilSentSignatureTracker, err) + require.Nil(t, sh) + }) + t.Run("nil enable epochs handler should error", func(t *testing.T) { + t.Parallel() + + 
handlerArgs, _ := getDefaultArgumentsSubroundHandler() + handlerArgs.EnableEpochsHandler = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilEnableEpochsHandler, err) + require.Nil(t, sh) + }) + t.Run("nil chain ID should error", func(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + handlerArgs.ChainID = nil + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilChainID, err) + require.Nil(t, sh) + }) + t.Run("empty current PID should error", func(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + handlerArgs.CurrentPid = "" + sh, err := NewSubroundsHandler(handlerArgs) + require.Equal(t, ErrNilCurrentPid, err) + require.Nil(t, sh) + }) + t.Run("OK", func(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + }) +} + +func TestSubroundsHandler_initSubroundsForEpoch(t *testing.T) { + t.Parallel() + + t.Run("equivalent messages not enabled, with previous consensus type not consensusV1", func(t *testing.T) { + t.Parallel() + + startCalled := atomic.Int32{} + handlerArgs, consensusCore := getDefaultArgumentsSubroundHandler() + chronology := &consensus.ChronologyHandlerMock{ + StartRoundCalled: func() { + startCalled.Add(1) + }, + } + enableEpoch := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return false + }, + } + handlerArgs.Chronology = chronology + handlerArgs.EnableEpochsHandler = enableEpoch + consensusCore.SetEnableEpochsHandler(enableEpoch) + consensusCore.SetChronology(chronology) + + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + sh.currentConsensusType = consensusNone + + err = sh.initSubroundsForEpoch(0) + require.Nil(t, err) + require.Equal(t, consensusV1, sh.currentConsensusType) + require.Equal(t, int32(1), startCalled.Load()) + }) + t.Run("equivalent messages not enabled, with previous consensus type consensusV1", func(t *testing.T) { + t.Parallel() + + startCalled := atomic.Int32{} + handlerArgs, consensusCore := getDefaultArgumentsSubroundHandler() + chronology := &consensus.ChronologyHandlerMock{ + StartRoundCalled: func() { + startCalled.Add(1) + }, + } + enableEpoch := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return false + }, + } + handlerArgs.Chronology = chronology + handlerArgs.EnableEpochsHandler = enableEpoch + consensusCore.SetEnableEpochsHandler(enableEpoch) + consensusCore.SetChronology(chronology) + + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + sh.currentConsensusType = consensusV1 + + err = sh.initSubroundsForEpoch(0) + require.Nil(t, err) + require.Equal(t, consensusV1, sh.currentConsensusType) + require.Equal(t, int32(0), startCalled.Load()) + }) + t.Run("equivalent messages enabled, with previous consensus type not consensusV2", func(t *testing.T) { + t.Parallel() + startCalled := atomic.Int32{} + handlerArgs, consensusCore := getDefaultArgumentsSubroundHandler() + chronology := &consensus.ChronologyHandlerMock{ + StartRoundCalled: func() { + startCalled.Add(1) + }, + } + enableEpoch := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return true + }, + 
} + handlerArgs.Chronology = chronology + handlerArgs.EnableEpochsHandler = enableEpoch + consensusCore.SetEnableEpochsHandler(enableEpoch) + consensusCore.SetChronology(chronology) + + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + sh.currentConsensusType = consensusNone + + err = sh.initSubroundsForEpoch(0) + require.Nil(t, err) + require.Equal(t, consensusV2, sh.currentConsensusType) + require.Equal(t, int32(1), startCalled.Load()) + }) + t.Run("equivalent messages enabled, with previous consensus type consensusV2", func(t *testing.T) { + t.Parallel() + + startCalled := atomic.Int32{} + handlerArgs, consensusCore := getDefaultArgumentsSubroundHandler() + chronology := &consensus.ChronologyHandlerMock{ + StartRoundCalled: func() { + startCalled.Add(1) + }, + } + enableEpoch := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return true + }, + } + handlerArgs.Chronology = chronology + handlerArgs.EnableEpochsHandler = enableEpoch + consensusCore.SetEnableEpochsHandler(enableEpoch) + consensusCore.SetChronology(chronology) + + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + sh.currentConsensusType = consensusV2 + + err = sh.initSubroundsForEpoch(0) + require.Nil(t, err) + require.Equal(t, consensusV2, sh.currentConsensusType) + require.Equal(t, int32(0), startCalled.Load()) + }) +} + +func TestSubroundsHandler_Start(t *testing.T) { + t.Parallel() + + // the Start is tested via initSubroundsForEpoch, adding one of the test cases here as well + t.Run("equivalent messages not enabled, with previous consensus type not consensusV1", func(t *testing.T) { + t.Parallel() + + startCalled := atomic.Int32{} + handlerArgs, consensusCore := getDefaultArgumentsSubroundHandler() + chronology := &consensus.ChronologyHandlerMock{ + StartRoundCalled: func() { + startCalled.Add(1) + }, + } + enableEpoch := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return false + }, + } + handlerArgs.Chronology = chronology + handlerArgs.EnableEpochsHandler = enableEpoch + consensusCore.SetEnableEpochsHandler(enableEpoch) + consensusCore.SetChronology(chronology) + + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + sh.currentConsensusType = consensusNone + + err = sh.Start(0) + require.Nil(t, err) + require.Equal(t, consensusV1, sh.currentConsensusType) + require.Equal(t, int32(1), startCalled.Load()) + }) +} + +func TestSubroundsHandler_NotifyOrder(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + + order := sh.NotifyOrder() + require.Equal(t, uint32(chainCommon.ConsensusHandlerOrder), order) +} + +func TestSubroundsHandler_IsInterfaceNil(t *testing.T) { + t.Parallel() + + t.Run("nil handler", func(t *testing.T) { + t.Parallel() + + var sh *SubroundsHandler + require.True(t, sh.IsInterfaceNil()) + }) + t.Run("not nil handler", func(t *testing.T) { + t.Parallel() + + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + + require.False(t, sh.IsInterfaceNil()) + }) +} + +func TestSubroundsHandler_EpochStartAction(t *testing.T) { + t.Parallel() + + t.Run("nil handler does not panic", func(t 
*testing.T) { + t.Parallel() + + defer func() { + if r := recover(); r != nil { + t.Errorf("The code panicked") + } + }() + handlerArgs, _ := getDefaultArgumentsSubroundHandler() + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + sh.EpochStartAction(&testscommon.HeaderHandlerStub{}) + }) + + // tested through initSubroundsForEpoch + t.Run("OK", func(t *testing.T) { + t.Parallel() + + startCalled := atomic.Int32{} + handlerArgs, consensusCore := getDefaultArgumentsSubroundHandler() + chronology := &consensus.ChronologyHandlerMock{ + StartRoundCalled: func() { + startCalled.Add(1) + }, + } + enableEpoch := &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return false + }, + } + handlerArgs.Chronology = chronology + handlerArgs.EnableEpochsHandler = enableEpoch + consensusCore.SetEnableEpochsHandler(enableEpoch) + consensusCore.SetChronology(chronology) + + sh, err := NewSubroundsHandler(handlerArgs) + require.Nil(t, err) + require.NotNil(t, sh) + + sh.currentConsensusType = consensusNone + sh.EpochStartAction(&testscommon.HeaderHandlerStub{}) + require.Nil(t, err) + require.Equal(t, consensusV1, sh.currentConsensusType) + require.Equal(t, int32(1), startCalled.Load()) + }) +} diff --git a/consensus/spos/bls/v1/blsSubroundsFactory.go b/consensus/spos/bls/v1/blsSubroundsFactory.go new file mode 100644 index 00000000000..70915c5f30b --- /dev/null +++ b/consensus/spos/bls/v1/blsSubroundsFactory.go @@ -0,0 +1,302 @@ +package v1 + +import ( + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" + "github.com/multiversx/mx-chain-go/outport" +) + +// factory defines the data needed by this factory to create all the subrounds and give them their specific +// functionality +type factory struct { + consensusCore spos.ConsensusCoreHandler + consensusState spos.ConsensusStateHandler + worker spos.WorkerHandler + + appStatusHandler core.AppStatusHandler + outportHandler outport.OutportHandler + sentSignaturesTracker spos.SentSignaturesTracker + chainID []byte + currentPid core.PeerID +} + +// NewSubroundsFactory creates a new factory object +func NewSubroundsFactory( + consensusDataContainer spos.ConsensusCoreHandler, + consensusState spos.ConsensusStateHandler, + worker spos.WorkerHandler, + chainID []byte, + currentPid core.PeerID, + appStatusHandler core.AppStatusHandler, + sentSignaturesTracker spos.SentSignaturesTracker, + outportHandler outport.OutportHandler, +) (*factory, error) { + // no need to check the outportHandler, it can be nil + err := checkNewFactoryParams( + consensusDataContainer, + consensusState, + worker, + chainID, + appStatusHandler, + sentSignaturesTracker, + ) + if err != nil { + return nil, err + } + + fct := factory{ + consensusCore: consensusDataContainer, + consensusState: consensusState, + worker: worker, + appStatusHandler: appStatusHandler, + chainID: chainID, + currentPid: currentPid, + sentSignaturesTracker: sentSignaturesTracker, + outportHandler: outportHandler, + } + + return &fct, nil +} + +func checkNewFactoryParams( + container spos.ConsensusCoreHandler, + state spos.ConsensusStateHandler, + worker spos.WorkerHandler, + chainID []byte, + appStatusHandler core.AppStatusHandler, + sentSignaturesTracker spos.SentSignaturesTracker, +) error { + err := spos.ValidateConsensusCore(container) + 
if err != nil { + return err + } + if check.IfNil(state) { + return spos.ErrNilConsensusState + } + if check.IfNil(worker) { + return spos.ErrNilWorker + } + if check.IfNil(appStatusHandler) { + return spos.ErrNilAppStatusHandler + } + if check.IfNil(sentSignaturesTracker) { + return ErrNilSentSignatureTracker + } + if len(chainID) == 0 { + return spos.ErrInvalidChainID + } + + return nil +} + +// SetOutportHandler method will update the value of the factory's outport +func (fct *factory) SetOutportHandler(driver outport.OutportHandler) { + fct.outportHandler = driver +} + +// GenerateSubrounds will generate the subrounds used in BLS Cns +func (fct *factory) GenerateSubrounds() error { + fct.initConsensusThreshold() + fct.consensusCore.Chronology().RemoveAllSubrounds() + fct.worker.RemoveAllReceivedMessagesCalls() + + err := fct.generateStartRoundSubround() + if err != nil { + return err + } + + err = fct.generateBlockSubround() + if err != nil { + return err + } + + err = fct.generateSignatureSubround() + if err != nil { + return err + } + + err = fct.generateEndRoundSubround() + if err != nil { + return err + } + + return nil +} + +func (fct *factory) getTimeDuration() time.Duration { + return fct.consensusCore.RoundHandler().TimeDuration() +} + +func (fct *factory) generateStartRoundSubround() error { + subround, err := spos.NewSubround( + -1, + bls.SrStartRound, + bls.SrBlock, + int64(float64(fct.getTimeDuration())*srStartStartTime), + int64(float64(fct.getTimeDuration())*srStartEndTime), + bls.GetSubroundName(bls.SrStartRound), + fct.consensusState, + fct.worker.GetConsensusStateChangedChannel(), + fct.worker.ExecuteStoredMessages, + fct.consensusCore, + fct.chainID, + fct.currentPid, + fct.appStatusHandler, + ) + if err != nil { + return err + } + + subroundStartRoundInstance, err := NewSubroundStartRound( + subround, + fct.worker.Extend, + processingThresholdPercent, + fct.worker.ExecuteStoredMessages, + fct.worker.ResetConsensusMessages, + fct.sentSignaturesTracker, + ) + if err != nil { + return err + } + + err = subroundStartRoundInstance.SetOutportHandler(fct.outportHandler) + if err != nil { + return err + } + + fct.consensusCore.Chronology().AddSubround(subroundStartRoundInstance) + + return nil +} + +func (fct *factory) generateBlockSubround() error { + subround, err := spos.NewSubround( + bls.SrStartRound, + bls.SrBlock, + bls.SrSignature, + int64(float64(fct.getTimeDuration())*srBlockStartTime), + int64(float64(fct.getTimeDuration())*srBlockEndTime), + bls.GetSubroundName(bls.SrBlock), + fct.consensusState, + fct.worker.GetConsensusStateChangedChannel(), + fct.worker.ExecuteStoredMessages, + fct.consensusCore, + fct.chainID, + fct.currentPid, + fct.appStatusHandler, + ) + if err != nil { + return err + } + + subroundBlockInstance, err := NewSubroundBlock( + subround, + fct.worker.Extend, + processingThresholdPercent, + ) + if err != nil { + return err + } + + fct.worker.AddReceivedMessageCall(bls.MtBlockBodyAndHeader, subroundBlockInstance.receivedBlockBodyAndHeader) + fct.worker.AddReceivedMessageCall(bls.MtBlockBody, subroundBlockInstance.receivedBlockBody) + fct.worker.AddReceivedMessageCall(bls.MtBlockHeader, subroundBlockInstance.receivedBlockHeader) + fct.consensusCore.Chronology().AddSubround(subroundBlockInstance) + + return nil +} + +func (fct *factory) generateSignatureSubround() error { + subround, err := spos.NewSubround( + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, + int64(float64(fct.getTimeDuration())*srSignatureStartTime), + 
int64(float64(fct.getTimeDuration())*srSignatureEndTime), + bls.GetSubroundName(bls.SrSignature), + fct.consensusState, + fct.worker.GetConsensusStateChangedChannel(), + fct.worker.ExecuteStoredMessages, + fct.consensusCore, + fct.chainID, + fct.currentPid, + fct.appStatusHandler, + ) + if err != nil { + return err + } + + subroundSignatureObject, err := NewSubroundSignature( + subround, + fct.worker.Extend, + fct.appStatusHandler, + fct.sentSignaturesTracker, + ) + if err != nil { + return err + } + + fct.worker.AddReceivedMessageCall(bls.MtSignature, subroundSignatureObject.receivedSignature) + fct.consensusCore.Chronology().AddSubround(subroundSignatureObject) + + return nil +} + +func (fct *factory) generateEndRoundSubround() error { + subround, err := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(float64(fct.getTimeDuration())*srEndStartTime), + int64(float64(fct.getTimeDuration())*srEndEndTime), + bls.GetSubroundName(bls.SrEndRound), + fct.consensusState, + fct.worker.GetConsensusStateChangedChannel(), + fct.worker.ExecuteStoredMessages, + fct.consensusCore, + fct.chainID, + fct.currentPid, + fct.appStatusHandler, + ) + if err != nil { + return err + } + + subroundEndRoundObject, err := NewSubroundEndRound( + subround, + fct.worker.Extend, + spos.MaxThresholdPercent, + fct.worker.DisplayStatistics, + fct.appStatusHandler, + fct.sentSignaturesTracker, + ) + if err != nil { + return err + } + + fct.worker.AddReceivedMessageCall(bls.MtBlockHeaderFinalInfo, subroundEndRoundObject.receivedBlockHeaderFinalInfo) + fct.worker.AddReceivedMessageCall(bls.MtInvalidSigners, subroundEndRoundObject.receivedInvalidSignersInfo) + fct.worker.AddReceivedHeaderHandler(subroundEndRoundObject.receivedHeader) + fct.consensusCore.Chronology().AddSubround(subroundEndRoundObject) + + return nil +} + +func (fct *factory) initConsensusThreshold() { + pBFTThreshold := core.GetPBFTThreshold(fct.consensusState.ConsensusGroupSize()) + pBFTFallbackThreshold := core.GetPBFTFallbackThreshold(fct.consensusState.ConsensusGroupSize()) + fct.consensusState.SetThreshold(bls.SrBlock, 1) + fct.consensusState.SetThreshold(bls.SrSignature, pBFTThreshold) + fct.consensusState.SetFallbackThreshold(bls.SrBlock, 1) + fct.consensusState.SetFallbackThreshold(bls.SrSignature, pBFTFallbackThreshold) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (fct *factory) IsInterfaceNil() bool { + return fct == nil +} diff --git a/consensus/spos/bls/v1/blsSubroundsFactory_test.go b/consensus/spos/bls/v1/blsSubroundsFactory_test.go new file mode 100644 index 00000000000..f057daae16f --- /dev/null +++ b/consensus/spos/bls/v1/blsSubroundsFactory_test.go @@ -0,0 +1,650 @@ +package v1_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/stretchr/testify/assert" + + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" + "github.com/multiversx/mx-chain-go/outport" + "github.com/multiversx/mx-chain-go/testscommon" + consensusMock "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" + testscommonOutport "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +var 
chainID = []byte("chain ID") + +const currentPid = core.PeerID("pid") + +const roundTimeDuration = 100 * time.Millisecond + +func displayStatistics() { +} + +func extend(subroundId int) { + fmt.Println(subroundId) +} + +// executeStoredMessages tries to execute all the messages received which are valid for execution +func executeStoredMessages() { +} + +// resetConsensusMessages resets at the start of each round, all the previous consensus messages received +func resetConsensusMessages() { +} + +func initRoundHandlerMock() *consensusMock.RoundHandlerMock { + return &consensusMock.RoundHandlerMock{ + RoundIndex: 0, + TimeStampCalled: func() time.Time { + return time.Unix(0, 0) + }, + TimeDurationCalled: func() time.Duration { + return roundTimeDuration + }, + } +} + +func initWorker() spos.WorkerHandler { + sposWorker := &consensusMock.SposWorkerMock{} + sposWorker.GetConsensusStateChangedChannelsCalled = func() chan bool { + return make(chan bool) + } + sposWorker.RemoveAllReceivedMessagesCallsCalled = func() {} + + sposWorker.AddReceivedMessageCallCalled = + func(messageType consensus.MessageType, receivedMessageCall func(ctx context.Context, cnsDta *consensus.Message) bool) { + } + + return sposWorker +} + +func initFactoryWithContainer(container *consensusMock.ConsensusCoreMock) v1.Factory { + worker := initWorker() + consensusState := initializers.InitConsensusState() + + fct, _ := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + return fct +} + +func initFactory() v1.Factory { + container := consensusMock.InitConsensusCore() + return initFactoryWithContainer(container) +} + +func TestFactory_GetMessageTypeName(t *testing.T) { + t.Parallel() + + r := bls.GetStringValue(bls.MtBlockBodyAndHeader) + assert.Equal(t, "(BLOCK_BODY_AND_HEADER)", r) + + r = bls.GetStringValue(bls.MtBlockBody) + assert.Equal(t, "(BLOCK_BODY)", r) + + r = bls.GetStringValue(bls.MtBlockHeader) + assert.Equal(t, "(BLOCK_HEADER)", r) + + r = bls.GetStringValue(bls.MtSignature) + assert.Equal(t, "(SIGNATURE)", r) + + r = bls.GetStringValue(bls.MtBlockHeaderFinalInfo) + assert.Equal(t, "(FINAL_INFO)", r) + + r = bls.GetStringValue(bls.MtUnknown) + assert.Equal(t, "(UNKNOWN)", r) + + r = bls.GetStringValue(consensus.MessageType(-1)) + assert.Equal(t, "Undefined message type", r) +} + +func TestFactory_NewFactoryNilContainerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + worker := initWorker() + + fct, err := v1.NewSubroundsFactory( + nil, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilConsensusCore, err) +} + +func TestFactory_NewFactoryNilConsensusStateShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + worker := initWorker() + + fct, err := v1.NewSubroundsFactory( + container, + nil, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilConsensusState, err) +} + +func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetBlockchain(nil) + + fct, 
err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilBlockChain, err) +} + +func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetBlockProcessor(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilBlockProcessor, err) +} + +func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetBootStrapper(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilBootstrapper, err) +} + +func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetChronology(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilChronologyHandler, err) +} + +func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetHasher(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilHasher, err) +} + +func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetMarshalizer(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilMarshalizer, err) +} + +func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetMultiSignerContainer(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilMultiSignerContainer, err) +} + +func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := 
initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetRoundHandler(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilRoundHandler, err) +} + +func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetShardCoordinator(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilShardCoordinator, err) +} + +func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetSyncTimer(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + container.SetValidatorGroupSelector(nil) + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilNodesCoordinator, err) +} + +func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + nil, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilWorker, err) +} + +func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + nil, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrNilAppStatusHandler, err) +} + +func TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + nil, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, v1.ErrNilSentSignatureTracker, err) +} + +func TestFactory_NewFactoryShouldWork(t *testing.T) { + t.Parallel() + + fct := *initFactory() + + assert.False(t, check.IfNil(&fct)) +} + +func 
TestFactory_NewFactoryEmptyChainIDShouldFail(t *testing.T) { + t.Parallel() + + consensusState := initializers.InitConsensusState() + container := consensusMock.InitConsensusCore() + worker := initWorker() + + fct, err := v1.NewSubroundsFactory( + container, + consensusState, + worker, + nil, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + nil, + ) + + assert.Nil(t, fct) + assert.Equal(t, spos.ErrInvalidChainID, err) +} + +func TestFactory_GenerateSubroundStartRoundShouldFailWhenNewSubroundFail(t *testing.T) { + t.Parallel() + + fct := *initFactory() + fct.Worker().(*consensusMock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + return nil + } + + err := fct.GenerateStartRoundSubround() + + assert.Equal(t, spos.ErrNilChannel, err) +} + +func TestFactory_GenerateSubroundStartRoundShouldFailWhenNewSubroundStartRoundFail(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + fct := *initFactoryWithContainer(container) + container.SetSyncTimer(nil) + + err := fct.GenerateStartRoundSubround() + + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestFactory_GenerateSubroundBlockShouldFailWhenNewSubroundFail(t *testing.T) { + t.Parallel() + + fct := *initFactory() + fct.Worker().(*consensusMock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + return nil + } + + err := fct.GenerateBlockSubround() + + assert.Equal(t, spos.ErrNilChannel, err) +} + +func TestFactory_GenerateSubroundBlockShouldFailWhenNewSubroundBlockFail(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + fct := *initFactoryWithContainer(container) + container.SetSyncTimer(nil) + + err := fct.GenerateBlockSubround() + + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestFactory_GenerateSubroundSignatureShouldFailWhenNewSubroundFail(t *testing.T) { + t.Parallel() + + fct := *initFactory() + fct.Worker().(*consensusMock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + return nil + } + + err := fct.GenerateSignatureSubround() + + assert.Equal(t, spos.ErrNilChannel, err) +} + +func TestFactory_GenerateSubroundSignatureShouldFailWhenNewSubroundSignatureFail(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + fct := *initFactoryWithContainer(container) + container.SetSyncTimer(nil) + + err := fct.GenerateSignatureSubround() + + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestFactory_GenerateSubroundEndRoundShouldFailWhenNewSubroundFail(t *testing.T) { + t.Parallel() + + fct := *initFactory() + fct.Worker().(*consensusMock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + return nil + } + + err := fct.GenerateEndRoundSubround() + + assert.Equal(t, spos.ErrNilChannel, err) +} + +func TestFactory_GenerateSubroundEndRoundShouldFailWhenNewSubroundEndRoundFail(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + fct := *initFactoryWithContainer(container) + container.SetSyncTimer(nil) + + err := fct.GenerateEndRoundSubround() + + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestFactory_GenerateSubroundsShouldWork(t *testing.T) { + t.Parallel() + + subroundHandlers := 0 + + chrm := &consensusMock.ChronologyHandlerMock{} + chrm.AddSubroundCalled = func(subroundHandler consensus.SubroundHandler) { + subroundHandlers++ + } + container := consensusMock.InitConsensusCore() + container.SetChronology(chrm) + fct := 
*initFactoryWithContainer(container) + fct.SetOutportHandler(&testscommonOutport.OutportStub{}) + + err := fct.GenerateSubrounds() + assert.Nil(t, err) + + assert.Equal(t, 4, subroundHandlers) +} + +func TestFactory_GenerateSubroundsNilOutportShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + fct := *initFactoryWithContainer(container) + + err := fct.GenerateSubrounds() + assert.Equal(t, outport.ErrNilDriver, err) +} + +func TestFactory_SetIndexerShouldWork(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + fct := *initFactoryWithContainer(container) + + outportHandler := &testscommonOutport.OutportStub{} + fct.SetOutportHandler(outportHandler) + + assert.Equal(t, outportHandler, fct.Outport()) +} diff --git a/consensus/spos/bls/v1/constants.go b/consensus/spos/bls/v1/constants.go new file mode 100644 index 00000000000..5753fc94770 --- /dev/null +++ b/consensus/spos/bls/v1/constants.go @@ -0,0 +1,37 @@ +package v1 + +import ( + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("consensus/spos/bls") + +// waitingAllSigsMaxTimeThreshold specifies the max allocated time for waiting all signatures from the total time of the subround signature +const waitingAllSigsMaxTimeThreshold = 0.5 + +// processingThresholdPercent specifies the max allocated time for processing the block as a percentage of the total time of the round +const processingThresholdPercent = 85 + +// srStartStartTime specifies the start time, from the total time of the round, of Subround Start +const srStartStartTime = 0.0 + +// srStartEndTime specifies the end time, from the total time of the round, of Subround Start +const srStartEndTime = 0.05 + +// srBlockStartTime specifies the start time, from the total time of the round, of Subround Block +const srBlockStartTime = 0.05 + +// srBlockEndTime specifies the end time, from the total time of the round, of Subround Block +const srBlockEndTime = 0.25 + +// srSignatureStartTime specifies the start time, from the total time of the round, of Subround Signature +const srSignatureStartTime = 0.25 + +// srSignatureEndTime specifies the end time, from the total time of the round, of Subround Signature +const srSignatureEndTime = 0.85 + +// srEndStartTime specifies the start time, from the total time of the round, of Subround End +const srEndStartTime = 0.85 + +// srEndEndTime specifies the end time, from the total time of the round, of Subround End +const srEndEndTime = 0.95 diff --git a/consensus/spos/bls/errors.go b/consensus/spos/bls/v1/errors.go similarity index 93% rename from consensus/spos/bls/errors.go rename to consensus/spos/bls/v1/errors.go index b840f9e2c85..05c55b9592c 100644 --- a/consensus/spos/bls/errors.go +++ b/consensus/spos/bls/v1/errors.go @@ -1,4 +1,4 @@ -package bls +package v1 import "errors" diff --git a/consensus/spos/bls/v1/export_test.go b/consensus/spos/bls/v1/export_test.go new file mode 100644 index 00000000000..4a386a57933 --- /dev/null +++ b/consensus/spos/bls/v1/export_test.go @@ -0,0 +1,353 @@ +package v1 + +import ( + "context" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/marshal" + + cryptoCommon "github.com/multiversx/mx-chain-go/common/crypto" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/ntp" +
"github.com/multiversx/mx-chain-go/outport" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) + +// ProcessingThresholdPercent exports the internal processingThresholdPercent +const ProcessingThresholdPercent = processingThresholdPercent + +// factory + +// Factory defines a type for the factory structure +type Factory *factory + +// BlockChain gets the chain handler object +func (fct *factory) BlockChain() data.ChainHandler { + return fct.consensusCore.Blockchain() +} + +// BlockProcessor gets the block processor object +func (fct *factory) BlockProcessor() process.BlockProcessor { + return fct.consensusCore.BlockProcessor() +} + +// Bootstrapper gets the bootstrapper object +func (fct *factory) Bootstrapper() process.Bootstrapper { + return fct.consensusCore.BootStrapper() +} + +// ChronologyHandler gets the chronology handler object +func (fct *factory) ChronologyHandler() consensus.ChronologyHandler { + return fct.consensusCore.Chronology() +} + +// ConsensusState gets the consensus state struct pointer +func (fct *factory) ConsensusState() spos.ConsensusStateHandler { + return fct.consensusState +} + +// Hasher gets the hasher object +func (fct *factory) Hasher() hashing.Hasher { + return fct.consensusCore.Hasher() +} + +// Marshalizer gets the marshalizer object +func (fct *factory) Marshalizer() marshal.Marshalizer { + return fct.consensusCore.Marshalizer() +} + +// MultiSigner gets the multi signer object +func (fct *factory) MultiSignerContainer() cryptoCommon.MultiSignerContainer { + return fct.consensusCore.MultiSignerContainer() +} + +// RoundHandler gets the roundHandler object +func (fct *factory) RoundHandler() consensus.RoundHandler { + return fct.consensusCore.RoundHandler() +} + +// ShardCoordinator gets the shard coordinator object +func (fct *factory) ShardCoordinator() sharding.Coordinator { + return fct.consensusCore.ShardCoordinator() +} + +// SyncTimer gets the sync timer object +func (fct *factory) SyncTimer() ntp.SyncTimer { + return fct.consensusCore.SyncTimer() +} + +// NodesCoordinator gets the nodes coordinator object +func (fct *factory) NodesCoordinator() nodesCoordinator.NodesCoordinator { + return fct.consensusCore.NodesCoordinator() +} + +// Worker gets the worker object +func (fct *factory) Worker() spos.WorkerHandler { + return fct.worker +} + +// SetWorker sets the worker object +func (fct *factory) SetWorker(worker spos.WorkerHandler) { + fct.worker = worker +} + +// GenerateStartRoundSubround generates the instance of subround StartRound and added it to the chronology subrounds list +func (fct *factory) GenerateStartRoundSubround() error { + return fct.generateStartRoundSubround() +} + +// GenerateBlockSubround generates the instance of subround Block and added it to the chronology subrounds list +func (fct *factory) GenerateBlockSubround() error { + return fct.generateBlockSubround() +} + +// GenerateSignatureSubround generates the instance of subround Signature and added it to the chronology subrounds list +func (fct *factory) GenerateSignatureSubround() error { + return fct.generateSignatureSubround() +} + +// GenerateEndRoundSubround generates the instance of subround EndRound and added it to the chronology subrounds list +func (fct *factory) GenerateEndRoundSubround() error { + return fct.generateEndRoundSubround() +} + +// AppStatusHandler gets the app status handler object +func (fct *factory) AppStatusHandler() core.AppStatusHandler { 
+ return fct.appStatusHandler +} + +// Outport gets the outport object +func (fct *factory) Outport() outport.OutportHandler { + return fct.outportHandler +} + +// subroundStartRound + +// SubroundStartRound defines an alias for the subroundStartRound structure +type SubroundStartRound = *subroundStartRound + +// DoStartRoundJob method does the job of the subround StartRound +func (sr *subroundStartRound) DoStartRoundJob() bool { + return sr.doStartRoundJob(context.Background()) +} + +// DoStartRoundConsensusCheck method checks if the consensus is achieved in the subround StartRound +func (sr *subroundStartRound) DoStartRoundConsensusCheck() bool { + return sr.doStartRoundConsensusCheck() +} + +// GenerateNextConsensusGroup generates the next consensus group based on the current random seed, shard ID and round +func (sr *subroundStartRound) GenerateNextConsensusGroup(roundIndex int64) error { + return sr.generateNextConsensusGroup(roundIndex) +} + +// InitCurrentRound initializes everything needed in the current round +func (sr *subroundStartRound) InitCurrentRound() bool { + return sr.initCurrentRound() +} + +// GetSentSignatureTracker returns the subroundStartRound's SentSignaturesTracker instance +func (sr *subroundStartRound) GetSentSignatureTracker() spos.SentSignaturesTracker { + return sr.sentSignatureTracker +} + +// subroundBlock + +// SubroundBlock defines an alias for the subroundBlock structure +type SubroundBlock = *subroundBlock + +// BlockChain gets the ChainHandler stored in the ConsensusCore +func (sr *subroundBlock) BlockChain() data.ChainHandler { + return sr.Blockchain() +} + +// DoBlockJob method does the job of the subround Block +func (sr *subroundBlock) DoBlockJob() bool { + return sr.doBlockJob(context.Background()) +} + +// ProcessReceivedBlock method processes the received proposed block in the subround Block +func (sr *subroundBlock) ProcessReceivedBlock(cnsDta *consensus.Message) bool { + return sr.processReceivedBlock(context.Background(), cnsDta) +} + +// DoBlockConsensusCheck method checks if the consensus in the subround Block is achieved +func (sr *subroundBlock) DoBlockConsensusCheck() bool { + return sr.doBlockConsensusCheck() +} + +// IsBlockReceived method checks if the block was received from the leader in the current round +func (sr *subroundBlock) IsBlockReceived(threshold int) bool { + return sr.isBlockReceived(threshold) +} + +// CreateHeader method creates the proposed block header in the subround Block +func (sr *subroundBlock) CreateHeader() (data.HeaderHandler, error) { + return sr.createHeader() +} + +// CreateBlock method creates the proposed block (header and body) in the subround Block +func (sr *subroundBlock) CreateBlock(hdr data.HeaderHandler) (data.HeaderHandler, data.BodyHandler, error) { + return sr.createBlock(hdr) +} + +// SendBlockBody method sends the proposed block body in the subround Block +func (sr *subroundBlock) SendBlockBody(body data.BodyHandler, marshalizedBody []byte) bool { + return sr.sendBlockBody(body, marshalizedBody) +} + +// SendBlockHeader method sends the proposed block header in the subround Block +func (sr *subroundBlock) SendBlockHeader(header data.HeaderHandler, marshalizedHeader []byte) bool { + return sr.sendBlockHeader(header, marshalizedHeader) +} + +// ComputeSubroundProcessingMetric computes the processing metric related to the subround Block +func (sr *subroundBlock) ComputeSubroundProcessingMetric(startTime time.Time, metric string) { + sr.computeSubroundProcessingMetric(startTime, metric) +} + +// ReceivedBlockBody method is
called when a block body is received through the block body channel +func (sr *subroundBlock) ReceivedBlockBody(cnsDta *consensus.Message) bool { + return sr.receivedBlockBody(context.Background(), cnsDta) +} + +// ReceivedBlockHeader method is called when a block header is received through the block header channel +func (sr *subroundBlock) ReceivedBlockHeader(cnsDta *consensus.Message) bool { + return sr.receivedBlockHeader(context.Background(), cnsDta) +} + +// ReceivedBlockBodyAndHeader is called when both a header and block body have been received +func (sr *subroundBlock) ReceivedBlockBodyAndHeader(cnsDta *consensus.Message) bool { + return sr.receivedBlockBodyAndHeader(context.Background(), cnsDta) +} + +// subroundSignature + +// SubroundSignature defines an alias for the subroundSignature structure +type SubroundSignature = *subroundSignature + +// DoSignatureJob method does the job of the subround Signature +func (sr *subroundSignature) DoSignatureJob() bool { + return sr.doSignatureJob(context.Background()) +} + +// ReceivedSignature method is called when a signature is received through the signature channel +func (sr *subroundSignature) ReceivedSignature(cnsDta *consensus.Message) bool { + return sr.receivedSignature(context.Background(), cnsDta) +} + +// DoSignatureConsensusCheck method checks if the consensus in the subround Signature is achieved +func (sr *subroundSignature) DoSignatureConsensusCheck() bool { + return sr.doSignatureConsensusCheck() +} + +// AreSignaturesCollected method checks if the number of signatures received from the nodes are more than the given threshold +func (sr *subroundSignature) AreSignaturesCollected(threshold int) (bool, int) { + return sr.areSignaturesCollected(threshold) +} + +// subroundEndRound + +// SubroundEndRound defines an alias for the subroundEndRound structure +type SubroundEndRound = *subroundEndRound + +// DoEndRoundJob method does the job of the subround EndRound +func (sr *subroundEndRound) DoEndRoundJob() bool { + return sr.doEndRoundJob(context.Background()) +} + +// DoEndRoundConsensusCheck method checks if the consensus is achieved +func (sr *subroundEndRound) DoEndRoundConsensusCheck() bool { + return sr.doEndRoundConsensusCheck() +} + +// CheckSignaturesValidity method checks the signature validity for the nodes included in bitmap +func (sr *subroundEndRound) CheckSignaturesValidity(bitmap []byte) error { + return sr.checkSignaturesValidity(bitmap) +} + +// DoEndRoundJobByParticipant calls the unexported doEndRoundJobByParticipant function +func (sr *subroundEndRound) DoEndRoundJobByParticipant(cnsDta *consensus.Message) bool { + return sr.doEndRoundJobByParticipant(cnsDta) +} + +// DoEndRoundJobByLeader calls the unexported doEndRoundJobByLeader function +func (sr *subroundEndRound) DoEndRoundJobByLeader() bool { + return sr.doEndRoundJobByLeader() +} + +// HaveConsensusHeaderWithFullInfo calls the unexported haveConsensusHeaderWithFullInfo function +func (sr *subroundEndRound) HaveConsensusHeaderWithFullInfo(cnsDta *consensus.Message) (bool, data.HeaderHandler) { + return sr.haveConsensusHeaderWithFullInfo(cnsDta) +} + +// CreateAndBroadcastHeaderFinalInfo calls the unexported createAndBroadcastHeaderFinalInfo function +func (sr *subroundEndRound) CreateAndBroadcastHeaderFinalInfo() { + sr.createAndBroadcastHeaderFinalInfo() +} + +// ReceivedBlockHeaderFinalInfo calls the unexported receivedBlockHeaderFinalInfo function +func (sr *subroundEndRound) ReceivedBlockHeaderFinalInfo(cnsDta *consensus.Message) bool { + return 
sr.receivedBlockHeaderFinalInfo(context.Background(), cnsDta) +} + +// IsBlockHeaderFinalInfoValid calls the unexported isBlockHeaderFinalInfoValid function +func (sr *subroundEndRound) IsBlockHeaderFinalInfoValid(cnsDta *consensus.Message) bool { + return sr.isBlockHeaderFinalInfoValid(cnsDta) +} + +// IsConsensusHeaderReceived calls the unexported isConsensusHeaderReceived function +func (sr *subroundEndRound) IsConsensusHeaderReceived() (bool, data.HeaderHandler) { + return sr.isConsensusHeaderReceived() +} + +// IsOutOfTime calls the unexported isOutOfTime function +func (sr *subroundEndRound) IsOutOfTime() bool { + return sr.isOutOfTime() +} + +// VerifyNodesOnAggSigFail calls the unexported verifyNodesOnAggSigFail function +func (sr *subroundEndRound) VerifyNodesOnAggSigFail() ([]string, error) { + return sr.verifyNodesOnAggSigFail() +} + +// ComputeAggSigOnValidNodes calls the unexported computeAggSigOnValidNodes function +func (sr *subroundEndRound) ComputeAggSigOnValidNodes() ([]byte, []byte, error) { + return sr.computeAggSigOnValidNodes() +} + +// ReceivedInvalidSignersInfo calls the unexported receivedInvalidSignersInfo function +func (sr *subroundEndRound) ReceivedInvalidSignersInfo(cnsDta *consensus.Message) bool { + return sr.receivedInvalidSignersInfo(context.Background(), cnsDta) +} + +// VerifyInvalidSigners calls the unexported verifyInvalidSigners function +func (sr *subroundEndRound) VerifyInvalidSigners(invalidSigners []byte) error { + return sr.verifyInvalidSigners(invalidSigners) +} + +// GetMinConsensusGroupIndexOfManagedKeys calls the unexported getMinConsensusGroupIndexOfManagedKeys function +func (sr *subroundEndRound) GetMinConsensusGroupIndexOfManagedKeys() int { + return sr.getMinConsensusGroupIndexOfManagedKeys() +} + +// CreateAndBroadcastInvalidSigners calls the unexported createAndBroadcastInvalidSigners function +func (sr *subroundEndRound) CreateAndBroadcastInvalidSigners(invalidSigners []byte) { + sr.createAndBroadcastInvalidSigners(invalidSigners) +} + +// GetFullMessagesForInvalidSigners calls the unexported getFullMessagesForInvalidSigners function +func (sr *subroundEndRound) GetFullMessagesForInvalidSigners(invalidPubKeys []string) ([]byte, error) { + return sr.getFullMessagesForInvalidSigners(invalidPubKeys) +} + +// GetSentSignatureTracker returns the subroundEndRound's SentSignaturesTracker instance +func (sr *subroundEndRound) GetSentSignatureTracker() spos.SentSignaturesTracker { + return sr.sentSignatureTracker +} diff --git a/consensus/spos/bls/v1/subroundBlock.go b/consensus/spos/bls/v1/subroundBlock.go new file mode 100644 index 00000000000..eac4a7c9204 --- /dev/null +++ b/consensus/spos/bls/v1/subroundBlock.go @@ -0,0 +1,687 @@ +package v1 + +import ( + "context" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" +) + +// maxAllowedSizeInBytes defines how many bytes are allowed as payload in a message +const maxAllowedSizeInBytes = uint32(core.MegabyteSize * 95 / 100) + +// subroundBlock defines the data needed by the subround Block +type subroundBlock struct { + *spos.Subround + + processingThresholdPercentage int +} + +// NewSubroundBlock creates a subroundBlock object +func NewSubroundBlock( + baseSubround *spos.Subround, + 
extend func(subroundId int), + processingThresholdPercentage int, +) (*subroundBlock, error) { + err := checkNewSubroundBlockParams(baseSubround) + if err != nil { + return nil, err + } + + srBlock := subroundBlock{ + Subround: baseSubround, + processingThresholdPercentage: processingThresholdPercentage, + } + + srBlock.Job = srBlock.doBlockJob + srBlock.Check = srBlock.doBlockConsensusCheck + srBlock.Extend = extend + + return &srBlock, nil +} + +func checkNewSubroundBlockParams( + baseSubround *spos.Subround, +) error { + if baseSubround == nil { + return spos.ErrNilSubround + } + + if check.IfNil(baseSubround.ConsensusStateHandler) { + return spos.ErrNilConsensusState + } + + err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) + + return err +} + +// doBlockJob method does the job of the subround Block +func (sr *subroundBlock) doBlockJob(ctx context.Context) bool { + isSelfLeader := sr.IsSelfLeaderInCurrentRound() && sr.ShouldConsiderSelfKeyInConsensus() + if !isSelfLeader && !sr.IsMultiKeyLeaderInCurrentRound() { // is NOT self leader in this round? + return false + } + + if sr.RoundHandler().Index() <= sr.getRoundInLastCommittedBlock() { + return false + } + + if sr.IsLeaderJobDone(sr.Current()) { + return false + } + + if sr.IsSubroundFinished(sr.Current()) { + return false + } + + metricStatTime := time.Now() + defer sr.computeSubroundProcessingMetric(metricStatTime, common.MetricCreatedProposedBlock) + + header, err := sr.createHeader() + if err != nil { + printLogMessage(ctx, "doBlockJob.createHeader", err) + return false + } + + header, body, err := sr.createBlock(header) + if err != nil { + printLogMessage(ctx, "doBlockJob.createBlock", err) + return false + } + + sentWithSuccess := sr.sendBlock(header, body) + if !sentWithSuccess { + return false + } + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("doBlockJob.GetLeader", "error", errGetLeader) + return false + } + + err = sr.SetJobDone(leader, sr.Current(), true) + if err != nil { + log.Debug("doBlockJob.SetSelfJobDone", "error", err.Error()) + return false + } + + // placeholder for subroundBlock.doBlockJob script + + sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(header, body, sr.GetRoundTimeStamp()) + + return true +} + +func printLogMessage(ctx context.Context, baseMessage string, err error) { + if common.IsContextDone(ctx) { + log.Debug(baseMessage + " context is closing") + return + } + + log.Debug(baseMessage, "error", err.Error()) +} + +func (sr *subroundBlock) sendBlock(header data.HeaderHandler, body data.BodyHandler) bool { + marshalizedBody, err := sr.Marshalizer().Marshal(body) + if err != nil { + log.Debug("sendBlock.Marshal: body", "error", err.Error()) + return false + } + + marshalizedHeader, err := sr.Marshalizer().Marshal(header) + if err != nil { + log.Debug("sendBlock.Marshal: header", "error", err.Error()) + return false + } + + if sr.couldBeSentTogether(marshalizedBody, marshalizedHeader) { + return sr.sendHeaderAndBlockBody(header, body, marshalizedBody, marshalizedHeader) + } + + if !sr.sendBlockBody(body, marshalizedBody) || !sr.sendBlockHeader(header, marshalizedHeader) { + return false + } + + return true +} + +func (sr *subroundBlock) couldBeSentTogether(marshalizedBody []byte, marshalizedHeader []byte) bool { + bodyAndHeaderSize := uint32(len(marshalizedBody) + len(marshalizedHeader)) + log.Debug("couldBeSentTogether", + "body size", len(marshalizedBody), + "header size", len(marshalizedHeader), + "body and header size", 
bodyAndHeaderSize, + "max allowed size in bytes", maxAllowedSizeInBytes) + return bodyAndHeaderSize <= maxAllowedSizeInBytes +} + +func (sr *subroundBlock) createBlock(header data.HeaderHandler) (data.HeaderHandler, data.BodyHandler, error) { + startTime := sr.GetRoundTimeStamp() + maxTime := time.Duration(sr.EndTime()) + haveTimeInCurrentSubround := func() bool { + return sr.RoundHandler().RemainingTime(startTime, maxTime) > 0 + } + + finalHeader, blockBody, err := sr.BlockProcessor().CreateBlock( + header, + haveTimeInCurrentSubround, + ) + if err != nil { + return nil, nil, err + } + + return finalHeader, blockBody, nil +} + +// sendHeaderAndBlockBody method sends the proposed header and block body in the subround Block +func (sr *subroundBlock) sendHeaderAndBlockBody( + headerHandler data.HeaderHandler, + bodyHandler data.BodyHandler, + marshalizedBody []byte, + marshalizedHeader []byte, +) bool { + headerHash := sr.Hasher().Compute(string(marshalizedHeader)) + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("sendBlockBodyAndHeader.GetLeader", "error", errGetLeader) + return false + } + + cnsMsg := consensus.NewConsensusMessage( + headerHash, + nil, + marshalizedBody, + marshalizedHeader, + []byte(leader), + nil, + int(bls.MtBlockBodyAndHeader), + sr.RoundHandler().Index(), + sr.ChainID(), + nil, + nil, + nil, + sr.GetAssociatedPid([]byte(leader)), + nil, + ) + + err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) + if err != nil { + log.Debug("sendHeaderAndBlockBody.BroadcastConsensusMessage", "error", err.Error()) + return false + } + + log.Debug("step 1: block body and header have been sent", + "nonce", headerHandler.GetNonce(), + "hash", headerHash) + + sr.SetData(headerHash) + sr.SetBody(bodyHandler) + sr.SetHeader(headerHandler) + + return true +} + +// sendBlockBody method sends the proposed block body in the subround Block +func (sr *subroundBlock) sendBlockBody(bodyHandler data.BodyHandler, marshalizedBody []byte) bool { + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("sendBlockBody.GetLeader", "error", errGetLeader) + return false + } + + cnsMsg := consensus.NewConsensusMessage( + nil, + nil, + marshalizedBody, + nil, + []byte(leader), + nil, + int(bls.MtBlockBody), + sr.RoundHandler().Index(), + sr.ChainID(), + nil, + nil, + nil, + sr.GetAssociatedPid([]byte(leader)), + nil, + ) + + err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) + if err != nil { + log.Debug("sendBlockBody.BroadcastConsensusMessage", "error", err.Error()) + return false + } + + log.Debug("step 1: block body has been sent") + + sr.SetBody(bodyHandler) + + return true +} + +// sendBlockHeader method sends the proposed block header in the subround Block +func (sr *subroundBlock) sendBlockHeader(headerHandler data.HeaderHandler, marshalizedHeader []byte) bool { + headerHash := sr.Hasher().Compute(string(marshalizedHeader)) + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("sendBlockBody.GetLeader", "error", errGetLeader) + return false + } + + cnsMsg := consensus.NewConsensusMessage( + headerHash, + nil, + nil, + marshalizedHeader, + []byte(leader), + nil, + int(bls.MtBlockHeader), + sr.RoundHandler().Index(), + sr.ChainID(), + nil, + nil, + nil, + sr.GetAssociatedPid([]byte(leader)), + nil, + ) + + err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) + if err != nil { + log.Debug("sendBlockHeader.BroadcastConsensusMessage", "error", err.Error()) + return false + } + + 
log.Debug("step 1: block header has been sent", + "nonce", headerHandler.GetNonce(), + "hash", headerHash) + + sr.SetData(headerHash) + sr.SetHeader(headerHandler) + + return true +} + +func (sr *subroundBlock) createHeader() (data.HeaderHandler, error) { + var nonce uint64 + var prevHash []byte + var prevRandSeed []byte + + currentHeader := sr.Blockchain().GetCurrentBlockHeader() + if check.IfNil(currentHeader) { + nonce = sr.Blockchain().GetGenesisHeader().GetNonce() + 1 + prevHash = sr.Blockchain().GetGenesisHeaderHash() + prevRandSeed = sr.Blockchain().GetGenesisHeader().GetRandSeed() + } else { + nonce = currentHeader.GetNonce() + 1 + prevHash = sr.Blockchain().GetCurrentBlockHeaderHash() + prevRandSeed = currentHeader.GetRandSeed() + } + + round := uint64(sr.RoundHandler().Index()) + hdr, err := sr.BlockProcessor().CreateNewHeader(round, nonce) + if err != nil { + return nil, err + } + + err = hdr.SetPrevHash(prevHash) + if err != nil { + return nil, err + } + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + return nil, errGetLeader + } + + randSeed, err := sr.SigningHandler().CreateSignatureForPublicKey(prevRandSeed, []byte(leader)) + if err != nil { + return nil, err + } + + err = hdr.SetShardID(sr.ShardCoordinator().SelfId()) + if err != nil { + return nil, err + } + + err = hdr.SetTimeStamp(uint64(sr.RoundHandler().TimeStamp().Unix())) + if err != nil { + return nil, err + } + + err = hdr.SetPrevRandSeed(prevRandSeed) + if err != nil { + return nil, err + } + + err = hdr.SetRandSeed(randSeed) + if err != nil { + return nil, err + } + + err = hdr.SetChainID(sr.ChainID()) + if err != nil { + return nil, err + } + + return hdr, nil +} + +// receivedBlockBodyAndHeader method is called when a block body and a block header is received +func (sr *subroundBlock) receivedBlockBodyAndHeader(ctx context.Context, cnsDta *consensus.Message) bool { + sw := core.NewStopWatch() + sw.Start("receivedBlockBodyAndHeader") + + defer func() { + sw.Stop("receivedBlockBodyAndHeader") + log.Debug("time measurements of receivedBlockBodyAndHeader", sw.GetMeasurements()...) + }() + + node := string(cnsDta.PubKey) + + if sr.IsConsensusDataSet() { + return false + } + + if !sr.IsNodeLeaderInCurrentRound(node) { // is NOT this node leader in current round? 
+ sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyDecreaseFactor, + ) + + return false + } + + if sr.IsBlockBodyAlreadyReceived() { + return false + } + + if sr.IsHeaderAlreadyReceived() { + return false + } + + if !sr.CanProcessReceivedMessage(cnsDta, sr.RoundHandler().Index(), sr.Current()) { + return false + } + + sr.SetData(cnsDta.BlockHeaderHash) + sr.SetBody(sr.BlockProcessor().DecodeBlockBody(cnsDta.Body)) + sr.SetHeader(sr.BlockProcessor().DecodeBlockHeader(cnsDta.Header)) + + isInvalidData := check.IfNil(sr.GetBody()) || sr.isInvalidHeaderOrData() + if isInvalidData { + return false + } + + log.Debug("step 1: block body and header have been received", + "nonce", sr.GetHeader().GetNonce(), + "hash", cnsDta.BlockHeaderHash) + + sw.Start("processReceivedBlock") + blockProcessedWithSuccess := sr.processReceivedBlock(ctx, cnsDta) + sw.Stop("processReceivedBlock") + + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyIncreaseFactor, + ) + + return blockProcessedWithSuccess +} + +func (sr *subroundBlock) isInvalidHeaderOrData() bool { + return sr.GetData() == nil || check.IfNil(sr.GetHeader()) || sr.GetHeader().CheckFieldsForNil() != nil +} + +// receivedBlockBody method is called when a block body is received through the block body channel +func (sr *subroundBlock) receivedBlockBody(ctx context.Context, cnsDta *consensus.Message) bool { + node := string(cnsDta.PubKey) + + if !sr.IsNodeLeaderInCurrentRound(node) { // is NOT this node leader in current round? + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyDecreaseFactor, + ) + + return false + } + + if sr.IsBlockBodyAlreadyReceived() { + return false + } + + if !sr.CanProcessReceivedMessage(cnsDta, sr.RoundHandler().Index(), sr.Current()) { + return false + } + + sr.SetBody(sr.BlockProcessor().DecodeBlockBody(cnsDta.Body)) + + if check.IfNil(sr.GetBody()) { + return false + } + + log.Debug("step 1: block body has been received") + + blockProcessedWithSuccess := sr.processReceivedBlock(ctx, cnsDta) + + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyIncreaseFactor, + ) + + return blockProcessedWithSuccess +} + +// receivedBlockHeader method is called when a block header is received through the block header channel. +// If the block header is valid, then the validatorRoundStates map corresponding to the node which sent it, +// is set on true for the subround Block +func (sr *subroundBlock) receivedBlockHeader(ctx context.Context, cnsDta *consensus.Message) bool { + node := string(cnsDta.PubKey) + + if sr.IsConsensusDataSet() { + return false + } + + if !sr.IsNodeLeaderInCurrentRound(node) { // is NOT this node leader in current round? 
+ sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyDecreaseFactor, + ) + + return false + } + + if sr.IsHeaderAlreadyReceived() { + return false + } + + if !sr.CanProcessReceivedMessage(cnsDta, sr.RoundHandler().Index(), sr.Current()) { + return false + } + + sr.SetData(cnsDta.BlockHeaderHash) + sr.SetHeader(sr.BlockProcessor().DecodeBlockHeader(cnsDta.Header)) + + if sr.isInvalidHeaderOrData() { + return false + } + + log.Debug("step 1: block header has been received", + "nonce", sr.GetHeader().GetNonce(), + "hash", cnsDta.BlockHeaderHash) + blockProcessedWithSuccess := sr.processReceivedBlock(ctx, cnsDta) + + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyIncreaseFactor, + ) + + return blockProcessedWithSuccess +} + +func (sr *subroundBlock) processReceivedBlock(ctx context.Context, cnsDta *consensus.Message) bool { + if check.IfNil(sr.GetBody()) { + return false + } + if check.IfNil(sr.GetHeader()) { + return false + } + + defer func() { + sr.SetProcessingBlock(false) + }() + + sr.SetProcessingBlock(true) + + shouldNotProcessBlock := sr.GetExtendedCalled() || cnsDta.RoundIndex < sr.RoundHandler().Index() + if shouldNotProcessBlock { + log.Debug("canceled round, extended has been called or round index has been changed", + "round", sr.RoundHandler().Index(), + "subround", sr.Name(), + "cnsDta round", cnsDta.RoundIndex, + "extended called", sr.GetExtendedCalled(), + ) + return false + } + + node := string(cnsDta.PubKey) + + startTime := sr.GetRoundTimeStamp() + maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 + remainingTimeInCurrentRound := func() time.Duration { + return sr.RoundHandler().RemainingTime(startTime, maxTime) + } + + metricStatTime := time.Now() + defer sr.computeSubroundProcessingMetric(metricStatTime, common.MetricProcessedProposedBlock) + + err := sr.BlockProcessor().ProcessBlock( + sr.GetHeader(), + sr.GetBody(), + remainingTimeInCurrentRound, + ) + + if cnsDta.RoundIndex < sr.RoundHandler().Index() { + log.Debug("canceled round, round index has been changed", + "round", sr.RoundHandler().Index(), + "subround", sr.Name(), + "cnsDta round", cnsDta.RoundIndex, + ) + return false + } + + if err != nil { + sr.printCancelRoundLogMessage(ctx, err) + sr.SetRoundCanceled(true) + + return false + } + + err = sr.SetJobDone(node, sr.Current(), true) + if err != nil { + sr.printCancelRoundLogMessage(ctx, err) + return false + } + + sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(sr.GetHeader(), sr.GetBody(), sr.GetRoundTimeStamp()) + + return true +} + +func (sr *subroundBlock) printCancelRoundLogMessage(ctx context.Context, err error) { + if common.IsContextDone(ctx) { + log.Debug("canceled round as the context is closing") + return + } + + log.Debug("canceled round", + "round", sr.RoundHandler().Index(), + "subround", sr.Name(), + "error", err.Error()) +} + +func (sr *subroundBlock) computeSubroundProcessingMetric(startTime time.Time, metric string) { + subRoundDuration := sr.EndTime() - sr.StartTime() + if subRoundDuration == 0 { + // can not do division by 0 + return + } + + percent := uint64(time.Since(startTime)) * 100 / uint64(subRoundDuration) + sr.AppStatusHandler().SetUInt64Value(metric, percent) +} + +// doBlockConsensusCheck method checks if the consensus in the subround Block is achieved +func (sr *subroundBlock) doBlockConsensusCheck() bool { + if 
sr.GetRoundCanceled() { + return false + } + + if sr.IsSubroundFinished(sr.Current()) { + return true + } + + threshold := sr.Threshold(sr.Current()) + if sr.isBlockReceived(threshold) { + log.Debug("step 1: subround has been finished", + "subround", sr.Name()) + sr.SetStatus(sr.Current(), spos.SsFinished) + return true + } + + return false +} + +// isBlockReceived method checks if the block was received from the leader in the current round +func (sr *subroundBlock) isBlockReceived(threshold int) bool { + n := 0 + + for i := 0; i < len(sr.ConsensusGroup()); i++ { + node := sr.ConsensusGroup()[i] + isJobDone, err := sr.JobDone(node, sr.Current()) + if err != nil { + log.Debug("isBlockReceived.JobDone", + "node", node, + "subround", sr.Name(), + "error", err.Error()) + continue + } + + if isJobDone { + n++ + } + } + + return n >= threshold +} + +func (sr *subroundBlock) getRoundInLastCommittedBlock() int64 { + roundInLastCommittedBlock := int64(0) + currentHeader := sr.Blockchain().GetCurrentBlockHeader() + if !check.IfNil(currentHeader) { + roundInLastCommittedBlock = int64(currentHeader.GetRound()) + } + + return roundInLastCommittedBlock +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sr *subroundBlock) IsInterfaceNil() bool { + return sr == nil +} diff --git a/consensus/spos/bls/v1/subroundBlock_test.go b/consensus/spos/bls/v1/subroundBlock_test.go new file mode 100644 index 00000000000..e0d4690021d --- /dev/null +++ b/consensus/spos/bls/v1/subroundBlock_test.go @@ -0,0 +1,1141 @@ +package v1_test + +import ( + "errors" + "fmt" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" + "github.com/multiversx/mx-chain-go/testscommon" + consensusMock "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" + "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +func defaultSubroundForSRBlock(consensusState *spos.ConsensusState, ch chan bool, + container *consensusMock.ConsensusCoreMock, appStatusHandler core.AppStatusHandler) (*spos.Subround, error) { + return spos.NewSubround( + bls.SrStartRound, + bls.SrBlock, + bls.SrSignature, + int64(5*roundTimeDuration/100), + int64(25*roundTimeDuration/100), + "(BLOCK)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) +} + +func createDefaultHeader() *block.Header { + return &block.Header{ + Nonce: 1, + PrevHash: []byte("prev hash"), + PrevRandSeed: []byte("prev rand seed"), + RandSeed: []byte("rand seed"), + RootHash: []byte("roothash"), + TxCount: 0, + ChainID: []byte("chain ID"), + SoftwareVersion: []byte("software version"), + AccumulatedFees: big.NewInt(0), + DeveloperFees: big.NewInt(0), + } +} + +func defaultSubroundBlockFromSubround(sr *spos.Subround) (v1.SubroundBlock, error) { + srBlock, err := v1.NewSubroundBlock( + sr, + extend, + v1.ProcessingThresholdPercent, + ) + + return srBlock, err +} + +func 
defaultSubroundBlockWithoutErrorFromSubround(sr *spos.Subround) v1.SubroundBlock { + srBlock, _ := v1.NewSubroundBlock( + sr, + extend, + v1.ProcessingThresholdPercent, + ) + + return srBlock +} + +func initSubroundBlock( + blockChain data.ChainHandler, + container *consensusMock.ConsensusCoreMock, + appStatusHandler core.AppStatusHandler, +) v1.SubroundBlock { + if blockChain == nil { + blockChain = &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &block.Header{} + }, + GetGenesisHeaderCalled: func() data.HeaderHandler { + return &block.Header{ + Nonce: uint64(0), + Signature: []byte("genesis signature"), + RandSeed: []byte{0}, + } + }, + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis header hash") + }, + } + } + + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + container.SetBlockchain(blockChain) + + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, appStatusHandler) + srBlock, _ := defaultSubroundBlockFromSubround(sr) + return srBlock +} + +func createConsensusContainers() []*consensusMock.ConsensusCoreMock { + consensusContainers := make([]*consensusMock.ConsensusCoreMock, 0) + container := consensusMock.InitConsensusCore() + consensusContainers = append(consensusContainers, container) + container = consensusMock.InitConsensusCoreHeaderV2() + consensusContainers = append(consensusContainers, container) + return consensusContainers +} + +func initSubroundBlockWithBlockProcessor( + bp *testscommon.BlockProcessorStub, + container *consensusMock.ConsensusCoreMock, +) v1.SubroundBlock { + blockChain := &testscommon.ChainHandlerStub{ + GetGenesisHeaderCalled: func() data.HeaderHandler { + return &block.Header{ + Nonce: uint64(0), + Signature: []byte("genesis signature"), + } + }, + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis header hash") + }, + } + blockProcessorMock := bp + + container.SetBlockchain(blockChain) + container.SetBlockProcessor(blockProcessorMock) + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + srBlock, _ := defaultSubroundBlockFromSubround(sr) + return srBlock +} + +func TestSubroundBlock_NewSubroundBlockNilSubroundShouldFail(t *testing.T) { + t.Parallel() + + srBlock, err := v1.NewSubroundBlock( + nil, + extend, + v1.ProcessingThresholdPercent, + ) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilSubround, err) +} + +func TestSubroundBlock_NewSubroundBlockNilBlockchainShouldFail(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetBlockchain(nil) + + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilBlockChain, err) +} + +func TestSubroundBlock_NewSubroundBlockNilBlockProcessorShouldFail(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetBlockProcessor(nil) + + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, 
spos.ErrNilBlockProcessor, err) +} + +func TestSubroundBlock_NewSubroundBlockNilConsensusStateShouldFail(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + sr.ConsensusStateHandler = nil + + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilConsensusState, err) +} + +func TestSubroundBlock_NewSubroundBlockNilHasherShouldFail(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetHasher(nil) + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilHasher, err) +} + +func TestSubroundBlock_NewSubroundBlockNilMarshalizerShouldFail(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetMarshalizer(nil) + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilMarshalizer, err) +} + +func TestSubroundBlock_NewSubroundBlockNilMultiSignerContainerShouldFail(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetMultiSignerContainer(nil) + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilMultiSignerContainer, err) +} + +func TestSubroundBlock_NewSubroundBlockNilRoundHandlerShouldFail(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetRoundHandler(nil) + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilRoundHandler, err) +} + +func TestSubroundBlock_NewSubroundBlockNilShardCoordinatorShouldFail(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetShardCoordinator(nil) + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, spos.ErrNilShardCoordinator, err) +} + +func TestSubroundBlock_NewSubroundBlockNilSyncTimerShouldFail(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + + container.SetSyncTimer(nil) + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.Nil(t, srBlock) + assert.Equal(t, 
spos.ErrNilSyncTimer, err) +} + +func TestSubroundBlock_NewSubroundBlockShouldWork(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + srBlock, err := defaultSubroundBlockFromSubround(sr) + assert.NotNil(t, srBlock) + assert.Nil(t, err) +} + +func TestSubroundBlock_DoBlockJob(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + r := sr.DoBlockJob() + assert.False(t, r) + + sr.SetSelfPubKey(sr.Leader()) + _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrBlock, true) + r = sr.DoBlockJob() + assert.False(t, r) + + _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrBlock, false) + sr.SetStatus(bls.SrBlock, spos.SsFinished) + r = sr.DoBlockJob() + assert.False(t, r) + + sr.SetStatus(bls.SrBlock, spos.SsNotFinished) + bpm := &testscommon.BlockProcessorStub{} + err := errors.New("error") + bpm.CreateBlockCalled = func(header data.HeaderHandler, remainingTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return header, nil, err + } + container.SetBlockProcessor(bpm) + r = sr.DoBlockJob() + assert.False(t, r) + + bpm = consensusMock.InitBlockProcessorMock(container.Marshalizer()) + container.SetBlockProcessor(bpm) + bm := &consensusMock.BroadcastMessengerMock{ + BroadcastConsensusMessageCalled: func(message *consensus.Message) error { + return nil + }, + } + container.SetBroadcastMessenger(bm) + container.SetRoundHandler(&consensusMock.RoundHandlerMock{ + RoundIndex: 1, + }) + r = sr.DoBlockJob() + assert.True(t, r) + assert.Equal(t, uint64(1), sr.GetHeader().GetNonce()) +} + +func TestSubroundBlock_ReceivedBlockBodyAndHeaderDataAlreadySet(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + + hdr := &block.Header{Nonce: 1} + blkBody := &block.Body{} + + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) + + sr.SetData([]byte("some data")) + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + assert.False(t, r) +} + +func TestSubroundBlock_ReceivedBlockBodyAndHeaderNodeNotLeaderInCurrentRound(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + + hdr := &block.Header{Nonce: 1} + blkBody := &block.Body{} + + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[1]), bls.MtBlockBodyAndHeader) + + sr.SetData(nil) + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + assert.False(t, r) +} + +func TestSubroundBlock_ReceivedBlockBodyAndHeaderCannotProcessJobDone(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + + hdr := &block.Header{Nonce: 1} + blkBody := &block.Body{} + + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) + + sr.SetData(nil) + _ = sr.SetJobDone(sr.Leader(), bls.SrBlock, true) + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + + assert.False(t, r) +} + +func TestSubroundBlock_ReceivedBlockBodyAndHeaderErrorDecoding(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + blProc := 
consensusMock.InitBlockProcessorMock(container.Marshalizer()) + blProc.DecodeBlockHeaderCalled = func(dta []byte) data.HeaderHandler { + // error decoding so return nil + return nil + } + container.SetBlockProcessor(blProc) + + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + + hdr := &block.Header{Nonce: 1} + blkBody := &block.Body{} + + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) + + sr.SetData(nil) + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + + assert.False(t, r) +} + +func TestSubroundBlock_ReceivedBlockBodyAndHeaderBodyAlreadyReceived(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + + hdr := &block.Header{Nonce: 1} + blkBody := &block.Body{} + + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) + + sr.SetData(nil) + sr.SetBody(&block.Body{}) + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + + assert.False(t, r) +} + +func TestSubroundBlock_ReceivedBlockBodyAndHeaderHeaderAlreadyReceived(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + + hdr := &block.Header{Nonce: 1} + blkBody := &block.Body{} + + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.Leader()), bls.MtBlockBodyAndHeader) + + sr.SetData(nil) + sr.SetHeader(&block.Header{Nonce: 1}) + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + assert.False(t, r) +} + +func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { + t.Parallel() + + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + + t.Run("block is valid", func(t *testing.T) { + hdr := createDefaultHeader() + blkBody := &block.Body{} + leader, err := sr.GetLeader() + require.Nil(t, err) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) + sr.SetData(nil) + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + assert.True(t, r) + }) + t.Run("block is not valid", func(t *testing.T) { + hdr := &block.Header{ + Nonce: 1, + } + blkBody := &block.Body{} + leader, err := sr.GetLeader() + require.Nil(t, err) + cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) + sr.SetData(nil) + r := sr.ReceivedBlockBodyAndHeader(cnsMsg) + assert.False(t, r) + }) +} + +func createConsensusMessage(header *block.Header, body *block.Body, leader []byte, topic consensus.MessageType) *consensus.Message { + marshaller := &marshallerMock.MarshalizerMock{} + hasher := &hashingMocks.HasherMock{} + + hdrStr, _ := marshaller.Marshal(header) + hdrHash := hasher.Compute(string(hdrStr)) + blkBodyStr, _ := marshaller.Marshal(body) + + return consensus.NewConsensusMessage( + hdrHash, + nil, + blkBodyStr, + hdrStr, + leader, + []byte("sig"), + int(topic), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) +} + +func TestSubroundBlock_ReceivedBlock(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + blockProcessorMock := consensusMock.InitBlockProcessorMock(container.Marshalizer()) + blkBody := &block.Body{} + blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) + leader, err := sr.GetLeader() + assert.Nil(t, err) + cnsMsg := consensus.NewConsensusMessage( + nil, + nil, + 
blkBodyStr, + nil, + []byte(leader), + []byte("sig"), + int(bls.MtBlockBody), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + sr.SetBody(&block.Body{}) + r := sr.ReceivedBlockBody(cnsMsg) + assert.False(t, r) + + sr.SetBody(nil) + cnsMsg.PubKey = []byte(sr.ConsensusGroup()[1]) + r = sr.ReceivedBlockBody(cnsMsg) + assert.False(t, r) + + cnsMsg.PubKey = []byte(sr.ConsensusGroup()[0]) + sr.SetStatus(bls.SrBlock, spos.SsFinished) + r = sr.ReceivedBlockBody(cnsMsg) + assert.False(t, r) + + sr.SetStatus(bls.SrBlock, spos.SsNotFinished) + r = sr.ReceivedBlockBody(cnsMsg) + assert.False(t, r) + + hdr := createDefaultHeader() + hdr.Nonce = 2 + hdrStr, _ := container.Marshalizer().Marshal(hdr) + hdrHash := (&hashingMocks.HasherMock{}).Compute(string(hdrStr)) + cnsMsg = consensus.NewConsensusMessage( + hdrHash, + nil, + nil, + hdrStr, + []byte(leader), + []byte("sig"), + int(bls.MtBlockHeader), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + r = sr.ReceivedBlockHeader(cnsMsg) + assert.False(t, r) + + sr.SetData(nil) + sr.SetHeader(hdr) + r = sr.ReceivedBlockHeader(cnsMsg) + assert.False(t, r) + + sr.SetHeader(nil) + cnsMsg.PubKey = []byte(sr.ConsensusGroup()[1]) + r = sr.ReceivedBlockHeader(cnsMsg) + assert.False(t, r) + + cnsMsg.PubKey = []byte(sr.ConsensusGroup()[0]) + sr.SetStatus(bls.SrBlock, spos.SsFinished) + r = sr.ReceivedBlockHeader(cnsMsg) + assert.False(t, r) + + sr.SetStatus(bls.SrBlock, spos.SsNotFinished) + container.SetBlockProcessor(blockProcessorMock) + sr.SetData(nil) + sr.SetHeader(nil) + hdr = createDefaultHeader() + hdr.Nonce = 1 + hdrStr, _ = marshallerMock.MarshalizerMock{}.Marshal(hdr) + hdrHash = (&hashingMocks.HasherMock{}).Compute(string(hdrStr)) + cnsMsg.BlockHeaderHash = hdrHash + cnsMsg.Header = hdrStr + r = sr.ReceivedBlockHeader(cnsMsg) + assert.True(t, r) +} + +func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenBodyAndHeaderAreNotSet(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + leader, _ := sr.GetLeader() + cnsMsg := consensus.NewConsensusMessage( + nil, + nil, + nil, + nil, + []byte(leader), + []byte("sig"), + int(bls.MtBlockBodyAndHeader), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + assert.False(t, sr.ProcessReceivedBlock(cnsMsg)) +} + +func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFails(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + blProcMock := consensusMock.InitBlockProcessorMock(container.Marshalizer()) + err := errors.New("error process block") + blProcMock.ProcessBlockCalled = func(data.HeaderHandler, data.BodyHandler, func() time.Duration) error { + return err + } + container.SetBlockProcessor(blProcMock) + hdr := &block.Header{} + blkBody := &block.Body{} + blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) + leader, _ := sr.GetLeader() + cnsMsg := consensus.NewConsensusMessage( + nil, + nil, + blkBodyStr, + nil, + []byte(leader), + []byte("sig"), + int(bls.MtBlockBody), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + sr.SetHeader(hdr) + sr.SetBody(blkBody) + assert.False(t, sr.ProcessReceivedBlock(cnsMsg)) +} + +func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockReturnsInNextRound(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + sr := 
initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + hdr := &block.Header{} + blkBody := &block.Body{} + blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) + leader, _ := sr.GetLeader() + cnsMsg := consensus.NewConsensusMessage( + nil, + nil, + blkBodyStr, + nil, + []byte(leader), + []byte("sig"), + int(bls.MtBlockBody), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + sr.SetHeader(hdr) + sr.SetBody(blkBody) + blockProcessorMock := consensusMock.InitBlockProcessorMock(container.Marshalizer()) + blockProcessorMock.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { + return errors.New("error") + } + container.SetBlockProcessor(blockProcessorMock) + container.SetRoundHandler(&consensusMock.RoundHandlerMock{RoundIndex: 1}) + assert.False(t, sr.ProcessReceivedBlock(cnsMsg)) +} + +func TestSubroundBlock_ProcessReceivedBlockShouldReturnTrue(t *testing.T) { + t.Parallel() + + consensusContainers := createConsensusContainers() + for _, container := range consensusContainers { + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + hdr, _ := container.BlockProcessor().CreateNewHeader(1, 1) + hdr, blkBody, _ := container.BlockProcessor().CreateBlock(hdr, func() bool { return true }) + + blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) + leader, _ := sr.GetLeader() + cnsMsg := consensus.NewConsensusMessage( + nil, + nil, + blkBodyStr, + nil, + []byte(leader), + []byte("sig"), + int(bls.MtBlockBody), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + sr.SetHeader(hdr) + sr.SetBody(blkBody) + assert.True(t, sr.ProcessReceivedBlock(cnsMsg)) + } +} + +func TestSubroundBlock_RemainingTimeShouldReturnNegativeValue(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + roundHandlerMock := initRoundHandlerMock() + container.SetRoundHandler(roundHandlerMock) + + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + remainingTimeInThisRound := func() time.Duration { + roundStartTime := sr.RoundHandler().TimeStamp() + currentTime := sr.SyncTimer().CurrentTime() + elapsedTime := currentTime.Sub(roundStartTime) + remainingTime := sr.RoundHandler().TimeDuration()*85/100 - elapsedTime + + return remainingTime + } + container.SetSyncTimer(&consensusMock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + return time.Unix(0, 0).Add(roundTimeDuration * 84 / 100) + }}) + ret := remainingTimeInThisRound() + assert.True(t, ret > 0) + + container.SetSyncTimer(&consensusMock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + return time.Unix(0, 0).Add(roundTimeDuration * 85 / 100) + }}) + ret = remainingTimeInThisRound() + assert.True(t, ret == 0) + + container.SetSyncTimer(&consensusMock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + return time.Unix(0, 0).Add(roundTimeDuration * 86 / 100) + }}) + ret = remainingTimeInThisRound() + assert.True(t, ret < 0) +} + +func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr.SetRoundCanceled(true) + assert.False(t, sr.DoBlockConsensusCheck()) +} + +func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenSubroundIsFinished(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, 
&statusHandler.AppStatusHandlerStub{}) + sr.SetStatus(bls.SrBlock, spos.SsFinished) + assert.True(t, sr.DoBlockConsensusCheck()) +} + +func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenBlockIsReceivedReturnTrue(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + for i := 0; i < sr.Threshold(bls.SrBlock); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, true) + } + assert.True(t, sr.DoBlockConsensusCheck()) +} + +func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenBlockIsReceivedReturnFalse(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + assert.False(t, sr.DoBlockConsensusCheck()) +} + +func TestSubroundBlock_IsBlockReceived(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + for i := 0; i < len(sr.ConsensusGroup()); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, false) + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, false) + } + ok := sr.IsBlockReceived(1) + assert.False(t, ok) + + _ = sr.SetJobDone("A", bls.SrBlock, true) + isJobDone, _ := sr.JobDone("A", bls.SrBlock) + assert.True(t, isJobDone) + + ok = sr.IsBlockReceived(1) + assert.True(t, ok) + + ok = sr.IsBlockReceived(2) + assert.False(t, ok) +} + +func TestSubroundBlock_HaveTimeInCurrentSubroundShouldReturnTrue(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + haveTimeInCurrentSubound := func() bool { + roundStartTime := sr.RoundHandler().TimeStamp() + currentTime := sr.SyncTimer().CurrentTime() + elapsedTime := currentTime.Sub(roundStartTime) + remainingTime := sr.EndTime() - int64(elapsedTime) + + return time.Duration(remainingTime) > 0 + } + roundHandlerMock := &consensusMock.RoundHandlerMock{} + roundHandlerMock.TimeDurationCalled = func() time.Duration { + return 4000 * time.Millisecond + } + roundHandlerMock.TimeStampCalled = func() time.Time { + return time.Unix(0, 0) + } + syncTimerMock := &consensusMock.SyncTimerMock{} + timeElapsed := sr.EndTime() - 1 + syncTimerMock.CurrentTimeCalled = func() time.Time { + return time.Unix(0, timeElapsed) + } + container.SetRoundHandler(roundHandlerMock) + container.SetSyncTimer(syncTimerMock) + + assert.True(t, haveTimeInCurrentSubound()) +} + +func TestSubroundBlock_HaveTimeInCurrentSuboundShouldReturnFalse(t *testing.T) { + t.Parallel() + container := consensusMock.InitConsensusCore() + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + haveTimeInCurrentSubound := func() bool { + roundStartTime := sr.RoundHandler().TimeStamp() + currentTime := sr.SyncTimer().CurrentTime() + elapsedTime := currentTime.Sub(roundStartTime) + remainingTime := sr.EndTime() - int64(elapsedTime) + + return time.Duration(remainingTime) > 0 + } + roundHandlerMock := &consensusMock.RoundHandlerMock{} + roundHandlerMock.TimeDurationCalled = func() time.Duration { + return 4000 * time.Millisecond + } + roundHandlerMock.TimeStampCalled = func() time.Time { + return time.Unix(0, 0) + } + syncTimerMock := &consensusMock.SyncTimerMock{} + timeElapsed := sr.EndTime() + 1 + syncTimerMock.CurrentTimeCalled = func() time.Time { + return time.Unix(0, timeElapsed) + } + 
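+ // timeElapsed is set one nanosecond past sr.EndTime(), so haveTimeInCurrentSubound computes a negative remaining time and is expected to return false below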
container.SetRoundHandler(roundHandlerMock) + container.SetSyncTimer(syncTimerMock) + + assert.False(t, haveTimeInCurrentSubound()) +} + +func TestSubroundBlock_CreateHeaderNilCurrentHeader(t *testing.T) { + blockChain := &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return nil + }, + GetGenesisHeaderCalled: func() data.HeaderHandler { + return &block.Header{ + Nonce: uint64(0), + Signature: []byte("genesis signature"), + RandSeed: []byte{0}, + } + }, + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis header hash") + }, + } + + consensusContainers := createConsensusContainers() + for _, container := range consensusContainers { + sr := initSubroundBlock(blockChain, container, &statusHandler.AppStatusHandlerStub{}) + _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(nil, nil) + header, _ := sr.CreateHeader() + header, body, _ := sr.CreateBlock(header) + marshalizedBody, _ := sr.Marshalizer().Marshal(body) + marshalizedHeader, _ := sr.Marshalizer().Marshal(header) + _ = sr.SendBlockBody(body, marshalizedBody) + _ = sr.SendBlockHeader(header, marshalizedHeader) + + expectedHeader, _ := container.BlockProcessor().CreateNewHeader(uint64(sr.RoundHandler().Index()), uint64(1)) + err := expectedHeader.SetTimeStamp(uint64(sr.RoundHandler().TimeStamp().Unix())) + require.Nil(t, err) + err = expectedHeader.SetRootHash([]byte{}) + require.Nil(t, err) + err = expectedHeader.SetPrevHash(sr.BlockChain().GetGenesisHeaderHash()) + require.Nil(t, err) + err = expectedHeader.SetPrevRandSeed(sr.BlockChain().GetGenesisHeader().GetRandSeed()) + require.Nil(t, err) + err = expectedHeader.SetRandSeed(make([]byte, 0)) + require.Nil(t, err) + err = expectedHeader.SetMiniBlockHeaderHandlers(header.GetMiniBlockHeaderHandlers()) + require.Nil(t, err) + err = expectedHeader.SetChainID(chainID) + require.Nil(t, err) + require.Equal(t, expectedHeader, header) + } +} + +func TestSubroundBlock_CreateHeaderNotNilCurrentHeader(t *testing.T) { + consensusContainers := createConsensusContainers() + for _, container := range consensusContainers { + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(&block.Header{ + Nonce: 1, + }, []byte("root hash")) + + header, _ := sr.CreateHeader() + header, body, _ := sr.CreateBlock(header) + marshalizedBody, _ := sr.Marshalizer().Marshal(body) + marshalizedHeader, _ := sr.Marshalizer().Marshal(header) + _ = sr.SendBlockBody(body, marshalizedBody) + _ = sr.SendBlockHeader(header, marshalizedHeader) + + expectedHeader, _ := container.BlockProcessor().CreateNewHeader( + uint64(sr.RoundHandler().Index()), + sr.BlockChain().GetCurrentBlockHeader().GetNonce()+1) + err := expectedHeader.SetTimeStamp(uint64(sr.RoundHandler().TimeStamp().Unix())) + require.Nil(t, err) + err = expectedHeader.SetRootHash([]byte{}) + require.Nil(t, err) + err = expectedHeader.SetPrevHash(sr.BlockChain().GetCurrentBlockHeaderHash()) + require.Nil(t, err) + err = expectedHeader.SetRandSeed(make([]byte, 0)) + require.Nil(t, err) + err = expectedHeader.SetMiniBlockHeaderHandlers(header.GetMiniBlockHeaderHandlers()) + require.Nil(t, err) + err = expectedHeader.SetChainID(chainID) + require.Nil(t, err) + require.Equal(t, expectedHeader, header) + } +} + +func TestSubroundBlock_CreateHeaderMultipleMiniBlocks(t *testing.T) { + mbHeaders := []block.MiniBlockHeader{ + {Hash: []byte("mb1"), SenderShardID: 1, ReceiverShardID: 1}, + {Hash: []byte("mb2"), SenderShardID: 1, 
ReceiverShardID: 2}, + {Hash: []byte("mb3"), SenderShardID: 2, ReceiverShardID: 3}, + } + blockChainMock := testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &block.Header{ + Nonce: 1, + } + }, + } + container := consensusMock.InitConsensusCore() + bp := consensusMock.InitBlockProcessorMock(container.Marshalizer()) + bp.CreateBlockCalled = func(header data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + shardHeader, _ := header.(*block.Header) + shardHeader.MiniBlockHeaders = mbHeaders + shardHeader.RootHash = []byte{} + + return shardHeader, &block.Body{}, nil + } + sr := initSubroundBlockWithBlockProcessor(bp, container) + container.SetBlockchain(&blockChainMock) + + header, _ := sr.CreateHeader() + header, body, _ := sr.CreateBlock(header) + marshalizedBody, _ := sr.Marshalizer().Marshal(body) + marshalizedHeader, _ := sr.Marshalizer().Marshal(header) + _ = sr.SendBlockBody(body, marshalizedBody) + _ = sr.SendBlockHeader(header, marshalizedHeader) + + expectedHeader := &block.Header{ + Round: uint64(sr.RoundHandler().Index()), + TimeStamp: uint64(sr.RoundHandler().TimeStamp().Unix()), + RootHash: []byte{}, + Nonce: sr.BlockChain().GetCurrentBlockHeader().GetNonce() + 1, + PrevHash: sr.BlockChain().GetCurrentBlockHeaderHash(), + RandSeed: make([]byte, 0), + MiniBlockHeaders: mbHeaders, + ChainID: chainID, + } + + assert.Equal(t, expectedHeader, header) +} + +func TestSubroundBlock_CreateHeaderNilMiniBlocks(t *testing.T) { + expectedErr := errors.New("nil mini blocks") + container := consensusMock.InitConsensusCore() + bp := consensusMock.InitBlockProcessorMock(container.Marshalizer()) + bp.CreateBlockCalled = func(header data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return nil, nil, expectedErr + } + sr := initSubroundBlockWithBlockProcessor(bp, container) + _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(&block.Header{ + Nonce: 1, + }, []byte("root hash")) + header, _ := sr.CreateHeader() + _, _, err := sr.CreateBlock(header) + assert.Equal(t, expectedErr, err) +} + +func TestSubroundBlock_CallFuncRemainingTimeWithStructShouldWork(t *testing.T) { + roundStartTime := time.Now() + maxTime := 100 * time.Millisecond + newRoundStartTime := roundStartTime + remainingTimeInCurrentRound := func() time.Duration { + return RemainingTimeWithStruct(newRoundStartTime, maxTime) + } + assert.True(t, remainingTimeInCurrentRound() > 0) + + time.Sleep(200 * time.Millisecond) + assert.True(t, remainingTimeInCurrentRound() < 0) +} + +func TestSubroundBlock_CallFuncRemainingTimeWithStructShouldNotWork(t *testing.T) { + roundStartTime := time.Now() + maxTime := 100 * time.Millisecond + remainingTimeInCurrentRound := func() time.Duration { + return RemainingTimeWithStruct(roundStartTime, maxTime) + } + assert.True(t, remainingTimeInCurrentRound() > 0) + + time.Sleep(200 * time.Millisecond) + assert.True(t, remainingTimeInCurrentRound() < 0) + + roundStartTime = roundStartTime.Add(500 * time.Millisecond) + assert.False(t, remainingTimeInCurrentRound() < 0) +} + +func RemainingTimeWithStruct(startTime time.Time, maxTime time.Duration) time.Duration { + currentTime := time.Now() + elapsedTime := currentTime.Sub(startTime) + remainingTime := maxTime - elapsedTime + return remainingTime +} + +func TestSubroundBlock_ReceivedBlockComputeProcessDuration(t *testing.T) { + t.Parallel() + + srStartTime := int64(5 * roundTimeDuration / 100) + srEndTime := int64(25 * roundTimeDuration / 
100) + srDuration := srEndTime - srStartTime + delay := srDuration * 430 / 1000 + + container := consensusMock.InitConsensusCore() + receivedValue := uint64(0) + container.SetBlockProcessor(&testscommon.BlockProcessorStub{ + ProcessBlockCalled: func(_ data.HeaderHandler, _ data.BodyHandler, _ func() time.Duration) error { + time.Sleep(time.Duration(delay)) + return nil + }, + }) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) { + receivedValue = value + }}) + hdr := &block.Header{} + blkBody := &block.Body{} + blkBodyStr, _ := marshallerMock.MarshalizerMock{}.Marshal(blkBody) + + leader, err := sr.GetLeader() + assert.Nil(t, err) + cnsMsg := consensus.NewConsensusMessage( + nil, + nil, + blkBodyStr, + nil, + []byte(leader), + []byte("sig"), + int(bls.MtBlockBody), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + sr.SetHeader(hdr) + sr.SetBody(blkBody) + + minimumExpectedValue := uint64(delay * 100 / srDuration) + _ = sr.ProcessReceivedBlock(cnsMsg) + + assert.True(t, + receivedValue >= minimumExpectedValue, + fmt.Sprintf("minimum expected was %d, got %d", minimumExpectedValue, receivedValue), + ) +} + +func TestSubroundBlock_ReceivedBlockComputeProcessDurationWithZeroDurationShouldNotPanic(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not have paniced", r) + } + }() + + container := consensusMock.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) + srBlock := defaultSubroundBlockWithoutErrorFromSubround(sr) + + srBlock.ComputeSubroundProcessingMetric(time.Now(), "dummy") +} diff --git a/consensus/spos/bls/v1/subroundEndRound.go b/consensus/spos/bls/v1/subroundEndRound.go new file mode 100644 index 00000000000..c591c736aca --- /dev/null +++ b/consensus/spos/bls/v1/subroundEndRound.go @@ -0,0 +1,948 @@ +package v1 + +import ( + "bytes" + "context" + "fmt" + "sync" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/display" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" + "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/process/headerCheck" +) + +type subroundEndRound struct { + *spos.Subround + processingThresholdPercentage int + displayStatistics func() + appStatusHandler core.AppStatusHandler + mutProcessingEndRound sync.Mutex + sentSignatureTracker spos.SentSignaturesTracker +} + +// NewSubroundEndRound creates a subroundEndRound object +func NewSubroundEndRound( + baseSubround *spos.Subround, + extend func(subroundId int), + processingThresholdPercentage int, + displayStatistics func(), + appStatusHandler core.AppStatusHandler, + sentSignatureTracker spos.SentSignaturesTracker, +) (*subroundEndRound, error) { + err := checkNewSubroundEndRoundParams( + baseSubround, + ) + if err != nil { + return nil, err + } + if extend == nil { + return nil, fmt.Errorf("%w for extend function", spos.ErrNilFunctionHandler) + } + if check.IfNil(appStatusHandler) { + return nil, spos.ErrNilAppStatusHandler + } + if check.IfNil(sentSignatureTracker) { + 
return nil, ErrNilSentSignatureTracker + } + + srEndRound := subroundEndRound{ + Subround: baseSubround, + processingThresholdPercentage: processingThresholdPercentage, + displayStatistics: displayStatistics, + appStatusHandler: appStatusHandler, + mutProcessingEndRound: sync.Mutex{}, + sentSignatureTracker: sentSignatureTracker, + } + srEndRound.Job = srEndRound.doEndRoundJob + srEndRound.Check = srEndRound.doEndRoundConsensusCheck + srEndRound.Extend = extend + + return &srEndRound, nil +} + +func checkNewSubroundEndRoundParams( + baseSubround *spos.Subround, +) error { + if baseSubround == nil { + return spos.ErrNilSubround + } + if check.IfNil(baseSubround.ConsensusStateHandler) { + return spos.ErrNilConsensusState + } + + err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) + + return err +} + +// receivedBlockHeaderFinalInfo method is called when a block header final info is received +func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsDta *consensus.Message) bool { + node := string(cnsDta.PubKey) + + if !sr.IsConsensusDataSet() { + return false + } + + if !sr.IsNodeLeaderInCurrentRound(node) { // is NOT this node leader in current round? + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyDecreaseFactor, + ) + + return false + } + + if sr.IsSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() { + return false + } + + if !sr.IsConsensusDataEqual(cnsDta.BlockHeaderHash) { + return false + } + + if !sr.CanProcessReceivedMessage(cnsDta, sr.RoundHandler().Index(), sr.Current()) { + return false + } + + if !sr.isBlockHeaderFinalInfoValid(cnsDta) { + return false + } + + log.Debug("step 3: block header final info has been received", + "PubKeysBitmap", cnsDta.PubKeysBitmap, + "AggregateSignature", cnsDta.AggregateSignature, + "LeaderSignature", cnsDta.LeaderSignature) + + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyIncreaseFactor, + ) + + return sr.doEndRoundJobByParticipant(cnsDta) +} + +func (sr *subroundEndRound) isBlockHeaderFinalInfoValid(cnsDta *consensus.Message) bool { + if check.IfNil(sr.GetHeader()) { + return false + } + + header := sr.GetHeader().ShallowClone() + err := header.SetPubKeysBitmap(cnsDta.PubKeysBitmap) + if err != nil { + log.Debug("isBlockHeaderFinalInfoValid.SetPubKeysBitmap", "error", err.Error()) + return false + } + + err = header.SetSignature(cnsDta.AggregateSignature) + if err != nil { + log.Debug("isBlockHeaderFinalInfoValid.SetSignature", "error", err.Error()) + return false + } + + err = header.SetLeaderSignature(cnsDta.LeaderSignature) + if err != nil { + log.Debug("isBlockHeaderFinalInfoValid.SetLeaderSignature", "error", err.Error()) + return false + } + + err = sr.HeaderSigVerifier().VerifyLeaderSignature(header) + if err != nil { + log.Debug("isBlockHeaderFinalInfoValid.VerifyLeaderSignature", "error", err.Error()) + return false + } + + err = sr.HeaderSigVerifier().VerifySignature(header) + if err != nil { + log.Debug("isBlockHeaderFinalInfoValid.VerifySignature", "error", err.Error()) + return false + } + + return true +} + +// receivedInvalidSignersInfo method is called when a message with invalid signers has been received +func (sr *subroundEndRound) receivedInvalidSignersInfo(_ context.Context, cnsDta *consensus.Message) bool { + messageSender := string(cnsDta.PubKey) + + if !sr.IsConsensusDataSet() { + return false + } + + if 
!sr.IsNodeLeaderInCurrentRound(messageSender) { // is NOT this node leader in current round? + sr.PeerHonestyHandler().ChangeScore( + messageSender, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyDecreaseFactor, + ) + + return false + } + + if sr.IsSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() { + return false + } + + if !sr.IsConsensusDataEqual(cnsDta.BlockHeaderHash) { + return false + } + + if !sr.CanProcessReceivedMessage(cnsDta, sr.RoundHandler().Index(), sr.Current()) { + return false + } + + if len(cnsDta.InvalidSigners) == 0 { + return false + } + + err := sr.verifyInvalidSigners(cnsDta.InvalidSigners) + if err != nil { + log.Trace("receivedInvalidSignersInfo.verifyInvalidSigners", "error", err.Error()) + return false + } + + log.Debug("step 3: invalid signers info has been evaluated") + + sr.PeerHonestyHandler().ChangeScore( + messageSender, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.LeaderPeerHonestyIncreaseFactor, + ) + + return true +} + +func (sr *subroundEndRound) verifyInvalidSigners(invalidSigners []byte) error { + messages, err := sr.MessageSigningHandler().Deserialize(invalidSigners) + if err != nil { + return err + } + + for _, msg := range messages { + err = sr.verifyInvalidSigner(msg) + if err != nil { + return err + } + } + + return nil +} + +func (sr *subroundEndRound) verifyInvalidSigner(msg p2p.MessageP2P) error { + err := sr.MessageSigningHandler().Verify(msg) + if err != nil { + return err + } + + cnsMsg := &consensus.Message{} + err = sr.Marshalizer().Unmarshal(cnsMsg, msg.Data()) + if err != nil { + return err + } + + err = sr.SigningHandler().VerifySingleSignature(cnsMsg.PubKey, cnsMsg.BlockHeaderHash, cnsMsg.SignatureShare) + if err != nil { + log.Debug("verifyInvalidSigner: confirmed that node provided invalid signature", + "pubKey", cnsMsg.PubKey, + "blockHeaderHash", cnsMsg.BlockHeaderHash, + "error", err.Error(), + ) + sr.applyBlacklistOnNode(msg.Peer()) + } + + return nil +} + +func (sr *subroundEndRound) applyBlacklistOnNode(peer core.PeerID) { + sr.PeerBlacklistHandler().BlacklistPeer(peer, common.InvalidSigningBlacklistDuration) +} + +func (sr *subroundEndRound) receivedHeader(headerHandler data.HeaderHandler) { + if sr.ConsensusGroup() == nil || sr.IsSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() { + return + } + + sr.AddReceivedHeader(headerHandler) + + sr.doEndRoundJobByParticipant(nil) +} + +// doEndRoundJob method does the job of the subround EndRound +func (sr *subroundEndRound) doEndRoundJob(_ context.Context) bool { + if !sr.IsSelfLeaderInCurrentRound() && !sr.IsMultiKeyLeaderInCurrentRound() { + if sr.IsNodeInConsensusGroup(sr.SelfPubKey()) || sr.IsMultiKeyInConsensusGroup() { + err := sr.prepareBroadcastBlockDataForValidator() + if err != nil { + log.Warn("validator in consensus group preparing for delayed broadcast", + "error", err.Error()) + } + } + + return sr.doEndRoundJobByParticipant(nil) + } + + return sr.doEndRoundJobByLeader() +} + +func (sr *subroundEndRound) doEndRoundJobByLeader() bool { + bitmap := sr.GenerateBitmap(bls.SrSignature) + err := sr.checkSignaturesValidity(bitmap) + if err != nil { + log.Debug("doEndRoundJobByLeader.checkSignaturesValidity", "error", err.Error()) + return false + } + + header := sr.GetHeader() + if check.IfNil(header) { + log.Error("doEndRoundJobByLeader.CheckNilHeader", "error", spos.ErrNilHeader) + return false + } + + // Aggregate sig and add it to the block + bitmap, sig, err := 
sr.aggregateSigsAndHandleInvalidSigners(bitmap) + if err != nil { + log.Debug("doEndRoundJobByLeader.aggregateSigsAndHandleInvalidSigners", "error", err.Error()) + return false + } + + err = header.SetPubKeysBitmap(bitmap) + if err != nil { + log.Debug("doEndRoundJobByLeader.SetPubKeysBitmap", "error", err.Error()) + return false + } + + err = header.SetSignature(sig) + if err != nil { + log.Debug("doEndRoundJobByLeader.SetSignature", "error", err.Error()) + return false + } + + // Header is complete so the leader can sign it + leaderSignature, err := sr.signBlockHeader() + if err != nil { + log.Error(err.Error()) + return false + } + + err = header.SetLeaderSignature(leaderSignature) + if err != nil { + log.Debug("doEndRoundJobByLeader.SetLeaderSignature", "error", err.Error()) + return false + } + + ok := sr.ScheduledProcessor().IsProcessedOKWithTimeout() + // placeholder for subroundEndRound.doEndRoundJobByLeader script + if !ok { + return false + } + + roundHandler := sr.RoundHandler() + if roundHandler.RemainingTime(roundHandler.TimeStamp(), roundHandler.TimeDuration()) < 0 { + log.Debug("doEndRoundJob: time is out -> cancel broadcasting final info and header", + "round time stamp", roundHandler.TimeStamp(), + "current time", time.Now()) + return false + } + + // broadcast header and final info section + + sr.createAndBroadcastHeaderFinalInfo() + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("doEndRoundJobByLeader.GetLeader", "error", errGetLeader) + return false + } + + // broadcast header + err = sr.BroadcastMessenger().BroadcastHeader(header, []byte(leader)) + if err != nil { + log.Debug("doEndRoundJobByLeader.BroadcastHeader", "error", err.Error()) + } + + startTime := time.Now() + err = sr.BlockProcessor().CommitBlock(header, sr.GetBody()) + elapsedTime := time.Since(startTime) + if elapsedTime >= common.CommitMaxTime { + log.Warn("doEndRoundJobByLeader.CommitBlock", "elapsed time", elapsedTime) + } else { + log.Debug("elapsed time to commit block", + "time [s]", elapsedTime, + ) + } + if err != nil { + log.Debug("doEndRoundJobByLeader.CommitBlock", "error", err) + return false + } + + sr.SetStatus(sr.Current(), spos.SsFinished) + + sr.displayStatistics() + + log.Debug("step 3: Body and Header have been committed and header has been broadcast") + + err = sr.broadcastBlockDataLeader() + if err != nil { + log.Debug("doEndRoundJobByLeader.broadcastBlockDataLeader", "error", err.Error()) + } + + msg := fmt.Sprintf("Added proposed block with nonce %d in blockchain", header.GetNonce()) + log.Debug(display.Headline(msg, sr.SyncTimer().FormattedCurrentTime(), "+")) + + sr.updateMetricsForLeader() + + return true +} + +func (sr *subroundEndRound) aggregateSigsAndHandleInvalidSigners(bitmap []byte) ([]byte, []byte, error) { + header := sr.GetHeader() + sig, err := sr.SigningHandler().AggregateSigs(bitmap, header.GetEpoch()) + if err != nil { + log.Debug("doEndRoundJobByLeader.AggregateSigs", "error", err.Error()) + + return sr.handleInvalidSignersOnAggSigFail() + } + + err = sr.SigningHandler().SetAggregatedSig(sig) + if err != nil { + log.Debug("doEndRoundJobByLeader.SetAggregatedSig", "error", err.Error()) + return nil, nil, err + } + + err = sr.SigningHandler().Verify(sr.GetData(), bitmap, header.GetEpoch()) + if err != nil { + log.Debug("doEndRoundJobByLeader.Verify", "error", err.Error()) + + return sr.handleInvalidSignersOnAggSigFail() + } + + return bitmap, sig, nil +} + +func (sr *subroundEndRound) verifyNodesOnAggSigFail() ([]string, error) { + 
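+ // re-checks each collected signature share individually: shares that fail verification get their SrSignature job reset and the signer's peer-honesty score decreased, and the offending public keys are returned so they can be reported as invalid signers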
invalidPubKeys := make([]string, 0) + pubKeys := sr.ConsensusGroup() + + header := sr.GetHeader() + if check.IfNil(header) { + return nil, spos.ErrNilHeader + } + + for i, pk := range pubKeys { + isJobDone, err := sr.JobDone(pk, bls.SrSignature) + if err != nil || !isJobDone { + continue + } + + sigShare, err := sr.SigningHandler().SignatureShare(uint16(i)) + if err != nil { + return nil, err + } + + isSuccessfull := true + err = sr.SigningHandler().VerifySignatureShare(uint16(i), sigShare, sr.GetData(), header.GetEpoch()) + if err != nil { + isSuccessfull = false + + err = sr.SetJobDone(pk, bls.SrSignature, false) + if err != nil { + return nil, err + } + + // use increase factor since it was added optimistically, and it proved to be wrong + decreaseFactor := -spos.ValidatorPeerHonestyIncreaseFactor + spos.ValidatorPeerHonestyDecreaseFactor + sr.PeerHonestyHandler().ChangeScore( + pk, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + decreaseFactor, + ) + + invalidPubKeys = append(invalidPubKeys, pk) + } + + log.Trace("verifyNodesOnAggSigVerificationFail: verifying signature share", "public key", pk, "is successfull", isSuccessfull) + } + + return invalidPubKeys, nil +} + +func (sr *subroundEndRound) getFullMessagesForInvalidSigners(invalidPubKeys []string) ([]byte, error) { + p2pMessages := make([]p2p.MessageP2P, 0) + + for _, pk := range invalidPubKeys { + p2pMsg, ok := sr.GetMessageWithSignature(pk) + if !ok { + log.Trace("message not found in state for invalid signer", "pubkey", pk) + continue + } + + p2pMessages = append(p2pMessages, p2pMsg) + } + + invalidSigners, err := sr.MessageSigningHandler().Serialize(p2pMessages) + if err != nil { + return nil, err + } + + return invalidSigners, nil +} + +func (sr *subroundEndRound) handleInvalidSignersOnAggSigFail() ([]byte, []byte, error) { + invalidPubKeys, err := sr.verifyNodesOnAggSigFail() + if err != nil { + log.Debug("doEndRoundJobByLeader.verifyNodesOnAggSigFail", "error", err.Error()) + return nil, nil, err + } + + invalidSigners, err := sr.getFullMessagesForInvalidSigners(invalidPubKeys) + if err != nil { + log.Debug("doEndRoundJobByLeader.getFullMessagesForInvalidSigners", "error", err.Error()) + return nil, nil, err + } + + if len(invalidSigners) > 0 { + sr.createAndBroadcastInvalidSigners(invalidSigners) + } + + bitmap, sig, err := sr.computeAggSigOnValidNodes() + if err != nil { + log.Debug("doEndRoundJobByLeader.computeAggSigOnValidNodes", "error", err.Error()) + return nil, nil, err + } + + return bitmap, sig, nil +} + +func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, []byte, error) { + threshold := sr.Threshold(sr.Current()) + numValidSigShares := sr.ComputeSize(bls.SrSignature) + + header := sr.GetHeader() + if check.IfNil(header) { + return nil, nil, spos.ErrNilHeader + } + + if numValidSigShares < threshold { + return nil, nil, fmt.Errorf("%w: number of valid sig shares lower than threshold, numSigShares: %d, threshold: %d", + spos.ErrInvalidNumSigShares, numValidSigShares, threshold) + } + + bitmap := sr.GenerateBitmap(bls.SrSignature) + err := sr.checkSignaturesValidity(bitmap) + if err != nil { + return nil, nil, err + } + + sig, err := sr.SigningHandler().AggregateSigs(bitmap, header.GetEpoch()) + if err != nil { + return nil, nil, err + } + + err = sr.SigningHandler().SetAggregatedSig(sig) + if err != nil { + return nil, nil, err + } + + return bitmap, sig, nil +} + +func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfo() { + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { 
+ log.Debug("createAndBroadcastHeaderFinalInfo.GetLeader", "error", errGetLeader) + return + } + + header := sr.GetHeader() + cnsMsg := consensus.NewConsensusMessage( + sr.GetData(), + nil, + nil, + nil, + []byte(leader), + nil, + int(bls.MtBlockHeaderFinalInfo), + sr.RoundHandler().Index(), + sr.ChainID(), + header.GetPubKeysBitmap(), + header.GetSignature(), + header.GetLeaderSignature(), + sr.GetAssociatedPid([]byte(leader)), + nil, + ) + + err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) + if err != nil { + log.Debug("doEndRoundJob.BroadcastConsensusMessage", "error", err.Error()) + return + } + + log.Debug("step 3: block header final info has been sent", + "PubKeysBitmap", header.GetPubKeysBitmap(), + "AggregateSignature", header.GetSignature(), + "LeaderSignature", header.GetLeaderSignature()) +} + +func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []byte) { + isSelfLeader := sr.IsSelfLeaderInCurrentRound() && sr.ShouldConsiderSelfKeyInConsensus() + if !(isSelfLeader || sr.IsMultiKeyLeaderInCurrentRound()) { + return + } + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("createAndBroadcastInvalidSigners.GetLeader", "error", errGetLeader) + return + } + + cnsMsg := consensus.NewConsensusMessage( + sr.GetData(), + nil, + nil, + nil, + []byte(leader), + nil, + int(bls.MtInvalidSigners), + sr.RoundHandler().Index(), + sr.ChainID(), + nil, + nil, + nil, + sr.GetAssociatedPid([]byte(leader)), + invalidSigners, + ) + + err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) + if err != nil { + log.Debug("doEndRoundJob.BroadcastConsensusMessage", "error", err.Error()) + return + } + + log.Debug("step 3: invalid signers info has been sent") +} + +func (sr *subroundEndRound) doEndRoundJobByParticipant(cnsDta *consensus.Message) bool { + sr.mutProcessingEndRound.Lock() + defer sr.mutProcessingEndRound.Unlock() + + if sr.GetRoundCanceled() { + return false + } + if !sr.IsConsensusDataSet() { + return false + } + if !sr.IsSubroundFinished(sr.Previous()) { + return false + } + if sr.IsSubroundFinished(sr.Current()) { + return false + } + + haveHeader, header := sr.haveConsensusHeaderWithFullInfo(cnsDta) + if !haveHeader { + return false + } + + defer func() { + sr.SetProcessingBlock(false) + }() + + sr.SetProcessingBlock(true) + + shouldNotCommitBlock := sr.GetExtendedCalled() || int64(header.GetRound()) < sr.RoundHandler().Index() + if shouldNotCommitBlock { + log.Debug("canceled round, extended has been called or round index has been changed", + "round", sr.RoundHandler().Index(), + "subround", sr.Name(), + "header round", header.GetRound(), + "extended called", sr.GetExtendedCalled(), + ) + return false + } + + if sr.isOutOfTime() { + return false + } + + ok := sr.ScheduledProcessor().IsProcessedOKWithTimeout() + if !ok { + return false + } + + startTime := time.Now() + err := sr.BlockProcessor().CommitBlock(header, sr.GetBody()) + elapsedTime := time.Since(startTime) + if elapsedTime >= common.CommitMaxTime { + log.Warn("doEndRoundJobByParticipant.CommitBlock", "elapsed time", elapsedTime) + } else { + log.Debug("elapsed time to commit block", + "time [s]", elapsedTime, + ) + } + if err != nil { + log.Debug("doEndRoundJobByParticipant.CommitBlock", "error", err.Error()) + return false + } + + sr.SetStatus(sr.Current(), spos.SsFinished) + + if sr.IsNodeInConsensusGroup(sr.SelfPubKey()) || sr.IsMultiKeyInConsensusGroup() { + err = sr.setHeaderForValidator(header) + if err != nil { + 
log.Warn("doEndRoundJobByParticipant", "error", err.Error()) + } + } + + sr.displayStatistics() + + log.Debug("step 3: Body and Header have been committed") + + headerTypeMsg := "received" + if cnsDta != nil { + headerTypeMsg = "assembled" + } + + msg := fmt.Sprintf("Added %s block with nonce %d in blockchain", headerTypeMsg, header.GetNonce()) + log.Debug(display.Headline(msg, sr.SyncTimer().FormattedCurrentTime(), "-")) + return true +} + +func (sr *subroundEndRound) haveConsensusHeaderWithFullInfo(cnsDta *consensus.Message) (bool, data.HeaderHandler) { + if cnsDta == nil { + return sr.isConsensusHeaderReceived() + } + + if check.IfNil(sr.GetHeader()) { + return false, nil + } + + header := sr.GetHeader().ShallowClone() + err := header.SetPubKeysBitmap(cnsDta.PubKeysBitmap) + if err != nil { + return false, nil + } + + err = header.SetSignature(cnsDta.AggregateSignature) + if err != nil { + return false, nil + } + + err = header.SetLeaderSignature(cnsDta.LeaderSignature) + if err != nil { + return false, nil + } + + return true, header +} + +func (sr *subroundEndRound) isConsensusHeaderReceived() (bool, data.HeaderHandler) { + if check.IfNil(sr.GetHeader()) { + return false, nil + } + + consensusHeaderHash, err := core.CalculateHash(sr.Marshalizer(), sr.Hasher(), sr.GetHeader()) + if err != nil { + log.Debug("isConsensusHeaderReceived: calculate consensus header hash", "error", err.Error()) + return false, nil + } + + receivedHeaders := sr.GetReceivedHeaders() + + var receivedHeaderHash []byte + for index := range receivedHeaders { + receivedHeader := receivedHeaders[index].ShallowClone() + err = receivedHeader.SetLeaderSignature(nil) + if err != nil { + log.Debug("isConsensusHeaderReceived - SetLeaderSignature", "error", err.Error()) + return false, nil + } + + err = receivedHeader.SetPubKeysBitmap(nil) + if err != nil { + log.Debug("isConsensusHeaderReceived - SetPubKeysBitmap", "error", err.Error()) + return false, nil + } + + err = receivedHeader.SetSignature(nil) + if err != nil { + log.Debug("isConsensusHeaderReceived - SetSignature", "error", err.Error()) + return false, nil + } + + receivedHeaderHash, err = core.CalculateHash(sr.Marshalizer(), sr.Hasher(), receivedHeader) + if err != nil { + log.Debug("isConsensusHeaderReceived: calculate received header hash", "error", err.Error()) + return false, nil + } + + if bytes.Equal(receivedHeaderHash, consensusHeaderHash) { + return true, receivedHeaders[index] + } + } + + return false, nil +} + +func (sr *subroundEndRound) signBlockHeader() ([]byte, error) { + headerClone := sr.GetHeader().ShallowClone() + err := headerClone.SetLeaderSignature(nil) + if err != nil { + return nil, err + } + + marshalizedHdr, err := sr.Marshalizer().Marshal(headerClone) + if err != nil { + return nil, err + } + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + return nil, errGetLeader + } + + return sr.SigningHandler().CreateSignatureForPublicKey(marshalizedHdr, []byte(leader)) +} + +func (sr *subroundEndRound) updateMetricsForLeader() { + sr.appStatusHandler.Increment(common.MetricCountAcceptedBlocks) + sr.appStatusHandler.SetStringValue(common.MetricConsensusRoundState, + fmt.Sprintf("valid block produced in %f sec", time.Since(sr.RoundHandler().TimeStamp()).Seconds())) +} + +func (sr *subroundEndRound) broadcastBlockDataLeader() error { + miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.GetHeader(), sr.GetBody()) + if err != nil { + return err + } + + leader, errGetLeader := sr.GetLeader() + if 
errGetLeader != nil { + log.Debug("broadcastBlockDataLeader.GetLeader", "error", errGetLeader) + return errGetLeader + } + + return sr.BroadcastMessenger().BroadcastBlockDataLeader(sr.GetHeader(), miniBlocks, transactions, []byte(leader)) +} + +func (sr *subroundEndRound) setHeaderForValidator(header data.HeaderHandler) error { + idx, pk, miniBlocks, transactions, err := sr.getIndexPkAndDataToBroadcast() + if err != nil { + return err + } + + go sr.BroadcastMessenger().PrepareBroadcastHeaderValidator(header, miniBlocks, transactions, idx, pk) + + return nil +} + +func (sr *subroundEndRound) prepareBroadcastBlockDataForValidator() error { + idx, pk, miniBlocks, transactions, err := sr.getIndexPkAndDataToBroadcast() + if err != nil { + return err + } + + go sr.BroadcastMessenger().PrepareBroadcastBlockDataValidator(sr.GetHeader(), miniBlocks, transactions, idx, pk) + + return nil +} + +// doEndRoundConsensusCheck method checks if the consensus is achieved +func (sr *subroundEndRound) doEndRoundConsensusCheck() bool { + if sr.GetRoundCanceled() { + return false + } + + if sr.IsSubroundFinished(sr.Current()) { + return true + } + + return false +} + +func (sr *subroundEndRound) checkSignaturesValidity(bitmap []byte) error { + consensusGroup := sr.ConsensusGroup() + signers := headerCheck.ComputeSignersPublicKeys(consensusGroup, bitmap) + for _, pubKey := range signers { + isSigJobDone, err := sr.JobDone(pubKey, bls.SrSignature) + if err != nil { + return err + } + + if !isSigJobDone { + return spos.ErrNilSignature + } + } + + return nil +} + +func (sr *subroundEndRound) isOutOfTime() bool { + startTime := sr.GetRoundTimeStamp() + maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 + if sr.RoundHandler().RemainingTime(startTime, maxTime) < 0 { + log.Debug("canceled round, time is out", + "round", sr.SyncTimer().FormattedCurrentTime(), sr.RoundHandler().Index(), + "subround", sr.Name()) + + sr.SetRoundCanceled(true) + return true + } + + return false +} + +func (sr *subroundEndRound) getIndexPkAndDataToBroadcast() (int, []byte, map[uint32][]byte, map[string][][]byte, error) { + minIdx := sr.getMinConsensusGroupIndexOfManagedKeys() + + idx, err := sr.SelfConsensusGroupIndex() + if err == nil { + if idx < minIdx { + minIdx = idx + } + } + + if minIdx == sr.ConsensusGroupSize() { + return -1, nil, nil, nil, err + } + + miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.GetHeader(), sr.GetBody()) + if err != nil { + return -1, nil, nil, nil, err + } + + consensusGroup := sr.ConsensusGroup() + pk := []byte(consensusGroup[minIdx]) + + return minIdx, pk, miniBlocks, transactions, nil +} + +func (sr *subroundEndRound) getMinConsensusGroupIndexOfManagedKeys() int { + minIdx := sr.ConsensusGroupSize() + + for idx, validator := range sr.ConsensusGroup() { + if !sr.IsKeyManagedBySelf([]byte(validator)) { + continue + } + + if idx < minIdx { + minIdx = idx + } + } + + return minIdx +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sr *subroundEndRound) IsInterfaceNil() bool { + return sr == nil +} diff --git a/consensus/spos/bls/v1/subroundEndRound_test.go b/consensus/spos/bls/v1/subroundEndRound_test.go new file mode 100644 index 00000000000..c3388302557 --- /dev/null +++ b/consensus/spos/bls/v1/subroundEndRound_test.go @@ -0,0 +1,1783 @@ +package v1_test + +import ( + "bytes" + "errors" + "sync" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + 
"github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" + "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/p2p/factory" + "github.com/multiversx/mx-chain-go/testscommon" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +func initSubroundEndRoundWithContainer( + container *consensusMocks.ConsensusCoreMock, + appStatusHandler core.AppStatusHandler, +) v1.SubroundEndRound { + ch := make(chan bool, 1) + consensusState := initializers.InitConsensusState() + sr, _ := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) + + srEndRound, _ := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + appStatusHandler, + &testscommon.SentSignatureTrackerStub{}, + ) + + return srEndRound +} + +func initSubroundEndRound(appStatusHandler core.AppStatusHandler) v1.SubroundEndRound { + container := consensusMocks.InitConsensusCore() + return initSubroundEndRoundWithContainer(container, appStatusHandler) +} + +func TestNewSubroundEndRound(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + sr, _ := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + t.Run("nil subround should error", func(t *testing.T) { + t.Parallel() + + srEndRound, err := v1.NewSubroundEndRound( + nil, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srEndRound) + assert.Equal(t, spos.ErrNilSubround, err) + }) + t.Run("nil extend function handler should error", func(t *testing.T) { + t.Parallel() + + srEndRound, err := v1.NewSubroundEndRound( + sr, + nil, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srEndRound) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + }) + t.Run("nil app status handler should error", func(t *testing.T) { + t.Parallel() + + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + nil, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srEndRound) + assert.Equal(t, spos.ErrNilAppStatusHandler, err) + }) + t.Run("nil sent 
signatures tracker should error", func(t *testing.T) { + t.Parallel() + + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + nil, + ) + + assert.Nil(t, srEndRound) + assert.Equal(t, v1.ErrNilSentSignatureTracker, err) + }) +} + +func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetBlockchain(nil) + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srEndRound)) + assert.Equal(t, spos.ErrNilBlockChain, err) +} + +func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetBlockProcessor(nil) + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srEndRound)) + assert.Equal(t, spos.ErrNilBlockProcessor, err) +} + +func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + sr.ConsensusStateHandler = nil + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srEndRound)) + assert.Equal(t, spos.ErrNilConsensusState, err) +} + +func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetMultiSignerContainer(nil) + srEndRound, err := v1.NewSubroundEndRound( + sr, 
+ extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srEndRound)) + assert.Equal(t, spos.ErrNilMultiSignerContainer, err) +} + +func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetRoundHandler(nil) + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srEndRound)) + assert.Equal(t, spos.ErrNilRoundHandler, err) +} + +func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetSyncTimer(nil) + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srEndRound)) + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + srEndRound, err := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.False(t, check.IfNil(srEndRound)) + assert.Nil(t, err) +} + +func TestSubroundEndRound_DoEndRoundJobErrAggregatingSigShouldFail(t *testing.T) { + t.Parallel() + container := consensusMocks.InitConsensusCore() + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + signingHandler := &consensusMocks.SigningHandlerStub{ + AggregateSigsCalled: func(bitmap []byte, epoch uint32) ([]byte, error) { + return nil, crypto.ErrNilHasher + }, + } + container.SetSigningHandler(signingHandler) + + sr.SetHeader(&block.Header{}) + + sr.SetSelfPubKey("A") + sr.SetLeader("A") + + assert.True(t, sr.IsSelfLeaderInCurrentRound()) + r := sr.DoEndRoundJob() + assert.False(t, r) +} + +func TestSubroundEndRound_DoEndRoundJobErrCommitBlockShouldFail(t *testing.T) { + t.Parallel() + + container := 
consensusMocks.InitConsensusCore() + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + sr.SetLeader("A") + + blProcMock := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) + blProcMock.CommitBlockCalled = func( + header data.HeaderHandler, + body data.BodyHandler, + ) error { + return blockchain.ErrHeaderUnitNil + } + + container.SetBlockProcessor(blProcMock) + sr.SetHeader(&block.Header{}) + + r := sr.DoEndRoundJob() + assert.False(t, r) +} + +func TestSubroundEndRound_DoEndRoundJobErrTimeIsOutShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + sr.SetLeader("A") + + remainingTime := time.Millisecond + roundHandlerMock := &consensusMocks.RoundHandlerMock{ + RemainingTimeCalled: func(startTime time.Time, maxTime time.Duration) time.Duration { + return remainingTime + }, + } + + container.SetRoundHandler(roundHandlerMock) + sr.SetHeader(&block.Header{}) + + r := sr.DoEndRoundJob() + assert.True(t, r) + + remainingTime = -time.Millisecond + + r = sr.DoEndRoundJob() + assert.False(t, r) +} + +func TestSubroundEndRound_DoEndRoundJobErrBroadcastBlockOK(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + bm := &consensusMocks.BroadcastMessengerMock{ + BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { + return errors.New("error") + }, + } + container.SetBroadcastMessenger(bm) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + sr.SetLeader("A") + + sr.SetHeader(&block.Header{}) + + r := sr.DoEndRoundJob() + assert.True(t, r) +} + +func TestSubroundEndRound_DoEndRoundJobErrMarshalizedDataToBroadcastOK(t *testing.T) { + t.Parallel() + + err := errors.New("") + container := consensusMocks.InitConsensusCore() + + bpm := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) + bpm.MarshalizedDataToBroadcastCalled = func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { + err = errors.New("error marshalized data to broadcast") + return make(map[uint32][]byte), make(map[string][][]byte), err + } + container.SetBlockProcessor(bpm) + + bm := &consensusMocks.BroadcastMessengerMock{ + BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { + return nil + }, + BroadcastMiniBlocksCalled: func(bytes map[uint32][]byte, pkBytes []byte) error { + return nil + }, + BroadcastTransactionsCalled: func(bytes map[string][][]byte, pkBytes []byte) error { + return nil + }, + } + container.SetBroadcastMessenger(bm) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + sr.SetLeader("A") + + sr.SetHeader(&block.Header{}) + + r := sr.DoEndRoundJob() + assert.True(t, r) + assert.Equal(t, errors.New("error marshalized data to broadcast"), err) +} + +func TestSubroundEndRound_DoEndRoundJobErrBroadcastMiniBlocksOK(t *testing.T) { + t.Parallel() + + err := errors.New("") + container := consensusMocks.InitConsensusCore() + + bpm := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) + bpm.MarshalizedDataToBroadcastCalled = func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { + return make(map[uint32][]byte), make(map[string][][]byte), nil + } + 
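// the block processor stub returns empty maps so that only the delayed miniblock broadcast reports an error in this test +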
container.SetBlockProcessor(bpm) + + bm := &consensusMocks.BroadcastMessengerMock{ + BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { + return nil + }, + BroadcastMiniBlocksCalled: func(bytes map[uint32][]byte, pkBytes []byte) error { + err = errors.New("error broadcast miniblocks") + return err + }, + BroadcastTransactionsCalled: func(bytes map[string][][]byte, pkBytes []byte) error { + return nil + }, + } + container.SetBroadcastMessenger(bm) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + sr.SetLeader("A") + + sr.SetHeader(&block.Header{}) + + r := sr.DoEndRoundJob() + assert.True(t, r) + // no error as broadcast is delayed + assert.Equal(t, errors.New("error broadcast miniblocks"), err) +} + +func TestSubroundEndRound_DoEndRoundJobErrBroadcastTransactionsOK(t *testing.T) { + t.Parallel() + + err := errors.New("") + container := consensusMocks.InitConsensusCore() + + bpm := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) + bpm.MarshalizedDataToBroadcastCalled = func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { + return make(map[uint32][]byte), make(map[string][][]byte), nil + } + container.SetBlockProcessor(bpm) + + bm := &consensusMocks.BroadcastMessengerMock{ + BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { + return nil + }, + BroadcastMiniBlocksCalled: func(bytes map[uint32][]byte, pkBytes []byte) error { + return nil + }, + BroadcastTransactionsCalled: func(bytes map[string][][]byte, pkBytes []byte) error { + err = errors.New("error broadcast transactions") + return err + }, + } + container.SetBroadcastMessenger(bm) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + sr.SetLeader("A") + + sr.SetHeader(&block.Header{}) + + r := sr.DoEndRoundJob() + assert.True(t, r) + // no error as broadcast is delayed + assert.Equal(t, errors.New("error broadcast transactions"), err) +} + +func TestSubroundEndRound_DoEndRoundJobAllOK(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + bm := &consensusMocks.BroadcastMessengerMock{ + BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { + return errors.New("error") + }, + } + container.SetBroadcastMessenger(bm) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + sr.SetLeader("A") + + sr.SetHeader(&block.Header{}) + + r := sr.DoEndRoundJob() + assert.True(t, r) +} + +func TestSubroundEndRound_CheckIfSignatureIsFilled(t *testing.T) { + t.Parallel() + + expectedSignature := []byte("signature") + container := consensusMocks.InitConsensusCore() + signingHandler := &consensusMocks.SigningHandlerStub{ + CreateSignatureForPublicKeyCalled: func(publicKeyBytes []byte, msg []byte) ([]byte, error) { + var receivedHdr block.Header + _ = container.Marshalizer().Unmarshal(&receivedHdr, msg) + return expectedSignature, nil + }, + } + container.SetSigningHandler(signingHandler) + bm := &consensusMocks.BroadcastMessengerMock{ + BroadcastBlockCalled: func(handler data.BodyHandler, handler2 data.HeaderHandler) error { + return errors.New("error") + }, + } + container.SetBroadcastMessenger(bm) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + sr.SetLeader("A") + + 
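// the stubbed signing handler returns the expected signature, which DoEndRoundJob must set as the header's leader signature +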
sr.SetHeader(&block.Header{Nonce: 5}) + + r := sr.DoEndRoundJob() + assert.True(t, r) + assert.Equal(t, expectedSignature, sr.GetHeader().GetLeaderSignature()) +} + +func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { + t.Parallel() + + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.SetRoundCanceled(true) + + ok := sr.DoEndRoundConsensusCheck() + assert.False(t, ok) +} + +func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnTrueWhenRoundIsFinished(t *testing.T) { + t.Parallel() + + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.SetStatus(bls.SrEndRound, spos.SsFinished) + + ok := sr.DoEndRoundConsensusCheck() + assert.True(t, ok) +} + +func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnFalseWhenRoundIsNotFinished(t *testing.T) { + t.Parallel() + + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + ok := sr.DoEndRoundConsensusCheck() + assert.False(t, ok) +} + +func TestSubroundEndRound_CheckSignaturesValidityShouldErrNilSignature(t *testing.T) { + t.Parallel() + + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + err := sr.CheckSignaturesValidity([]byte{2}) + assert.Equal(t, spos.ErrNilSignature, err) +} + +func TestSubroundEndRound_CheckSignaturesValidityShouldReturnNil(t *testing.T) { + t.Parallel() + + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) + + err := sr.CheckSignaturesValidity([]byte{1}) + assert.Equal(t, nil, err) +} + +func TestSubroundEndRound_DoEndRoundJobByParticipant_RoundCanceledShouldReturnFalse(t *testing.T) { + t.Parallel() + + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.SetRoundCanceled(true) + + cnsData := consensus.Message{} + res := sr.DoEndRoundJobByParticipant(&cnsData) + assert.False(t, res) +} + +func TestSubroundEndRound_DoEndRoundJobByParticipant_ConsensusDataNotSetShouldReturnFalse(t *testing.T) { + t.Parallel() + + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.SetData(nil) + + cnsData := consensus.Message{} + res := sr.DoEndRoundJobByParticipant(&cnsData) + assert.False(t, res) +} + +func TestSubroundEndRound_DoEndRoundJobByParticipant_PreviousSubroundNotFinishedShouldReturnFalse(t *testing.T) { + t.Parallel() + + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.SetStatus(2, spos.SsNotFinished) + cnsData := consensus.Message{} + res := sr.DoEndRoundJobByParticipant(&cnsData) + assert.False(t, res) +} + +func TestSubroundEndRound_DoEndRoundJobByParticipant_CurrentSubroundFinishedShouldReturnFalse(t *testing.T) { + t.Parallel() + + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + // set previous as finished + sr.SetStatus(2, spos.SsFinished) + + // set current as finished + sr.SetStatus(3, spos.SsFinished) + + cnsData := consensus.Message{} + res := sr.DoEndRoundJobByParticipant(&cnsData) + assert.False(t, res) +} + +func TestSubroundEndRound_DoEndRoundJobByParticipant_ConsensusHeaderNotReceivedShouldReturnFalse(t *testing.T) { + t.Parallel() + + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + // set previous as finished + sr.SetStatus(2, spos.SsFinished) + + // set current as not finished + sr.SetStatus(3, spos.SsNotFinished) + + cnsData := consensus.Message{} + res := sr.DoEndRoundJobByParticipant(&cnsData) + assert.False(t, res) +} + +func 
TestSubroundEndRound_DoEndRoundJobByParticipant_ShouldReturnTrue(t *testing.T) { + t.Parallel() + + hdr := &block.Header{Nonce: 37} + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.SetHeader(hdr) + sr.AddReceivedHeader(hdr) + + // set previous as finished + sr.SetStatus(2, spos.SsFinished) + + // set current as not finished + sr.SetStatus(3, spos.SsNotFinished) + + cnsData := consensus.Message{} + res := sr.DoEndRoundJobByParticipant(&cnsData) + assert.True(t, res) +} + +func TestSubroundEndRound_IsConsensusHeaderReceived_NoReceivedHeadersShouldReturnFalse(t *testing.T) { + t.Parallel() + + hdr := &block.Header{Nonce: 37} + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.SetHeader(hdr) + + res, retHdr := sr.IsConsensusHeaderReceived() + assert.False(t, res) + assert.Nil(t, retHdr) +} + +func TestSubroundEndRound_IsConsensusHeaderReceived_HeaderNotReceivedShouldReturnFalse(t *testing.T) { + t.Parallel() + + hdr := &block.Header{Nonce: 37} + hdrToSearchFor := &block.Header{Nonce: 38} + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.AddReceivedHeader(hdr) + sr.SetHeader(hdrToSearchFor) + + res, retHdr := sr.IsConsensusHeaderReceived() + assert.False(t, res) + assert.Nil(t, retHdr) +} + +func TestSubroundEndRound_IsConsensusHeaderReceivedShouldReturnTrue(t *testing.T) { + t.Parallel() + + hdr := &block.Header{Nonce: 37} + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.SetHeader(hdr) + sr.AddReceivedHeader(hdr) + + res, retHdr := sr.IsConsensusHeaderReceived() + assert.True(t, res) + assert.Equal(t, hdr, retHdr) +} + +func TestSubroundEndRound_HaveConsensusHeaderWithFullInfoNilHdrShouldNotWork(t *testing.T) { + t.Parallel() + + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + cnsData := consensus.Message{} + + haveHdr, hdr := sr.HaveConsensusHeaderWithFullInfo(&cnsData) + assert.False(t, haveHdr) + assert.Nil(t, hdr) +} + +func TestSubroundEndRound_HaveConsensusHeaderWithFullInfoShouldWork(t *testing.T) { + t.Parallel() + + originalPubKeyBitMap := []byte{0, 1, 2} + newPubKeyBitMap := []byte{3, 4, 5} + originalLeaderSig := []byte{6, 7, 8} + newLeaderSig := []byte{9, 10, 11} + originalSig := []byte{12, 13, 14} + newSig := []byte{15, 16, 17} + hdr := block.Header{ + PubKeysBitmap: originalPubKeyBitMap, + Signature: originalSig, + LeaderSignature: originalLeaderSig, + } + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.SetHeader(&hdr) + + cnsData := consensus.Message{ + PubKeysBitmap: newPubKeyBitMap, + LeaderSignature: newLeaderSig, + AggregateSignature: newSig, + } + haveHdr, newHdr := sr.HaveConsensusHeaderWithFullInfo(&cnsData) + assert.True(t, haveHdr) + require.NotNil(t, newHdr) + assert.Equal(t, newPubKeyBitMap, newHdr.GetPubKeysBitmap()) + assert.Equal(t, newLeaderSig, newHdr.GetLeaderSignature()) + assert.Equal(t, newSig, newHdr.GetSignature()) +} + +func TestSubroundEndRound_CreateAndBroadcastHeaderFinalInfoBroadcastShouldBeCalled(t *testing.T) { + t.Parallel() + + chanRcv := make(chan bool, 1) + leaderSigInHdr := []byte("leader sig") + container := consensusMocks.InitConsensusCore() + messenger := &consensusMocks.BroadcastMessengerMock{ + BroadcastConsensusMessageCalled: func(message *consensus.Message) error { + chanRcv <- true + assert.Equal(t, message.LeaderSignature, leaderSigInHdr) + return nil + }, + } + container.SetBroadcastMessenger(messenger) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + 
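// the broadcast stub checks that the leader signature set on the header below is propagated in the consensus message +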
sr.SetHeader(&block.Header{LeaderSignature: leaderSigInHdr}) + + sr.CreateAndBroadcastHeaderFinalInfo() + + select { + case <-chanRcv: + case <-time.After(100 * time.Millisecond): + assert.Fail(t, "broadcast not called") + } +} + +func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldWork(t *testing.T) { + t.Parallel() + + hdr := &block.Header{Nonce: 37} + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + sr.SetHeader(hdr) + sr.AddReceivedHeader(hdr) + + sr.SetStatus(2, spos.SsFinished) + sr.SetStatus(3, spos.SsNotFinished) + + cnsData := consensus.Message{ + // apply the data which is mocked in consensus state so the checks will pass + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + } + + res := sr.ReceivedBlockHeaderFinalInfo(&cnsData) + assert.True(t, res) +} + +func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldReturnFalseWhenFinalInfoIsNotValid(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + headerSigVerifier := &consensusMocks.HeaderSigVerifierMock{ + VerifyLeaderSignatureCalled: func(header data.HeaderHandler) error { + return errors.New("error") + }, + VerifySignatureCalled: func(header data.HeaderHandler) error { + return errors.New("error") + }, + } + + container.SetHeaderSigVerifier(headerSigVerifier) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + } + sr.SetHeader(&block.Header{}) + res := sr.ReceivedBlockHeaderFinalInfo(&cnsData) + assert.False(t, res) +} + +func TestSubroundEndRound_IsOutOfTimeShouldReturnFalse(t *testing.T) { + t.Parallel() + + sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) + + res := sr.IsOutOfTime() + assert.False(t, res) +} + +func TestSubroundEndRound_IsOutOfTimeShouldReturnTrue(t *testing.T) { + t.Parallel() + + // update roundHandler's mock, so it will calculate for real the duration + container := consensusMocks.InitConsensusCore() + roundHandler := consensusMocks.RoundHandlerMock{RemainingTimeCalled: func(startTime time.Time, maxTime time.Duration) time.Duration { + currentTime := time.Now() + elapsedTime := currentTime.Sub(startTime) + remainingTime := maxTime - elapsedTime + + return remainingTime + }} + container.SetRoundHandler(&roundHandler) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + sr.SetRoundTimeStamp(time.Now().AddDate(0, 0, -1)) + + res := sr.IsOutOfTime() + assert.True(t, res) +} + +func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerifyLeaderSignatureFails(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + headerSigVerifier := &consensusMocks.HeaderSigVerifierMock{ + VerifyLeaderSignatureCalled: func(header data.HeaderHandler) error { + return errors.New("error") + }, + VerifySignatureCalled: func(header data.HeaderHandler) error { + return nil + }, + } + + container.SetHeaderSigVerifier(headerSigVerifier) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + cnsDta := &consensus.Message{} + sr.SetHeader(&block.Header{}) + isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) + assert.False(t, isValid) +} + +func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerifySignatureFails(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + headerSigVerifier := &consensusMocks.HeaderSigVerifierMock{ + 
VerifyLeaderSignatureCalled: func(header data.HeaderHandler) error { + return nil + }, + VerifySignatureCalled: func(header data.HeaderHandler) error { + return errors.New("error") + }, + } + + container.SetHeaderSigVerifier(headerSigVerifier) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + cnsDta := &consensus.Message{} + sr.SetHeader(&block.Header{}) + isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) + assert.False(t, isValid) +} + +func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnTrue(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + headerSigVerifier := &consensusMocks.HeaderSigVerifierMock{ + VerifyLeaderSignatureCalled: func(header data.HeaderHandler) error { + return nil + }, + VerifySignatureCalled: func(header data.HeaderHandler) error { + return nil + }, + } + + container.SetHeaderSigVerifier(headerSigVerifier) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + cnsDta := &consensus.Message{} + sr.SetHeader(&block.Header{}) + isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) + assert.True(t, isValid) +} + +func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { + t.Parallel() + + t.Run("fail to get signature share", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + expectedErr := errors.New("expected error") + signingHandler := &consensusMocks.SigningHandlerStub{ + SignatureShareCalled: func(index uint16) ([]byte, error) { + return nil, expectedErr + }, + } + + container.SetSigningHandler(signingHandler) + + sr.SetHeader(&block.Header{}) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) + + _, err := sr.VerifyNodesOnAggSigFail() + require.Equal(t, expectedErr, err) + }) + + t.Run("fail to verify signature share, job done will be set to false", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + expectedErr := errors.New("expected error") + signingHandler := &consensusMocks.SigningHandlerStub{ + SignatureShareCalled: func(index uint16) ([]byte, error) { + return nil, nil + }, + VerifySignatureShareCalled: func(index uint16, sig, msg []byte, epoch uint32) error { + return expectedErr + }, + } + + sr.SetHeader(&block.Header{}) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) + container.SetSigningHandler(signingHandler) + + _, err := sr.VerifyNodesOnAggSigFail() + require.Nil(t, err) + + isJobDone, err := sr.JobDone(sr.ConsensusGroup()[0], bls.SrSignature) + require.Nil(t, err) + require.False(t, isJobDone) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + signingHandler := &consensusMocks.SigningHandlerStub{ + SignatureShareCalled: func(index uint16) ([]byte, error) { + return nil, nil + }, + VerifySignatureShareCalled: func(index uint16, sig, msg []byte, epoch uint32) error { + return nil + }, + VerifyCalled: func(msg, bitmap []byte, epoch uint32) error { + return nil + }, + } + container.SetSigningHandler(signingHandler) + + sr.SetHeader(&block.Header{}) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, 
true) + + invalidSigners, err := sr.VerifyNodesOnAggSigFail() + require.Nil(t, err) + require.NotNil(t, invalidSigners) + }) +} + +func TestComputeAggSigOnValidNodes(t *testing.T) { + t.Parallel() + + t.Run("invalid number of valid sig shares", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetHeader(&block.Header{}) + sr.SetThreshold(bls.SrEndRound, 2) + + _, _, err := sr.ComputeAggSigOnValidNodes() + require.True(t, errors.Is(err, spos.ErrInvalidNumSigShares)) + }) + + t.Run("fail to create aggregated sig", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + expectedErr := errors.New("expected error") + signingHandler := &consensusMocks.SigningHandlerStub{ + AggregateSigsCalled: func(bitmap []byte, epoch uint32) ([]byte, error) { + return nil, expectedErr + }, + } + container.SetSigningHandler(signingHandler) + + sr.SetHeader(&block.Header{}) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) + + _, _, err := sr.ComputeAggSigOnValidNodes() + require.Equal(t, expectedErr, err) + }) + + t.Run("fail to set aggregated sig", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + expectedErr := errors.New("expected error") + signingHandler := &consensusMocks.SigningHandlerStub{ + SetAggregatedSigCalled: func(_ []byte) error { + return expectedErr + }, + } + container.SetSigningHandler(signingHandler) + sr.SetHeader(&block.Header{}) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) + + _, _, err := sr.ComputeAggSigOnValidNodes() + require.Equal(t, expectedErr, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetHeader(&block.Header{}) + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) + + bitmap, sig, err := sr.ComputeAggSigOnValidNodes() + require.NotNil(t, bitmap) + require.NotNil(t, sig) + require.Nil(t, err) + }) +} + +func TestSubroundEndRound_DoEndRoundJobByLeaderVerificationFail(t *testing.T) { + t.Parallel() + + t.Run("not enough valid signature shares", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + verifySigShareNumCalls := 0 + verifyFirstCall := true + signingHandler := &consensusMocks.SigningHandlerStub{ + SignatureShareCalled: func(index uint16) ([]byte, error) { + return nil, nil + }, + VerifySignatureShareCalled: func(index uint16, sig, msg []byte, epoch uint32) error { + if verifySigShareNumCalls == 0 { + verifySigShareNumCalls++ + return errors.New("expected error") + } + + verifySigShareNumCalls++ + return nil + }, + VerifyCalled: func(msg, bitmap []byte, epoch uint32) error { + if verifyFirstCall { + verifyFirstCall = false + return errors.New("expected error") + } + + return nil + }, + } + + container.SetSigningHandler(signingHandler) + + sr.SetThreshold(bls.SrEndRound, 2) + + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, true) + + 
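// the first share fails verification, leaving fewer valid signatures than the threshold of 2 set above +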
sr.SetHeader(&block.Header{}) + + r := sr.DoEndRoundJobByLeader() + require.False(t, r) + + assert.False(t, verifyFirstCall) + assert.Equal(t, 2, verifySigShareNumCalls) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + verifySigShareNumCalls := 0 + verifyFirstCall := true + signingHandler := &consensusMocks.SigningHandlerStub{ + SignatureShareCalled: func(index uint16) ([]byte, error) { + return nil, nil + }, + VerifySignatureShareCalled: func(index uint16, sig, msg []byte, epoch uint32) error { + if verifySigShareNumCalls == 0 { + verifySigShareNumCalls++ + return errors.New("expected error") + } + + verifySigShareNumCalls++ + return nil + }, + VerifyCalled: func(msg, bitmap []byte, epoch uint32) error { + if verifyFirstCall { + verifyFirstCall = false + return errors.New("expected error") + } + + return nil + }, + } + + container.SetSigningHandler(signingHandler) + + sr.SetThreshold(bls.SrEndRound, 2) + + _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, true) + _ = sr.SetJobDone(sr.ConsensusGroup()[2], bls.SrSignature, true) + + sr.SetHeader(&block.Header{}) + + r := sr.DoEndRoundJobByLeader() + require.True(t, r) + + assert.False(t, verifyFirstCall) + assert.Equal(t, 3, verifySigShareNumCalls) + }) +} + +func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { + t.Parallel() + + t.Run("consensus data is not set", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.ConsensusStateHandler.SetData(nil) + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("received message node is not leader in current round", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("other node"), + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("received message from self leader should return false", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + sr.SetLeader("A") + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("received message from self multikey leader should return false", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return string(pkBytes) == "A" + }, + } + ch := make(chan bool, 1) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) + sr, _ := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + 
&statusHandler.AppStatusHandlerStub{}, + ) + + srEndRound, _ := v1.NewSubroundEndRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + srEndRound.SetSelfPubKey("A") + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + } + + res := srEndRound.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("received hash does not match the hash from current consensus state", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("Y"), + PubKey: []byte("A"), + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("process received message verification failed, different round index", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + RoundIndex: 1, + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("empty invalid signers", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + InvalidSigners: []byte{}, + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("invalid signers data", func(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + messageSigningHandler := &mock.MessageSigningHandlerStub{ + DeserializeCalled: func(messagesBytes []byte) ([]p2p.MessageP2P, error) { + return nil, expectedErr + }, + } + + container := consensusMocks.InitConsensusCore() + container.SetMessageSigningHandler(messageSigningHandler) + + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + InvalidSigners: []byte("invalid data"), + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + InvalidSigners: []byte("invalidSignersData"), + } + + res := sr.ReceivedInvalidSignersInfo(&cnsData) + assert.True(t, res) + }) +} + +func TestVerifyInvalidSigners(t *testing.T) { + t.Parallel() + + t.Run("failed to deserialize invalidSigners field, should error", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + expectedErr := errors.New("expected err") + messageSigningHandler := &mock.MessageSigningHandlerStub{ + DeserializeCalled: func(messagesBytes []byte) ([]p2p.MessageP2P, error) { + return nil, expectedErr + }, + } + + container.SetMessageSigningHandler(messageSigningHandler) + + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + err := sr.VerifyInvalidSigners([]byte{}) + 
require.Equal(t, expectedErr, err) + }) + + t.Run("failed to verify low level p2p message, should error", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + invalidSigners := []p2p.MessageP2P{&factory.Message{ + FromField: []byte("from"), + }} + invalidSignersBytes, _ := container.Marshalizer().Marshal(invalidSigners) + + expectedErr := errors.New("expected err") + messageSigningHandler := &mock.MessageSigningHandlerStub{ + DeserializeCalled: func(messagesBytes []byte) ([]p2p.MessageP2P, error) { + require.Equal(t, invalidSignersBytes, messagesBytes) + return invalidSigners, nil + }, + VerifyCalled: func(message p2p.MessageP2P) error { + return expectedErr + }, + } + + container.SetMessageSigningHandler(messageSigningHandler) + + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + err := sr.VerifyInvalidSigners(invalidSignersBytes) + require.Equal(t, expectedErr, err) + }) + + t.Run("failed to verify signature share", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + pubKey := []byte("A") // it's in consensus + + consensusMsg := &consensus.Message{ + PubKey: pubKey, + } + consensusMsgBytes, _ := container.Marshalizer().Marshal(consensusMsg) + + invalidSigners := []p2p.MessageP2P{&factory.Message{ + FromField: []byte("from"), + DataField: consensusMsgBytes, + }} + invalidSignersBytes, _ := container.Marshalizer().Marshal(invalidSigners) + + messageSigningHandler := &mock.MessageSigningHandlerStub{ + DeserializeCalled: func(messagesBytes []byte) ([]p2p.MessageP2P, error) { + require.Equal(t, invalidSignersBytes, messagesBytes) + return invalidSigners, nil + }, + } + + wasCalled := false + signingHandler := &consensusMocks.SigningHandlerStub{ + VerifySingleSignatureCalled: func(publicKeyBytes []byte, message []byte, signature []byte) error { + wasCalled = true + return errors.New("expected err") + }, + } + + container.SetSigningHandler(signingHandler) + container.SetMessageSigningHandler(messageSigningHandler) + + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + err := sr.VerifyInvalidSigners(invalidSignersBytes) + require.Nil(t, err) + require.True(t, wasCalled) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + pubKey := []byte("A") // it's in consensus + + consensusMsg := &consensus.Message{ + PubKey: pubKey, + } + consensusMsgBytes, _ := container.Marshalizer().Marshal(consensusMsg) + + invalidSigners := []p2p.MessageP2P{&factory.Message{ + FromField: []byte("from"), + DataField: consensusMsgBytes, + }} + invalidSignersBytes, _ := container.Marshalizer().Marshal(invalidSigners) + + messageSigningHandler := &mock.MessageSignerMock{} + container.SetMessageSigningHandler(messageSigningHandler) + + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + err := sr.VerifyInvalidSigners(invalidSignersBytes) + require.Nil(t, err) + }) +} + +func TestSubroundEndRound_CreateAndBroadcastInvalidSigners(t *testing.T) { + t.Parallel() + + t.Run("redundancy node should not send while main is active", func(t *testing.T) { + t.Parallel() + + expectedInvalidSigners := []byte("invalid signers") + + container := consensusMocks.InitConsensusCore() + nodeRedundancy := &mock.NodeRedundancyHandlerStub{ + IsRedundancyNodeCalled: func() bool { + return true + }, + IsMainMachineActiveCalled: func() bool { + return true + }, + } + 
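// while the main machine is reported active, the redundancy node must not broadcast, so the messenger stub below fails the test if called +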
container.SetNodeRedundancyHandler(nodeRedundancy) + messenger := &consensusMocks.BroadcastMessengerMock{ + BroadcastConsensusMessageCalled: func(message *consensus.Message) error { + assert.Fail(t, "should have not been called") + return nil + }, + } + container.SetBroadcastMessenger(messenger) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + wg := &sync.WaitGroup{} + wg.Add(1) + + expectedInvalidSigners := []byte("invalid signers") + + wasCalled := false + container := consensusMocks.InitConsensusCore() + messenger := &consensusMocks.BroadcastMessengerMock{ + BroadcastConsensusMessageCalled: func(message *consensus.Message) error { + assert.Equal(t, expectedInvalidSigners, message.InvalidSigners) + wasCalled = true + wg.Done() + return nil + }, + } + container.SetBroadcastMessenger(messenger) + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + sr.SetLeader("A") + + sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) + + wg.Wait() + + require.True(t, wasCalled) + }) +} + +func TestGetFullMessagesForInvalidSigners(t *testing.T) { + t.Parallel() + + t.Run("empty p2p messages slice if not in state", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + messageSigningHandler := &mock.MessageSigningHandlerStub{ + SerializeCalled: func(messages []p2p.MessageP2P) ([]byte, error) { + require.Equal(t, 0, len(messages)) + + return []byte{}, nil + }, + } + + container.SetMessageSigningHandler(messageSigningHandler) + + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + invalidSigners := []string{"B", "C"} + + invalidSignersBytes, err := sr.GetFullMessagesForInvalidSigners(invalidSigners) + require.Nil(t, err) + require.Equal(t, []byte{}, invalidSignersBytes) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + expectedInvalidSigners := []byte("expectedInvalidSigners") + + messageSigningHandler := &mock.MessageSigningHandlerStub{ + SerializeCalled: func(messages []p2p.MessageP2P) ([]byte, error) { + require.Equal(t, 2, len(messages)) + + return expectedInvalidSigners, nil + }, + } + + container.SetMessageSigningHandler(messageSigningHandler) + + sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.AddMessageWithSignature("B", &p2pmocks.P2PMessageMock{}) + sr.AddMessageWithSignature("C", &p2pmocks.P2PMessageMock{}) + + invalidSigners := []string{"B", "C"} + + invalidSignersBytes, err := sr.GetFullMessagesForInvalidSigners(invalidSigners) + require.Nil(t, err) + require.Equal(t, expectedInvalidSigners, invalidSignersBytes) + }) +} + +func TestSubroundEndRound_getMinConsensusGroupIndexOfManagedKeys(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{} + ch := make(chan bool, 1) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) + sr, _ := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + srEndRound, _ := v1.NewSubroundEndRound( + sr, + 
extend, + v1.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + t.Run("no managed keys from consensus group", func(t *testing.T) { + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return false + } + + assert.Equal(t, 9, srEndRound.GetMinConsensusGroupIndexOfManagedKeys()) + }) + t.Run("first managed key in consensus group should return 0", func(t *testing.T) { + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return bytes.Equal([]byte("A"), pkBytes) + } + + assert.Equal(t, 0, srEndRound.GetMinConsensusGroupIndexOfManagedKeys()) + }) + t.Run("third managed key in consensus group should return 2", func(t *testing.T) { + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return bytes.Equal([]byte("C"), pkBytes) + } + + assert.Equal(t, 2, srEndRound.GetMinConsensusGroupIndexOfManagedKeys()) + }) + t.Run("last managed key in consensus group should return 8", func(t *testing.T) { + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return bytes.Equal([]byte("I"), pkBytes) + } + + assert.Equal(t, 8, srEndRound.GetMinConsensusGroupIndexOfManagedKeys()) + }) +} diff --git a/consensus/spos/bls/v1/subroundSignature.go b/consensus/spos/bls/v1/subroundSignature.go new file mode 100644 index 00000000000..1d71ac59420 --- /dev/null +++ b/consensus/spos/bls/v1/subroundSignature.go @@ -0,0 +1,415 @@ +package v1 + +import ( + "context" + "encoding/hex" + "fmt" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" +) + +type subroundSignature struct { + *spos.Subround + appStatusHandler core.AppStatusHandler + sentSignatureTracker spos.SentSignaturesTracker +} + +// NewSubroundSignature creates a subroundSignature object +func NewSubroundSignature( + baseSubround *spos.Subround, + extend func(subroundId int), + appStatusHandler core.AppStatusHandler, + sentSignatureTracker spos.SentSignaturesTracker, +) (*subroundSignature, error) { + err := checkNewSubroundSignatureParams( + baseSubround, + ) + if err != nil { + return nil, err + } + if extend == nil { + return nil, fmt.Errorf("%w for extend function", spos.ErrNilFunctionHandler) + } + if check.IfNil(appStatusHandler) { + return nil, spos.ErrNilAppStatusHandler + } + if check.IfNil(sentSignatureTracker) { + return nil, ErrNilSentSignatureTracker + } + + srSignature := subroundSignature{ + Subround: baseSubround, + appStatusHandler: appStatusHandler, + sentSignatureTracker: sentSignatureTracker, + } + srSignature.Job = srSignature.doSignatureJob + srSignature.Check = srSignature.doSignatureConsensusCheck + srSignature.Extend = extend + + return &srSignature, nil +} + +func checkNewSubroundSignatureParams( + baseSubround *spos.Subround, +) error { + if baseSubround == nil { + return spos.ErrNilSubround + } + if check.IfNil(baseSubround.ConsensusStateHandler) { + return spos.ErrNilConsensusState + } + + err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) + + return err +} + +// doSignatureJob method does the job of the subround Signature +func (sr *subroundSignature) doSignatureJob(_ context.Context) bool { + if !sr.CanDoSubroundJob(sr.Current()) { + return false + } + if 
check.IfNil(sr.GetHeader()) { + log.Error("doSignatureJob", "error", spos.ErrNilHeader) + return false + } + + isSelfLeader := sr.IsSelfLeaderInCurrentRound() && sr.ShouldConsiderSelfKeyInConsensus() + isSelfInConsensusGroup := sr.IsNodeInConsensusGroup(sr.SelfPubKey()) && sr.ShouldConsiderSelfKeyInConsensus() + + if isSelfLeader || isSelfInConsensusGroup { + selfIndex, err := sr.SelfConsensusGroupIndex() + if err != nil { + log.Debug("doSignatureJob.SelfConsensusGroupIndex: not in consensus group") + return false + } + + signatureShare, err := sr.SigningHandler().CreateSignatureShareForPublicKey( + sr.GetData(), + uint16(selfIndex), + sr.GetHeader().GetEpoch(), + []byte(sr.SelfPubKey()), + ) + if err != nil { + log.Debug("doSignatureJob.CreateSignatureShareForPublicKey", "error", err.Error()) + return false + } + + if !isSelfLeader { + ok := sr.createAndSendSignatureMessage(signatureShare, []byte(sr.SelfPubKey())) + if !ok { + return false + } + } + + ok := sr.completeSignatureSubRound(sr.SelfPubKey(), isSelfLeader) + if !ok { + return false + } + } + + return sr.doSignatureJobForManagedKeys() +} + +func (sr *subroundSignature) createAndSendSignatureMessage(signatureShare []byte, pkBytes []byte) bool { + // TODO: Analyze it is possible to send message only to leader with O(1) instead of O(n) + cnsMsg := consensus.NewConsensusMessage( + sr.GetData(), + signatureShare, + nil, + nil, + pkBytes, + nil, + int(bls.MtSignature), + sr.RoundHandler().Index(), + sr.ChainID(), + nil, + nil, + nil, + sr.GetAssociatedPid(pkBytes), + nil, + ) + + err := sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) + if err != nil { + log.Debug("createAndSendSignatureMessage.BroadcastConsensusMessage", + "error", err.Error(), "pk", pkBytes) + return false + } + + log.Debug("step 2: signature has been sent", "pk", pkBytes) + + return true +} + +func (sr *subroundSignature) completeSignatureSubRound(pk string, shouldWaitForAllSigsAsync bool) bool { + err := sr.SetJobDone(pk, sr.Current(), true) + if err != nil { + log.Debug("doSignatureJob.SetSelfJobDone", + "subround", sr.Name(), + "error", err.Error(), + "pk", []byte(pk), + ) + return false + } + + if shouldWaitForAllSigsAsync { + go sr.waitAllSignatures() + } + + return true +} + +// receivedSignature method is called when a signature is received through the signature channel. 
+// If the signature is valid, then the jobDone map corresponding to the node which sent it, +// is set on true for the subround Signature +func (sr *subroundSignature) receivedSignature(_ context.Context, cnsDta *consensus.Message) bool { + node := string(cnsDta.PubKey) + pkForLogs := core.GetTrimmedPk(hex.EncodeToString(cnsDta.PubKey)) + + if !sr.IsConsensusDataSet() { + return false + } + + if !sr.IsNodeInConsensusGroup(node) { + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.ValidatorPeerHonestyDecreaseFactor, + ) + + return false + } + + if !sr.IsSelfLeaderInCurrentRound() && !sr.IsMultiKeyLeaderInCurrentRound() { + return false + } + + if !sr.IsConsensusDataEqual(cnsDta.BlockHeaderHash) { + return false + } + + if !sr.CanProcessReceivedMessage(cnsDta, sr.RoundHandler().Index(), sr.Current()) { + return false + } + + index, err := sr.ConsensusGroupIndex(node) + if err != nil { + log.Debug("receivedSignature.ConsensusGroupIndex", + "node", pkForLogs, + "error", err.Error()) + return false + } + + err = sr.SigningHandler().StoreSignatureShare(uint16(index), cnsDta.SignatureShare) + if err != nil { + log.Debug("receivedSignature.StoreSignatureShare", + "node", pkForLogs, + "index", index, + "error", err.Error()) + return false + } + + err = sr.SetJobDone(node, sr.Current(), true) + if err != nil { + log.Debug("receivedSignature.SetJobDone", + "node", pkForLogs, + "subround", sr.Name(), + "error", err.Error()) + return false + } + + sr.PeerHonestyHandler().ChangeScore( + node, + spos.GetConsensusTopicID(sr.ShardCoordinator()), + spos.ValidatorPeerHonestyIncreaseFactor, + ) + + sr.appStatusHandler.SetStringValue(common.MetricConsensusRoundState, "signed") + return true +} + +// doSignatureConsensusCheck method checks if the consensus in the subround Signature is achieved +func (sr *subroundSignature) doSignatureConsensusCheck() bool { + if sr.GetRoundCanceled() { + return false + } + + if sr.IsSubroundFinished(sr.Current()) { + sr.appStatusHandler.SetStringValue(common.MetricConsensusRoundState, "signed") + + return true + } + + isSelfLeader := sr.IsSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() + isSelfInConsensusGroup := sr.IsNodeInConsensusGroup(sr.SelfPubKey()) || sr.IsMultiKeyInConsensusGroup() + + threshold := sr.Threshold(sr.Current()) + if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.GetHeader()) { + threshold = sr.FallbackThreshold(sr.Current()) + log.Warn("subroundSignature.doSignatureConsensusCheck: fallback validation has been applied", + "minimum number of signatures required", threshold, + "actual number of signatures received", sr.getNumOfSignaturesCollected(), + ) + } + + areSignaturesCollected, numSigs := sr.areSignaturesCollected(threshold) + areAllSignaturesCollected := numSigs == sr.ConsensusGroupSize() + + isJobDoneByLeader := isSelfLeader && (areAllSignaturesCollected || (areSignaturesCollected && sr.GetWaitingAllSignaturesTimeOut())) + + selfJobDone := true + if sr.IsNodeInConsensusGroup(sr.SelfPubKey()) { + selfJobDone = sr.IsSelfJobDone(sr.Current()) + } + multiKeyJobDone := true + if sr.IsMultiKeyInConsensusGroup() { + multiKeyJobDone = sr.IsMultiKeyJobDone(sr.Current()) + } + isJobDoneByConsensusNode := !isSelfLeader && isSelfInConsensusGroup && selfJobDone && multiKeyJobDone + + isSubroundFinished := !isSelfInConsensusGroup || isJobDoneByConsensusNode || isJobDoneByLeader + + if isSubroundFinished { + if isSelfLeader { + log.Debug("step 2: signatures", + "received", 
numSigs, + "total", len(sr.ConsensusGroup())) + } + + log.Debug("step 2: subround has been finished", + "subround", sr.Name()) + sr.SetStatus(sr.Current(), spos.SsFinished) + + sr.appStatusHandler.SetStringValue(common.MetricConsensusRoundState, "signed") + + return true + } + + return false +} + +// areSignaturesCollected method checks if the signatures received from the nodes, belonging to the current +// jobDone group, are more than the necessary given threshold +func (sr *subroundSignature) areSignaturesCollected(threshold int) (bool, int) { + n := sr.getNumOfSignaturesCollected() + return n >= threshold, n +} + +func (sr *subroundSignature) getNumOfSignaturesCollected() int { + n := 0 + + for i := 0; i < len(sr.ConsensusGroup()); i++ { + node := sr.ConsensusGroup()[i] + + isSignJobDone, err := sr.JobDone(node, sr.Current()) + if err != nil { + log.Debug("getNumOfSignaturesCollected.JobDone", + "node", node, + "subround", sr.Name(), + "error", err.Error()) + continue + } + + if isSignJobDone { + n++ + } + } + + return n +} + +func (sr *subroundSignature) waitAllSignatures() { + remainingTime := sr.remainingTime() + time.Sleep(remainingTime) + + if sr.IsSubroundFinished(sr.Current()) { + return + } + + sr.SetWaitingAllSignaturesTimeOut(true) + + select { + case sr.ConsensusChannel() <- true: + default: + } +} + +func (sr *subroundSignature) remainingTime() time.Duration { + startTime := sr.RoundHandler().TimeStamp() + maxTime := time.Duration(float64(sr.StartTime()) + float64(sr.EndTime()-sr.StartTime())*waitingAllSigsMaxTimeThreshold) + remainigTime := sr.RoundHandler().RemainingTime(startTime, maxTime) + + return remainigTime +} + +func (sr *subroundSignature) doSignatureJobForManagedKeys() bool { + isMultiKeyLeader := sr.IsMultiKeyLeaderInCurrentRound() + + numMultiKeysSignaturesSent := 0 + for _, pk := range sr.ConsensusGroup() { + pkBytes := []byte(pk) + if sr.IsJobDone(pk, sr.Current()) { + continue + } + if !sr.IsKeyManagedBySelf(pkBytes) { + continue + } + + selfIndex, err := sr.ConsensusGroupIndex(pk) + if err != nil { + log.Warn("doSignatureJobForManagedKeys: index not found", "pk", pkBytes) + continue + } + + signatureShare, err := sr.SigningHandler().CreateSignatureShareForPublicKey( + sr.GetData(), + uint16(selfIndex), + sr.GetHeader().GetEpoch(), + pkBytes, + ) + if err != nil { + log.Debug("doSignatureJobForManagedKeys.CreateSignatureShareForPublicKey", "error", err.Error()) + return false + } + + if !isMultiKeyLeader { + ok := sr.createAndSendSignatureMessage(signatureShare, pkBytes) + if !ok { + return false + } + + numMultiKeysSignaturesSent++ + } + sr.sentSignatureTracker.SignatureSent(pkBytes) + leader, err := sr.GetLeader() + if err != nil { + log.Debug("doSignatureJobForManagedKeys.GetLeader", "error", err.Error()) + return false + } + + isLeader := pk == leader + ok := sr.completeSignatureSubRound(pk, isLeader) + if !ok { + return false + } + } + + if numMultiKeysSignaturesSent > 0 { + log.Debug("step 2: multi keys signatures have been sent", "num", numMultiKeysSignaturesSent) + } + + return true +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sr *subroundSignature) IsInterfaceNil() bool { + return sr == nil +} diff --git a/consensus/spos/bls/v1/subroundSignature_test.go b/consensus/spos/bls/v1/subroundSignature_test.go new file mode 100644 index 00000000000..73d765cb67b --- /dev/null +++ b/consensus/spos/bls/v1/subroundSignature_test.go @@ -0,0 +1,799 @@ +package v1_test + +import ( + "testing" + + 
"github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" + "github.com/multiversx/mx-chain-go/testscommon" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +func initSubroundSignatureWithContainer(container *consensusMocks.ConsensusCoreMock) v1.SubroundSignature { + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + srSignature, _ := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + return srSignature +} + +func initSubroundSignature() v1.SubroundSignature { + container := consensusMocks.InitConsensusCore() + return initSubroundSignatureWithContainer(container) +} + +func TestNewSubroundSignature(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + t.Run("nil subround should error", func(t *testing.T) { + t.Parallel() + + srSignature, err := v1.NewSubroundSignature( + nil, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srSignature) + assert.Equal(t, spos.ErrNilSubround, err) + }) + t.Run("nil extend function handler should error", func(t *testing.T) { + t.Parallel() + + srSignature, err := v1.NewSubroundSignature( + sr, + nil, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srSignature) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + }) + t.Run("nil app status handler should error", func(t *testing.T) { + t.Parallel() + + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + nil, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srSignature) + assert.Equal(t, spos.ErrNilAppStatusHandler, err) + }) + t.Run("nil sent signatures tracker should error", func(t *testing.T) { + t.Parallel() + + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + nil, + ) + + assert.Nil(t, srSignature) + assert.Equal(t, v1.ErrNilSentSignatureTracker, err) + }) +} + +func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrBlock, + 
bls.SrSignature, + bls.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + sr.ConsensusStateHandler = nil + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srSignature)) + assert.Equal(t, spos.ErrNilConsensusState, err) +} + +func TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetHasher(nil) + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srSignature)) + assert.Equal(t, spos.ErrNilHasher, err) +} + +func TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetMultiSignerContainer(nil) + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srSignature)) + assert.Equal(t, spos.ErrNilMultiSignerContainer, err) +} + +func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + container.SetRoundHandler(nil) + + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srSignature)) + assert.Equal(t, spos.ErrNilRoundHandler, err) +} + +func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + 
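// removing the sync timer from the container below should make the constructor return spos.ErrNilSyncTimer +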
container.SetSyncTimer(nil) + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.True(t, check.IfNil(srSignature)) + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + srSignature, err := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.False(t, check.IfNil(srSignature)) + assert.Nil(t, err) +} + +func TestSubroundSignature_DoSignatureJob(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + sr := initSubroundSignatureWithContainer(container) + + sr.SetHeader(&block.Header{}) + sr.SetData(nil) + r := sr.DoSignatureJob() + assert.False(t, r) + + sr.SetData([]byte("X")) + + err := errors.New("create signature share error") + signingHandler := &consensusMocks.SigningHandlerStub{ + CreateSignatureShareForPublicKeyCalled: func(msg []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + return nil, err + }, + } + container.SetSigningHandler(signingHandler) + + r = sr.DoSignatureJob() + assert.False(t, r) + + signingHandler = &consensusMocks.SigningHandlerStub{ + CreateSignatureShareForPublicKeyCalled: func(msg []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + return []byte("SIG"), nil + }, + } + container.SetSigningHandler(signingHandler) + + r = sr.DoSignatureJob() + assert.True(t, r) + + _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrSignature, false) + sr.SetRoundCanceled(false) + leader, err := sr.GetLeader() + assert.Nil(t, err) + + sr.SetSelfPubKey(leader) + r = sr.DoSignatureJob() + assert.True(t, r) + assert.False(t, sr.GetRoundCanceled()) +} + +func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusStateWithKeysHandler( + &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return true + }, + }, + ) + ch := make(chan bool, 1) + + sr, _ := spos.NewSubround( + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, + int64(70*roundTimeDuration/100), + int64(85*roundTimeDuration/100), + "(SIGNATURE)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + signatureSentForPks := make(map[string]struct{}) + srSignature, _ := v1.NewSubroundSignature( + sr, + extend, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{ + SignatureSentCalled: func(pkBytes []byte) { + signatureSentForPks[string(pkBytes)] = struct{}{} + }, + }, + ) + + srSignature.SetHeader(&block.Header{}) + srSignature.SetData(nil) + r := srSignature.DoSignatureJob() + assert.False(t, r) + + sr.SetData([]byte("X")) + + err := errors.New("create signature share error") + signingHandler := &consensusMocks.SigningHandlerStub{ + CreateSignatureShareForPublicKeyCalled: 
func(msg []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + return nil, err + }, + } + container.SetSigningHandler(signingHandler) + + r = srSignature.DoSignatureJob() + assert.False(t, r) + + signingHandler = &consensusMocks.SigningHandlerStub{ + CreateSignatureShareForPublicKeyCalled: func(msg []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + return []byte("SIG"), nil + }, + } + container.SetSigningHandler(signingHandler) + + r = srSignature.DoSignatureJob() + assert.True(t, r) + + _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrSignature, false) + sr.SetRoundCanceled(false) + + leader, err := sr.GetLeader() + assert.Nil(t, err) + + sr.SetSelfPubKey(leader) + r = srSignature.DoSignatureJob() + assert.True(t, r) + assert.False(t, sr.GetRoundCanceled()) + expectedMap := map[string]struct{}{ + "A": {}, + "B": {}, + "C": {}, + "D": {}, + "E": {}, + "F": {}, + "G": {}, + "H": {}, + "I": {}, + } + assert.Equal(t, expectedMap, signatureSentForPks) +} + +func TestSubroundSignature_ReceivedSignature(t *testing.T) { + t.Parallel() + + sr := initSubroundSignature() + signature := []byte("signature") + cnsMsg := consensus.NewConsensusMessage( + sr.GetData(), + signature, + nil, + nil, + []byte(sr.ConsensusGroup()[1]), + []byte("sig"), + int(bls.MtSignature), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + + sr.SetHeader(&block.Header{}) + sr.SetData(nil) + r := sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + + sr.SetData([]byte("Y")) + r = sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + + sr.SetData([]byte("X")) + r = sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + leader, err := sr.GetLeader() + assert.Nil(t, err) + + sr.SetSelfPubKey(leader) + + cnsMsg.PubKey = []byte("X") + r = sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + + cnsMsg.PubKey = []byte(sr.ConsensusGroup()[1]) + maxCount := len(sr.ConsensusGroup()) * 2 / 3 + count := 0 + for i := 0; i < len(sr.ConsensusGroup()); i++ { + if sr.ConsensusGroup()[i] != string(cnsMsg.PubKey) { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) + count++ + if count == maxCount { + break + } + } + } + r = sr.ReceivedSignature(cnsMsg) + assert.True(t, r) +} + +func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { + t.Parallel() + + errStore := errors.New("signature share store failed") + storeSigShareCalled := false + signingHandler := &consensusMocks.SigningHandlerStub{ + VerifySignatureShareCalled: func(index uint16, sig, msg []byte, epoch uint32) error { + return nil + }, + StoreSignatureShareCalled: func(index uint16, sig []byte) error { + storeSigShareCalled = true + return errStore + }, + } + + container := consensusMocks.InitConsensusCore() + container.SetSigningHandler(signingHandler) + sr := initSubroundSignatureWithContainer(container) + sr.SetHeader(&block.Header{}) + + signature := []byte("signature") + cnsMsg := consensus.NewConsensusMessage( + sr.GetData(), + signature, + nil, + nil, + []byte(sr.ConsensusGroup()[1]), + []byte("sig"), + int(bls.MtSignature), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + + sr.SetData(nil) + r := sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + + sr.SetData([]byte("Y")) + r = sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + + sr.SetData([]byte("X")) + r = sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + + leader, err := sr.GetLeader() + assert.Nil(t, err) + sr.SetSelfPubKey(leader) + + cnsMsg.PubKey = []byte("X") + r = sr.ReceivedSignature(cnsMsg) + assert.False(t, r) 
+ + cnsMsg.PubKey = []byte(sr.ConsensusGroup()[1]) + maxCount := len(sr.ConsensusGroup()) * 2 / 3 + count := 0 + for i := 0; i < len(sr.ConsensusGroup()); i++ { + if sr.ConsensusGroup()[i] != string(cnsMsg.PubKey) { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) + count++ + if count == maxCount { + break + } + } + } + r = sr.ReceivedSignature(cnsMsg) + assert.False(t, r) + assert.True(t, storeSigShareCalled) +} + +func TestSubroundSignature_SignaturesCollected(t *testing.T) { + t.Parallel() + + sr := initSubroundSignature() + + for i := 0; i < len(sr.ConsensusGroup()); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, false) + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, false) + } + + ok, n := sr.AreSignaturesCollected(2) + assert.False(t, ok) + assert.Equal(t, 0, n) + + ok, _ = sr.AreSignaturesCollected(2) + assert.False(t, ok) + + _ = sr.SetJobDone("B", bls.SrSignature, true) + isJobDone, _ := sr.JobDone("B", bls.SrSignature) + assert.True(t, isJobDone) + + ok, _ = sr.AreSignaturesCollected(2) + assert.False(t, ok) + + _ = sr.SetJobDone("C", bls.SrSignature, true) + ok, _ = sr.AreSignaturesCollected(2) + assert.True(t, ok) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { + t.Parallel() + + sr := initSubroundSignature() + sr.SetRoundCanceled(true) + assert.False(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSubroundIsFinished(t *testing.T) { + t.Parallel() + + sr := initSubroundSignature() + sr.SetStatus(bls.SrSignature, spos.SsFinished) + assert.True(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSignaturesCollectedReturnTrue(t *testing.T) { + t.Parallel() + + sr := initSubroundSignature() + + for i := 0; i < sr.Threshold(bls.SrSignature); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) + } + + assert.True(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenSignaturesCollectedReturnFalse(t *testing.T) { + t.Parallel() + + sr := initSubroundSignature() + assert.False(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenNotAllSignaturesCollectedAndTimeIsNotOut(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + sr := initSubroundSignatureWithContainer(container) + sr.SetWaitingAllSignaturesTimeOut(false) + + leader, err := sr.GetLeader() + assert.Nil(t, err) + sr.SetSelfPubKey(leader) + + for i := 0; i < sr.Threshold(bls.SrSignature); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) + } + + assert.False(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenAllSignaturesCollected(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + sr := initSubroundSignatureWithContainer(container) + sr.SetWaitingAllSignaturesTimeOut(false) + + leader, err := sr.GetLeader() + assert.Nil(t, err) + sr.SetSelfPubKey(leader) + + for i := 0; i < sr.ConsensusGroupSize(); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) + } + + assert.True(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenEnoughButNotAllSignaturesCollectedAndTimeIsOut(t *testing.T) { + t.Parallel() + + container := 
consensusMocks.InitConsensusCore() + sr := initSubroundSignatureWithContainer(container) + sr.SetWaitingAllSignaturesTimeOut(true) + + leader, err := sr.GetLeader() + assert.Nil(t, err) + sr.SetSelfPubKey(leader) + + for i := 0; i < sr.Threshold(bls.SrSignature); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) + } + + assert.True(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenFallbackThresholdCouldNotBeApplied(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + container.SetFallbackHeaderValidator(&testscommon.FallBackHeaderValidatorStub{ + ShouldApplyFallbackValidationCalled: func(headerHandler data.HeaderHandler) bool { + return false + }, + }) + sr := initSubroundSignatureWithContainer(container) + sr.SetWaitingAllSignaturesTimeOut(false) + + sr.SetSelfPubKey(sr.ConsensusGroup()[0]) + + for i := 0; i < sr.FallbackThreshold(bls.SrSignature); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) + } + + assert.False(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenFallbackThresholdCouldBeApplied(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + container.SetFallbackHeaderValidator(&testscommon.FallBackHeaderValidatorStub{ + ShouldApplyFallbackValidationCalled: func(headerHandler data.HeaderHandler) bool { + return true + }, + }) + sr := initSubroundSignatureWithContainer(container) + sr.SetWaitingAllSignaturesTimeOut(true) + + leader, err := sr.GetLeader() + assert.Nil(t, err) + sr.SetSelfPubKey(leader) + + for i := 0; i < sr.FallbackThreshold(bls.SrSignature); i++ { + _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) + } + + assert.True(t, sr.DoSignatureConsensusCheck()) +} + +func TestSubroundSignature_ReceivedSignatureReturnFalseWhenConsensusDataIsNotEqual(t *testing.T) { + t.Parallel() + + sr := initSubroundSignature() + + leader, err := sr.GetLeader() + assert.Nil(t, err) + cnsMsg := consensus.NewConsensusMessage( + append(sr.GetData(), []byte("X")...), + []byte("signature"), + nil, + nil, + []byte(leader), + []byte("sig"), + int(bls.MtSignature), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + + assert.False(t, sr.ReceivedSignature(cnsMsg)) +} diff --git a/consensus/spos/bls/v1/subroundStartRound.go b/consensus/spos/bls/v1/subroundStartRound.go new file mode 100644 index 00000000000..a47d9235cd2 --- /dev/null +++ b/consensus/spos/bls/v1/subroundStartRound.go @@ -0,0 +1,378 @@ +package v1 + +import ( + "context" + "encoding/hex" + "fmt" + "sync" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + outportcore "github.com/multiversx/mx-chain-core-go/data/outport" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/outport" + "github.com/multiversx/mx-chain-go/outport/disabled" +) + +// subroundStartRound defines the data needed by the subround StartRound +type subroundStartRound struct { + outportMutex sync.RWMutex + *spos.Subround + processingThresholdPercentage int + executeStoredMessages func() + resetConsensusMessages func() + + outportHandler outport.OutportHandler + sentSignatureTracker spos.SentSignaturesTracker +} + +// NewSubroundStartRound creates a subroundStartRound object +func NewSubroundStartRound( + 
baseSubround *spos.Subround, + extend func(subroundId int), + processingThresholdPercentage int, + executeStoredMessages func(), + resetConsensusMessages func(), + sentSignatureTracker spos.SentSignaturesTracker, +) (*subroundStartRound, error) { + err := checkNewSubroundStartRoundParams( + baseSubround, + ) + if err != nil { + return nil, err + } + if extend == nil { + return nil, fmt.Errorf("%w for extend function", spos.ErrNilFunctionHandler) + } + if executeStoredMessages == nil { + return nil, fmt.Errorf("%w for executeStoredMessages function", spos.ErrNilFunctionHandler) + } + if resetConsensusMessages == nil { + return nil, fmt.Errorf("%w for resetConsensusMessages function", spos.ErrNilFunctionHandler) + } + if check.IfNil(sentSignatureTracker) { + return nil, ErrNilSentSignatureTracker + } + + srStartRound := subroundStartRound{ + Subround: baseSubround, + processingThresholdPercentage: processingThresholdPercentage, + executeStoredMessages: executeStoredMessages, + resetConsensusMessages: resetConsensusMessages, + outportHandler: disabled.NewDisabledOutport(), + sentSignatureTracker: sentSignatureTracker, + outportMutex: sync.RWMutex{}, + } + srStartRound.Job = srStartRound.doStartRoundJob + srStartRound.Check = srStartRound.doStartRoundConsensusCheck + srStartRound.Extend = extend + baseSubround.EpochStartRegistrationHandler().RegisterHandler(&srStartRound) + + return &srStartRound, nil +} + +func checkNewSubroundStartRoundParams( + baseSubround *spos.Subround, +) error { + if baseSubround == nil { + return spos.ErrNilSubround + } + if check.IfNil(baseSubround.ConsensusStateHandler) { + return spos.ErrNilConsensusState + } + + err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) + + return err +} + +// SetOutportHandler method sets outport handler +func (sr *subroundStartRound) SetOutportHandler(outportHandler outport.OutportHandler) error { + if check.IfNil(outportHandler) { + return outport.ErrNilDriver + } + + sr.outportMutex.Lock() + sr.outportHandler = outportHandler + sr.outportMutex.Unlock() + + return nil +} + +// doStartRoundJob method does the job of the subround StartRound +func (sr *subroundStartRound) doStartRoundJob(_ context.Context) bool { + sr.ResetConsensusState() + sr.SetRoundIndex(sr.RoundHandler().Index()) + sr.SetRoundTimeStamp(sr.RoundHandler().TimeStamp()) + topic := spos.GetConsensusTopicID(sr.ShardCoordinator()) + sr.GetAntiFloodHandler().ResetForTopic(topic) + sr.resetConsensusMessages() + return true +} + +// doStartRoundConsensusCheck method checks if the consensus is achieved in the subround StartRound +func (sr *subroundStartRound) doStartRoundConsensusCheck() bool { + if sr.GetRoundCanceled() { + return false + } + + if sr.IsSubroundFinished(sr.Current()) { + return true + } + + if sr.initCurrentRound() { + return true + } + + return false +} + +func (sr *subroundStartRound) initCurrentRound() bool { + nodeState := sr.BootStrapper().GetNodeState() + if nodeState != common.NsSynchronized { // if node is not synchronized yet, it has to continue the bootstrapping mechanism + return false + } + + sr.AppStatusHandler().SetStringValue(common.MetricConsensusRoundState, "") + + err := sr.generateNextConsensusGroup(sr.RoundHandler().Index()) + if err != nil { + log.Debug("initCurrentRound.generateNextConsensusGroup", + "round index", sr.RoundHandler().Index(), + "error", err.Error()) + + sr.SetRoundCanceled(true) + + return false + } + + if sr.NodeRedundancyHandler().IsRedundancyNode() { + 
sr.NodeRedundancyHandler().AdjustInactivityIfNeeded( + sr.SelfPubKey(), + sr.ConsensusGroup(), + sr.RoundHandler().Index(), + ) + // we should not return here, the multikey redundancy system relies on it + // the NodeRedundancyHandler "thinks" it is in redundancy mode even if we use the multikey redundancy system + } + + leader, err := sr.GetLeader() + if err != nil { + log.Debug("initCurrentRound.GetLeader", "error", err.Error()) + + sr.SetRoundCanceled(true) + + return false + } + + msg := "" + if sr.IsKeyManagedBySelf([]byte(leader)) { + msg = " (my turn in multi-key)" + } + if leader == sr.SelfPubKey() && sr.ShouldConsiderSelfKeyInConsensus() { + msg = " (my turn)" + } + if len(msg) != 0 { + sr.AppStatusHandler().Increment(common.MetricCountLeader) + sr.AppStatusHandler().SetStringValue(common.MetricConsensusRoundState, "proposed") + sr.AppStatusHandler().SetStringValue(common.MetricConsensusState, "proposer") + } + + log.Debug("step 0: preparing the round", + "leader", core.GetTrimmedPk(hex.EncodeToString([]byte(leader))), + "message", msg) + sr.sentSignatureTracker.StartRound() + + pubKeys := sr.ConsensusGroup() + numMultiKeysInConsensusGroup := sr.computeNumManagedKeysInConsensusGroup(pubKeys) + + sr.indexRoundIfNeeded(pubKeys) + + isSingleKeyLeader := leader == sr.SelfPubKey() && sr.ShouldConsiderSelfKeyInConsensus() + isLeader := isSingleKeyLeader || sr.IsKeyManagedBySelf([]byte(leader)) + isSelfInConsensus := sr.IsNodeInConsensusGroup(sr.SelfPubKey()) || numMultiKeysInConsensusGroup > 0 + if !isSelfInConsensus { + log.Debug("not in consensus group") + sr.AppStatusHandler().SetStringValue(common.MetricConsensusState, "not in consensus group") + } else { + if !isLeader { + sr.AppStatusHandler().Increment(common.MetricCountConsensus) + sr.AppStatusHandler().SetStringValue(common.MetricConsensusState, "participant") + } + } + + err = sr.SigningHandler().Reset(pubKeys) + if err != nil { + log.Debug("initCurrentRound.Reset", "error", err.Error()) + + sr.SetRoundCanceled(true) + + return false + } + + startTime := sr.GetRoundTimeStamp() + maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 + if sr.RoundHandler().RemainingTime(startTime, maxTime) < 0 { + log.Debug("canceled round, time is out", + "round", sr.SyncTimer().FormattedCurrentTime(), sr.RoundHandler().Index(), + "subround", sr.Name()) + + sr.SetRoundCanceled(true) + + return false + } + + sr.SetStatus(sr.Current(), spos.SsFinished) + + // execute stored messages which were received in this new round but before this initialisation + go sr.executeStoredMessages() + + return true +} + +func (sr *subroundStartRound) computeNumManagedKeysInConsensusGroup(pubKeys []string) int { + numMultiKeysInConsensusGroup := 0 + for _, pk := range pubKeys { + pkBytes := []byte(pk) + if sr.IsKeyManagedBySelf(pkBytes) { + numMultiKeysInConsensusGroup++ + log.Trace("in consensus group with multi key", + "pk", core.GetTrimmedPk(hex.EncodeToString(pkBytes))) + } + sr.IncrementRoundsWithoutReceivedMessages(pkBytes) + } + + if numMultiKeysInConsensusGroup > 0 { + log.Debug("in consensus group with multi keys identities", "num", numMultiKeysInConsensusGroup) + } + + return numMultiKeysInConsensusGroup +} + +func (sr *subroundStartRound) indexRoundIfNeeded(pubKeys []string) { + sr.outportMutex.RLock() + defer sr.outportMutex.RUnlock() + + if !sr.outportHandler.HasDrivers() { + return + } + + currentHeader := sr.Blockchain().GetCurrentBlockHeader() + if check.IfNil(currentHeader) { + currentHeader = 
sr.Blockchain().GetGenesisHeader() + } + + epoch := currentHeader.GetEpoch() + shardId := sr.ShardCoordinator().SelfId() + nodesCoordinatorShardID, err := sr.NodesCoordinator().ShardIdForEpoch(epoch) + if err != nil { + log.Debug("initCurrentRound.ShardIdForEpoch", + "epoch", epoch, + "error", err.Error()) + return + } + + if shardId != nodesCoordinatorShardID { + log.Debug("initCurrentRound.ShardIdForEpoch", + "epoch", epoch, + "shardCoordinator.ShardID", shardId, + "nodesCoordinator.ShardID", nodesCoordinatorShardID) + return + } + + signersIndexes, err := sr.NodesCoordinator().GetValidatorsIndexes(pubKeys, epoch) + if err != nil { + log.Error(err.Error()) + return + } + + round := sr.RoundHandler().Index() + + roundInfo := &outportcore.RoundInfo{ + Round: uint64(round), + SignersIndexes: signersIndexes, + BlockWasProposed: false, + ShardId: shardId, + Epoch: epoch, + Timestamp: uint64(sr.GetRoundTimeStamp().Unix()), + } + roundsInfo := &outportcore.RoundsInfo{ + ShardID: shardId, + RoundsInfo: []*outportcore.RoundInfo{roundInfo}, + } + sr.outportHandler.SaveRoundsInfo(roundsInfo) +} + +func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error { + currentHeader := sr.Blockchain().GetCurrentBlockHeader() + if check.IfNil(currentHeader) { + currentHeader = sr.Blockchain().GetGenesisHeader() + if check.IfNil(currentHeader) { + return spos.ErrNilHeader + } + } + + randomSeed := currentHeader.GetRandSeed() + + log.Debug("random source for the next consensus group", + "rand", randomSeed) + + shardId := sr.ShardCoordinator().SelfId() + + leader, nextConsensusGroup, err := sr.GetNextConsensusGroup( + randomSeed, + uint64(sr.GetRoundIndex()), + shardId, + sr.NodesCoordinator(), + currentHeader.GetEpoch(), + ) + if err != nil { + return err + } + + log.Trace("consensus group is formed by next validators:", + "round", roundIndex) + + for i := 0; i < len(nextConsensusGroup); i++ { + log.Trace(core.GetTrimmedPk(hex.EncodeToString([]byte(nextConsensusGroup[i])))) + } + + sr.SetConsensusGroup(nextConsensusGroup) + sr.SetLeader(leader) + + consensusGroupSizeForEpoch := sr.NodesCoordinator().ConsensusGroupSizeForShardAndEpoch(shardId, currentHeader.GetEpoch()) + sr.SetConsensusGroupSize(consensusGroupSizeForEpoch) + + return nil +} + +// EpochStartPrepare is called when an epoch start event is observed, but not yet confirmed/committed. +// Some components may need to do initialisation on this event +func (sr *subroundStartRound) EpochStartPrepare(metaHdr data.HeaderHandler, _ data.BodyHandler) { + log.Trace(fmt.Sprintf("epoch %d start prepare in consensus", metaHdr.GetEpoch())) +} + +// EpochStartAction is called upon a start of epoch event. 
+func (sr *subroundStartRound) EpochStartAction(hdr data.HeaderHandler) { + log.Trace(fmt.Sprintf("epoch %d start action in consensus", hdr.GetEpoch())) + + sr.changeEpoch(hdr.GetEpoch()) +} + +func (sr *subroundStartRound) changeEpoch(currentEpoch uint32) { + epochNodes, err := sr.NodesCoordinator().GetConsensusWhitelistedNodes(currentEpoch) + if err != nil { + panic(fmt.Sprintf("consensus changing epoch failed with error %s", err.Error())) + } + + sr.SetEligibleList(epochNodes) +} + +// NotifyOrder returns the notification order for a start of epoch event +func (sr *subroundStartRound) NotifyOrder() uint32 { + return common.ConsensusStartRoundOrder +} diff --git a/consensus/spos/bls/v1/subroundStartRound_test.go b/consensus/spos/bls/v1/subroundStartRound_test.go new file mode 100644 index 00000000000..5ab4523bf94 --- /dev/null +++ b/consensus/spos/bls/v1/subroundStartRound_test.go @@ -0,0 +1,846 @@ +package v1_test + +import ( + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v1 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v1" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/bootstrapperStubs" + consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (v1.SubroundStartRound, error) { + startRound, err := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + + return startRound, err +} + +func defaultWithoutErrorSubroundStartRoundFromSubround(sr *spos.Subround) v1.SubroundStartRound { + startRound, _ := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + + return startRound +} + +func defaultSubround( + consensusState *spos.ConsensusState, + ch chan bool, + container spos.ConsensusCoreHandler, +) (*spos.Subround, error) { + + return spos.NewSubround( + -1, + bls.SrStartRound, + bls.SrBlock, + int64(0*roundTimeDuration/100), + int64(5*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) +} + +func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) v1.SubroundStartRound { + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + sr, _ := defaultSubround(consensusState, ch, container) + srStartRound, _ := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + + return srStartRound +} + +func initSubroundStartRound() v1.SubroundStartRound { + container := consensusMocks.InitConsensusCore() + return initSubroundStartRoundWithContainer(container) +} + +func TestNewSubroundStartRound(t *testing.T) { + t.Parallel() + + ch := make(chan bool, 1) 
+ consensusState := initializers.InitConsensusState() + container := consensusMocks.InitConsensusCore() + sr, _ := spos.NewSubround( + -1, + bls.SrStartRound, + bls.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + t.Run("nil subround should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := v1.NewSubroundStartRound( + nil, + extend, + v1.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilSubround, err) + }) + t.Run("nil extend function handler should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := v1.NewSubroundStartRound( + sr, + nil, + v1.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srStartRound) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + assert.Contains(t, err.Error(), "extend") + }) + t.Run("nil executeStoredMessages function handler should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + nil, + resetConsensusMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srStartRound) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + assert.Contains(t, err.Error(), "executeStoredMessages") + }) + t.Run("nil resetConsensusMessages function handler should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + executeStoredMessages, + nil, + &testscommon.SentSignatureTrackerStub{}, + ) + + assert.Nil(t, srStartRound) + assert.ErrorIs(t, err, spos.ErrNilFunctionHandler) + assert.Contains(t, err.Error(), "resetConsensusMessages") + }) + t.Run("nil sent signatures tracker should error", func(t *testing.T) { + t.Parallel() + + srStartRound, err := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + executeStoredMessages, + resetConsensusMessages, + nil, + ) + + assert.Nil(t, srStartRound) + assert.Equal(t, v1.ErrNilSentSignatureTracker, err) + }) +} + +func TestSubroundStartRound_NewSubroundStartRoundNilBlockChainShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + container.SetBlockchain(nil) + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilBlockChain, err) +} + +func TestSubroundStartRound_NewSubroundStartRoundNilBootstrapperShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + container.SetBootStrapper(nil) + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilBootstrapper, err) +} + +func TestSubroundStartRound_NewSubroundStartRoundNilConsensusStateShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := 
initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + + sr.ConsensusStateHandler = nil + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilConsensusState, err) +} + +func TestSubroundStartRound_NewSubroundStartRoundNilMultiSignerContainerShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + container.SetMultiSignerContainer(nil) + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilMultiSignerContainer, err) +} + +func TestSubroundStartRound_NewSubroundStartRoundNilRoundHandlerShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + container.SetRoundHandler(nil) + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilRoundHandler, err) +} + +func TestSubroundStartRound_NewSubroundStartRoundNilSyncTimerShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + container.SetSyncTimer(nil) + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilSyncTimer, err) +} + +func TestSubroundStartRound_NewSubroundStartRoundNilValidatorGroupSelectorShouldFail(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + container.SetValidatorGroupSelector(nil) + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.Nil(t, srStartRound) + assert.Equal(t, spos.ErrNilNodesCoordinator, err) +} + +func TestSubroundStartRound_NewSubroundStartRoundShouldWork(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + + srStartRound, err := defaultSubroundStartRoundFromSubround(sr) + + assert.NotNil(t, srStartRound) + assert.Nil(t, err) +} + +func TestSubroundStartRound_DoStartRoundShouldReturnTrue(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + + consensusState := initializers.InitConsensusState() + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + + srStartRound := defaultWithoutErrorSubroundStartRoundFromSubround(sr) + + r := srStartRound.DoStartRoundJob() + assert.True(t, r) +} + +func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { + t.Parallel() + + sr := initSubroundStartRound() + + sr.SetRoundCanceled(true) + + ok := sr.DoStartRoundConsensusCheck() + assert.False(t, ok) +} + +func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenRoundIsFinished(t *testing.T) { + t.Parallel() + + sr := initSubroundStartRound() + + 
sr.SetStatus(bls.SrStartRound, spos.SsFinished) + + ok := sr.DoStartRoundConsensusCheck() + assert.True(t, ok) +} + +func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenInitCurrentRoundReturnTrue(t *testing.T) { + t.Parallel() + + bootstrapperMock := &bootstrapperStubs.BootstrapperStub{GetNodeStateCalled: func() common.NodeState { + return common.NsSynchronized + }} + + container := consensusMocks.InitConsensusCore() + container.SetBootStrapper(bootstrapperMock) + + sr := initSubroundStartRoundWithContainer(container) + sentTrackerInterface := sr.GetSentSignatureTracker() + sentTracker := sentTrackerInterface.(*testscommon.SentSignatureTrackerStub) + startRoundCalled := false + sentTracker.StartRoundCalled = func() { + startRoundCalled = true + } + + ok := sr.DoStartRoundConsensusCheck() + assert.True(t, ok) + assert.True(t, startRoundCalled) +} + +func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenInitCurrentRoundReturnFalse(t *testing.T) { + t.Parallel() + + bootstrapperMock := &bootstrapperStubs.BootstrapperStub{GetNodeStateCalled: func() common.NodeState { + return common.NsNotSynchronized + }} + + container := consensusMocks.InitConsensusCore() + container.SetBootStrapper(bootstrapperMock) + container.SetRoundHandler(initRoundHandlerMock()) + + sr := initSubroundStartRoundWithContainer(container) + + ok := sr.DoStartRoundConsensusCheck() + assert.False(t, ok) +} + +func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetNodeStateNotReturnSynchronized(t *testing.T) { + t.Parallel() + + bootstrapperMock := &bootstrapperStubs.BootstrapperStub{} + + bootstrapperMock.GetNodeStateCalled = func() common.NodeState { + return common.NsNotSynchronized + } + container := consensusMocks.InitConsensusCore() + container.SetBootStrapper(bootstrapperMock) + + srStartRound := initSubroundStartRoundWithContainer(container) + + r := srStartRound.InitCurrentRound() + assert.False(t, r) +} + +func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextConsensusGroupErr(t *testing.T) { + t.Parallel() + + validatorGroupSelector := &shardingMocks.NodesCoordinatorMock{} + err := errors.New("error") + validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte, round uint64, shardId uint32, epoch uint32) (nodesCoordinator.Validator, []nodesCoordinator.Validator, error) { + return nil, nil, err + } + container := consensusMocks.InitConsensusCore() + container.SetValidatorGroupSelector(validatorGroupSelector) + + srStartRound := initSubroundStartRoundWithContainer(container) + + r := srStartRound.InitCurrentRound() + assert.False(t, r) +} + +func TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenMainMachineIsActive(t *testing.T) { + t.Parallel() + + nodeRedundancyMock := &mock.NodeRedundancyHandlerStub{ + IsRedundancyNodeCalled: func() bool { + return true + }, + } + container := consensusMocks.InitConsensusCore() + container.SetNodeRedundancyHandler(nodeRedundancyMock) + + srStartRound := initSubroundStartRoundWithContainer(container) + + r := srStartRound.InitCurrentRound() + assert.True(t, r) +} + +func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetLeaderErr(t *testing.T) { + t.Parallel() + + validatorGroupSelector := &shardingMocks.NodesCoordinatorMock{} + leader := &shardingMocks.ValidatorMock{PubKeyCalled: func() []byte { + return []byte("leader") + }} + + validatorGroupSelector.ComputeValidatorsGroupCalled = func( + bytes []byte, + round uint64, + shardId uint32, + epoch uint32, + ) 
(nodesCoordinator.Validator, []nodesCoordinator.Validator, error) { + // will cause an error in GetLeader because of empty consensus group + return leader, []nodesCoordinator.Validator{}, nil + } + + container := consensusMocks.InitConsensusCore() + container.SetValidatorGroupSelector(validatorGroupSelector) + + srStartRound := initSubroundStartRoundWithContainer(container) + + r := srStartRound.InitCurrentRound() + assert.False(t, r) +} + +func TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenIsNotInTheConsensusGroup(t *testing.T) { + t.Parallel() + + container := consensusMocks.InitConsensusCore() + consensusState := initializers.InitConsensusState() + consensusState.SetSelfPubKey(consensusState.SelfPubKey() + "X") + ch := make(chan bool, 1) + + sr, _ := defaultSubround(consensusState, ch, container) + + srStartRound := defaultWithoutErrorSubroundStartRoundFromSubround(sr) + + r := srStartRound.InitCurrentRound() + assert.True(t, r) +} + +func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenTimeIsOut(t *testing.T) { + t.Parallel() + + roundHandlerMock := initRoundHandlerMock() + + roundHandlerMock.RemainingTimeCalled = func(time.Time, time.Duration) time.Duration { + return time.Duration(-1) + } + + container := consensusMocks.InitConsensusCore() + container.SetRoundHandler(roundHandlerMock) + + srStartRound := initSubroundStartRoundWithContainer(container) + + r := srStartRound.InitCurrentRound() + assert.False(t, r) +} + +func TestSubroundStartRound_InitCurrentRoundShouldReturnTrue(t *testing.T) { + t.Parallel() + + bootstrapperMock := &bootstrapperStubs.BootstrapperStub{} + + bootstrapperMock.GetNodeStateCalled = func() common.NodeState { + return common.NsSynchronized + } + + container := consensusMocks.InitConsensusCore() + container.SetBootStrapper(bootstrapperMock) + + srStartRound := initSubroundStartRoundWithContainer(container) + + r := srStartRound.InitCurrentRound() + assert.True(t, r) +} + +func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { + t.Parallel() + + t.Run("not in consensus node", func(t *testing.T) { + t.Parallel() + + wasCalled := false + container := consensusMocks.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{} + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetStringValueHandler: func(key string, value string) { + if key == common.MetricConsensusState { + wasCalled = true + assert.Equal(t, "not in consensus group", value) + } + }, + } + ch := make(chan bool, 1) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) + consensusState.SetSelfPubKey("not in consensus") + sr, _ := spos.NewSubround( + -1, + bls.SrStartRound, + bls.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) + + srStartRound, _ := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + executeStoredMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + srStartRound.Check() + assert.True(t, wasCalled) + }) + t.Run("main key participant", func(t *testing.T) { + t.Parallel() + + wasCalled := false + wasIncrementCalled := false + container := consensusMocks.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return string(pkBytes) == "B" + }, + } + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + 
SetStringValueHandler: func(key string, value string) { + if key == common.MetricConsensusState { + wasCalled = true + assert.Equal(t, "participant", value) + } + }, + IncrementHandler: func(key string) { + if key == common.MetricCountConsensus { + wasIncrementCalled = true + } + }, + } + ch := make(chan bool, 1) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) + consensusState.SetSelfPubKey("B") + sr, _ := spos.NewSubround( + -1, + bls.SrStartRound, + bls.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) + + srStartRound, _ := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + executeStoredMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + srStartRound.Check() + assert.True(t, wasCalled) + assert.True(t, wasIncrementCalled) + }) + t.Run("multi key participant", func(t *testing.T) { + t.Parallel() + + wasCalled := false + wasIncrementCalled := false + container := consensusMocks.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{} + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetStringValueHandler: func(key string, value string) { + if key == common.MetricConsensusState { + wasCalled = true + assert.Equal(t, "participant", value) + } + }, + IncrementHandler: func(key string) { + if key == common.MetricCountConsensus { + wasIncrementCalled = true + } + }, + } + ch := make(chan bool, 1) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) + consensusState.SetSelfPubKey("B") + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return string(pkBytes) == consensusState.SelfPubKey() + } + sr, _ := spos.NewSubround( + -1, + bls.SrStartRound, + bls.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) + + srStartRound, _ := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + executeStoredMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + srStartRound.Check() + assert.True(t, wasCalled) + assert.True(t, wasIncrementCalled) + }) + t.Run("main key leader", func(t *testing.T) { + t.Parallel() + + wasMetricConsensusStateCalled := false + wasMetricCountLeaderCalled := false + cntMetricConsensusRoundStateCalled := 0 + container := consensusMocks.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{} + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetStringValueHandler: func(key string, value string) { + if key == common.MetricConsensusState { + wasMetricConsensusStateCalled = true + assert.Equal(t, "proposer", value) + } + if key == common.MetricConsensusRoundState { + cntMetricConsensusRoundStateCalled++ + switch cntMetricConsensusRoundStateCalled { + case 1: + assert.Equal(t, "", value) + case 2: + assert.Equal(t, "proposed", value) + default: + assert.Fail(t, "should have been called only twice") + } + } + }, + IncrementHandler: func(key string) { + if key == common.MetricCountLeader { + wasMetricCountLeaderCalled = true + } + }, + } + ch := make(chan bool, 1) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) + leader, _ := consensusState.GetLeader() + consensusState.SetSelfPubKey(leader) + + sr, _ := spos.NewSubround( + 
-1, + bls.SrStartRound, + bls.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) + + srStartRound, _ := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + executeStoredMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + srStartRound.Check() + assert.True(t, wasMetricConsensusStateCalled) + assert.True(t, wasMetricCountLeaderCalled) + assert.Equal(t, 2, cntMetricConsensusRoundStateCalled) + }) + t.Run("managed key leader", func(t *testing.T) { + t.Parallel() + + wasMetricConsensusStateCalled := false + wasMetricCountLeaderCalled := false + cntMetricConsensusRoundStateCalled := 0 + container := consensusMocks.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{} + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetStringValueHandler: func(key string, value string) { + if key == common.MetricConsensusState { + wasMetricConsensusStateCalled = true + assert.Equal(t, "proposer", value) + } + if key == common.MetricConsensusRoundState { + cntMetricConsensusRoundStateCalled++ + switch cntMetricConsensusRoundStateCalled { + case 1: + assert.Equal(t, "", value) + case 2: + assert.Equal(t, "proposed", value) + default: + assert.Fail(t, "should have been called only twice") + } + } + }, + IncrementHandler: func(key string) { + if key == common.MetricCountLeader { + wasMetricCountLeaderCalled = true + } + }, + } + ch := make(chan bool, 1) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) + leader, _ := consensusState.GetLeader() + consensusState.SetSelfPubKey(leader) + keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { + return string(pkBytes) == leader + } + sr, _ := spos.NewSubround( + -1, + bls.SrStartRound, + bls.SrBlock, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(START_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + appStatusHandler, + ) + + srStartRound, _ := v1.NewSubroundStartRound( + sr, + extend, + v1.ProcessingThresholdPercent, + displayStatistics, + executeStoredMessages, + &testscommon.SentSignatureTrackerStub{}, + ) + srStartRound.Check() + assert.True(t, wasMetricConsensusStateCalled) + assert.True(t, wasMetricCountLeaderCalled) + assert.Equal(t, 2, cntMetricConsensusRoundStateCalled) + }) +} + +func TestSubroundStartRound_GenerateNextConsensusGroupShouldReturnErr(t *testing.T) { + t.Parallel() + + validatorGroupSelector := &shardingMocks.NodesCoordinatorMock{} + + err := errors.New("error") + validatorGroupSelector.ComputeValidatorsGroupCalled = func( + bytes []byte, + round uint64, + shardId uint32, + epoch uint32, + ) (nodesCoordinator.Validator, []nodesCoordinator.Validator, error) { + return nil, nil, err + } + container := consensusMocks.InitConsensusCore() + container.SetValidatorGroupSelector(validatorGroupSelector) + + srStartRound := initSubroundStartRoundWithContainer(container) + + err2 := srStartRound.GenerateNextConsensusGroup(0) + + assert.Equal(t, err, err2) +} diff --git a/consensus/spos/bls/benchmark_test.go b/consensus/spos/bls/v2/benchmark_test.go similarity index 89% rename from consensus/spos/bls/benchmark_test.go rename to consensus/spos/bls/v2/benchmark_test.go index 4a0802760b8..b7c4b962071 100644 --- a/consensus/spos/bls/benchmark_test.go +++ b/consensus/spos/bls/v2/benchmark_test.go @@ -1,4 +1,4 
@@ -package bls_test +package v2_test import ( "context" @@ -15,13 +15,14 @@ import ( "github.com/stretchr/testify/require" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" cryptoFactory "github.com/multiversx/mx-chain-go/factory/crypto" - nodeMock "github.com/multiversx/mx-chain-go/node/mock" "github.com/multiversx/mx-chain-go/testscommon" + nodeMock "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" @@ -73,7 +74,7 @@ func benchmarkSubroundSignatureDoSignatureJobForManagedKeys(b *testing.B, number } args := cryptoFactory.ArgsSigningHandler{ - PubKeys: createEligibleListFromMap(mapKeys), + PubKeys: initializers.CreateEligibleListFromMap(mapKeys), MultiSignerContainer: &cryptoMocks.MultiSignerContainerStub{ GetMultiSignerCalled: func(epoch uint32) (crypto.MultiSigner, error) { return multiSigHandler, nil @@ -86,7 +87,7 @@ func benchmarkSubroundSignatureDoSignatureJobForManagedKeys(b *testing.B, number require.Nil(b, err) container.SetSigningHandler(signingHandler) - consensusState := initConsensusStateWithArgs(keysHandlerMock, mapKeys) + consensusState := initializers.InitConsensusStateWithArgs(keysHandlerMock, mapKeys) ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -107,7 +108,7 @@ func benchmarkSubroundSignatureDoSignatureJobForManagedKeys(b *testing.B, number signatureSentForPks := make(map[string]struct{}) mutex := sync.Mutex{} - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{ @@ -117,11 +118,11 @@ func benchmarkSubroundSignatureDoSignatureJobForManagedKeys(b *testing.B, number mutex.Unlock() }, }, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, &nodeMock.ThrottlerStub{}, ) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) sr.SetSelfPubKey("OTHER") b.ResetTimer() diff --git a/consensus/spos/bls/benchmark_verify_signatures_test.go b/consensus/spos/bls/v2/benchmark_verify_signatures_test.go similarity index 93% rename from consensus/spos/bls/benchmark_verify_signatures_test.go rename to consensus/spos/bls/v2/benchmark_verify_signatures_test.go index 85b14c9a2c2..09a276dc3a3 100644 --- a/consensus/spos/bls/benchmark_verify_signatures_test.go +++ b/consensus/spos/bls/v2/benchmark_verify_signatures_test.go @@ -1,4 +1,4 @@ -package bls_test +package v2_test import ( "context" @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/spos/bls" dataRetrieverMocks "github.com/multiversx/mx-chain-go/dataRetriever/mock" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/common" factoryCrypto "github.com/multiversx/mx-chain-go/factory/crypto" @@ -102,13 +103,13 @@ func BenchmarkSubroundEndRound_VerifyNodesOnAggSigFailTime(b *testing.B) { require.Nil(b, err) container.SetSigningHandler(signingHandler) - consensusState := initConsensusStateWithArgsVerifySignature(keysHandlerMock, keys) + consensusState := 
initializers.InitConsensusStateWithArgsVerifySignature(keysHandlerMock, keys) dataToBeSigned := []byte("message") consensusState.Data = dataToBeSigned sr := initSubroundEndRoundWithContainerAndConsensusState(container, &statusHandler.AppStatusHandlerStub{}, consensusState, &dataRetrieverMocks.ThrottlerStub{}) for i := 0; i < len(sr.ConsensusGroup()); i++ { - _, err := sr.SigningHandler().CreateSignatureShareForPublicKey(dataToBeSigned, uint16(i), (*sr).EnableEpochsHandler().GetCurrentEpoch(), []byte(keys[i])) + _, err := sr.SigningHandler().CreateSignatureShareForPublicKey(dataToBeSigned, uint16(i), sr.EnableEpochsHandler().GetCurrentEpoch(), []byte(keys[i])) require.Nil(b, err) _ = sr.SetJobDone(keys[i], bls.SrSignature, true) } diff --git a/consensus/spos/bls/blsSubroundsFactory.go b/consensus/spos/bls/v2/blsSubroundsFactory.go similarity index 80% rename from consensus/spos/bls/blsSubroundsFactory.go rename to consensus/spos/bls/v2/blsSubroundsFactory.go index 28531a6af49..52baeb375c2 100644 --- a/consensus/spos/bls/blsSubroundsFactory.go +++ b/consensus/spos/bls/v2/blsSubroundsFactory.go @@ -1,4 +1,4 @@ -package bls +package v2 import ( "time" @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/outport" ) @@ -14,7 +15,7 @@ import ( // functionality type factory struct { consensusCore spos.ConsensusCoreHandler - consensusState *spos.ConsensusState + consensusState spos.ConsensusStateHandler worker spos.WorkerHandler appStatusHandler core.AppStatusHandler @@ -28,14 +29,16 @@ type factory struct { // NewSubroundsFactory creates a new consensusState object func NewSubroundsFactory( consensusDataContainer spos.ConsensusCoreHandler, - consensusState *spos.ConsensusState, + consensusState spos.ConsensusStateHandler, worker spos.WorkerHandler, chainID []byte, currentPid core.PeerID, appStatusHandler core.AppStatusHandler, sentSignaturesTracker spos.SentSignaturesTracker, signatureThrottler core.Throttler, + outportHandler outport.OutportHandler, ) (*factory, error) { + // no need to check the outport handler, it can be nil err := checkNewFactoryParams( consensusDataContainer, consensusState, @@ -58,6 +61,7 @@ func NewSubroundsFactory( currentPid: currentPid, sentSignaturesTracker: sentSignaturesTracker, signatureThrottler: signatureThrottler, + outportHandler: outportHandler, } return &fct, nil @@ -65,7 +69,7 @@ func NewSubroundsFactory( func checkNewFactoryParams( container spos.ConsensusCoreHandler, - state *spos.ConsensusState, + state spos.ConsensusStateHandler, worker spos.WorkerHandler, chainID []byte, appStatusHandler core.AppStatusHandler, @@ -139,11 +143,11 @@ func (fct *factory) getTimeDuration() time.Duration { func (fct *factory) generateStartRoundSubround() error { subround, err := spos.NewSubround( -1, - SrStartRound, - SrBlock, + bls.SrStartRound, + bls.SrBlock, int64(float64(fct.getTimeDuration())*srStartStartTime), int64(float64(fct.getTimeDuration())*srStartEndTime), - getSubroundName(SrStartRound), + bls.GetSubroundName(bls.SrStartRound), fct.consensusState, fct.worker.GetConsensusStateChangedChannel(), fct.worker.ExecuteStoredMessages, @@ -178,12 +182,12 @@ func (fct *factory) generateStartRoundSubround() error { func (fct *factory) generateBlockSubround() error { subround, err := spos.NewSubround( - SrStartRound, - SrBlock, - SrSignature, + bls.SrStartRound, + bls.SrBlock, + bls.SrSignature, 
int64(float64(fct.getTimeDuration())*srBlockStartTime), int64(float64(fct.getTimeDuration())*srBlockEndTime), - getSubroundName(SrBlock), + bls.GetSubroundName(bls.SrBlock), fct.consensusState, fct.worker.GetConsensusStateChangedChannel(), fct.worker.ExecuteStoredMessages, @@ -205,9 +209,9 @@ func (fct *factory) generateBlockSubround() error { return err } - fct.worker.AddReceivedMessageCall(MtBlockBodyAndHeader, subroundBlockInstance.receivedBlockBodyAndHeader) - fct.worker.AddReceivedMessageCall(MtBlockBody, subroundBlockInstance.receivedBlockBody) - fct.worker.AddReceivedMessageCall(MtBlockHeader, subroundBlockInstance.receivedBlockHeaderBeforeEquivalentProofs) + fct.worker.AddReceivedMessageCall(bls.MtBlockBodyAndHeader, subroundBlockInstance.receivedBlockBodyAndHeader) + fct.worker.AddReceivedMessageCall(bls.MtBlockBody, subroundBlockInstance.receivedBlockBody) + fct.worker.AddReceivedMessageCall(bls.MtBlockHeader, subroundBlockInstance.receivedBlockHeaderBeforeEquivalentProofs) fct.worker.AddReceivedHeaderHandler(subroundBlockInstance.receivedBlockHeader) fct.consensusCore.Chronology().AddSubround(subroundBlockInstance) @@ -216,12 +220,12 @@ func (fct *factory) generateBlockSubround() error { func (fct *factory) generateSignatureSubround() error { subround, err := spos.NewSubround( - SrBlock, - SrSignature, - SrEndRound, + bls.SrBlock, + bls.SrSignature, + bls.SrEndRound, int64(float64(fct.getTimeDuration())*srSignatureStartTime), int64(float64(fct.getTimeDuration())*srSignatureEndTime), - getSubroundName(SrSignature), + bls.GetSubroundName(bls.SrSignature), fct.consensusState, fct.worker.GetConsensusStateChangedChannel(), fct.worker.ExecuteStoredMessages, @@ -246,7 +250,7 @@ func (fct *factory) generateSignatureSubround() error { } // TODO[cleanup cns finality]: remove this - fct.worker.AddReceivedMessageCall(MtSignature, subroundSignatureObject.receivedSignature) + fct.worker.AddReceivedMessageCall(bls.MtSignature, subroundSignatureObject.receivedSignature) fct.consensusCore.Chronology().AddSubround(subroundSignatureObject) return nil @@ -254,12 +258,12 @@ func (fct *factory) generateSignatureSubround() error { func (fct *factory) generateEndRoundSubround() error { subround, err := spos.NewSubround( - SrSignature, - SrEndRound, + bls.SrSignature, + bls.SrEndRound, -1, int64(float64(fct.getTimeDuration())*srEndStartTime), int64(float64(fct.getTimeDuration())*srEndEndTime), - getSubroundName(SrEndRound), + bls.GetSubroundName(bls.SrEndRound), fct.consensusState, fct.worker.GetConsensusStateChangedChannel(), fct.worker.ExecuteStoredMessages, @@ -284,9 +288,9 @@ func (fct *factory) generateEndRoundSubround() error { return err } - fct.worker.AddReceivedMessageCall(MtBlockHeaderFinalInfo, subroundEndRoundObject.receivedBlockHeaderFinalInfo) - fct.worker.AddReceivedMessageCall(MtInvalidSigners, subroundEndRoundObject.receivedInvalidSignersInfo) - fct.worker.AddReceivedMessageCall(MtSignature, subroundEndRoundObject.receivedSignature) + fct.worker.AddReceivedMessageCall(bls.MtBlockHeaderFinalInfo, subroundEndRoundObject.receivedBlockHeaderFinalInfo) + fct.worker.AddReceivedMessageCall(bls.MtInvalidSigners, subroundEndRoundObject.receivedInvalidSignersInfo) + fct.worker.AddReceivedMessageCall(bls.MtSignature, subroundEndRoundObject.receivedSignature) fct.worker.AddReceivedHeaderHandler(subroundEndRoundObject.receivedHeader) fct.consensusCore.Chronology().AddSubround(subroundEndRoundObject) @@ -296,10 +300,10 @@ func (fct *factory) generateEndRoundSubround() error { func (fct *factory) 
initConsensusThreshold() { pBFTThreshold := core.GetPBFTThreshold(fct.consensusState.ConsensusGroupSize()) pBFTFallbackThreshold := core.GetPBFTFallbackThreshold(fct.consensusState.ConsensusGroupSize()) - fct.consensusState.SetThreshold(SrBlock, 1) - fct.consensusState.SetThreshold(SrSignature, pBFTThreshold) - fct.consensusState.SetFallbackThreshold(SrBlock, 1) - fct.consensusState.SetFallbackThreshold(SrSignature, pBFTFallbackThreshold) + fct.consensusState.SetThreshold(bls.SrBlock, 1) + fct.consensusState.SetThreshold(bls.SrSignature, pBFTThreshold) + fct.consensusState.SetFallbackThreshold(bls.SrBlock, 1) + fct.consensusState.SetFallbackThreshold(bls.SrSignature, pBFTFallbackThreshold) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/consensus/spos/bls/blsSubroundsFactory_test.go b/consensus/spos/bls/v2/blsSubroundsFactory_test.go similarity index 83% rename from consensus/spos/bls/blsSubroundsFactory_test.go rename to consensus/spos/bls/v2/blsSubroundsFactory_test.go index ce976c27c58..bfafd967169 100644 --- a/consensus/spos/bls/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/v2/blsSubroundsFactory_test.go @@ -1,4 +1,4 @@ -package bls_test +package v2_test import ( "context" @@ -10,13 +10,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/multiversx/mx-chain-go/consensus" - "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" dataRetrieverMocks "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/testscommon" testscommonConsensus "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" testscommonOutport "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) @@ -31,8 +32,8 @@ const roundTimeDuration = 100 * time.Millisecond func executeStoredMessages() { } -func initRoundHandlerMock() *mock.RoundHandlerMock { - return &mock.RoundHandlerMock{ +func initRoundHandlerMock() *testscommonConsensus.RoundHandlerMock { + return &testscommonConsensus.RoundHandlerMock{ RoundIndex: 0, TimeStampCalled: func() time.Time { return time.Unix(0, 0) @@ -44,7 +45,7 @@ func initRoundHandlerMock() *mock.RoundHandlerMock { } func initWorker() spos.WorkerHandler { - sposWorker := &mock.SposWorkerMock{} + sposWorker := &testscommonConsensus.SposWorkerMock{} sposWorker.GetConsensusStateChangedChannelsCalled = func() chan bool { return make(chan bool) } @@ -57,11 +58,11 @@ func initWorker() spos.WorkerHandler { return sposWorker } -func initFactoryWithContainer(container *testscommonConsensus.ConsensusCoreMock) bls.Factory { +func initFactoryWithContainer(container *testscommonConsensus.ConsensusCoreMock) v2.Factory { worker := initWorker() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() - fct, _ := bls.NewSubroundsFactory( + fct, _ := v2.NewSubroundsFactory( container, consensusState, worker, @@ -70,12 +71,13 @@ func initFactoryWithContainer(container *testscommonConsensus.ConsensusCoreMock) &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) return fct } -func initFactory() bls.Factory { +func initFactory() v2.Factory { container := 
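// ---------------------------------------------------------------------------
// Editor's aside (illustrative sketch, not part of the diff): initConsensusThreshold
// above keeps the block threshold at 1 (a single proposer) and derives the
// signature threshold from the consensus group size via core.GetPBFTThreshold.
// The helper below assumes the common pBFT rule of floor(2n/3)+1; the exact
// formula used by mx-chain-core-go may differ, so treat this only as a sketch.
package main

import "fmt"

// pbftThreshold is an illustrative stand-in for core.GetPBFTThreshold
func pbftThreshold(consensusSize int) int {
	return consensusSize*2/3 + 1
}

func main() {
	for _, n := range []int{7, 63, 400} {
		fmt.Printf("group size %d -> signature threshold %d (block threshold stays 1)\n", n, pbftThreshold(n))
	}
}
// --------------------------------------------------------------- end aside --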
testscommonConsensus.InitConsensusCore() return initFactoryWithContainer(container) } @@ -108,10 +110,10 @@ func TestFactory_GetMessageTypeName(t *testing.T) { func TestFactory_NewFactoryNilContainerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() worker := initWorker() - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( nil, consensusState, worker, @@ -120,6 +122,7 @@ func TestFactory_NewFactoryNilContainerShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -132,7 +135,7 @@ func TestFactory_NewFactoryNilConsensusStateShouldFail(t *testing.T) { container := testscommonConsensus.InitConsensusCore() worker := initWorker() - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, nil, worker, @@ -141,6 +144,7 @@ func TestFactory_NewFactoryNilConsensusStateShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -150,12 +154,12 @@ func TestFactory_NewFactoryNilConsensusStateShouldFail(t *testing.T) { func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetBlockchain(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -164,6 +168,7 @@ func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -173,12 +178,12 @@ func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) { func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetBlockProcessor(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -187,6 +192,7 @@ func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -196,12 +202,12 @@ func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) { func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetBootStrapper(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -210,6 +216,7 @@ func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -219,12 +226,12 @@ func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) { func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) { t.Parallel() - 
consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetChronology(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -233,6 +240,7 @@ func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -242,12 +250,12 @@ func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetHasher(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -256,6 +264,7 @@ func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -265,12 +274,12 @@ func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) { func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetMarshalizer(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -279,6 +288,7 @@ func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -288,12 +298,12 @@ func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetMultiSignerContainer(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -302,6 +312,7 @@ func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -311,12 +322,12 @@ func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetRoundHandler(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -325,6 +336,7 @@ func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -334,12 +346,12 @@ func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) { func 
TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetShardCoordinator(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -348,6 +360,7 @@ func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -357,12 +370,12 @@ func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) { func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetSyncTimer(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -371,6 +384,7 @@ func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -380,12 +394,12 @@ func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() container.SetValidatorGroupSelector(nil) - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -394,6 +408,7 @@ func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -403,10 +418,10 @@ func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) { func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, nil, @@ -415,6 +430,7 @@ func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -424,11 +440,11 @@ func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) { func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -437,6 +453,7 @@ func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) { nil, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -446,11 +463,11 @@ func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) { func 
TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -459,20 +476,21 @@ func TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, nil, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) - assert.Equal(t, bls.ErrNilSentSignatureTracker, err) + assert.Equal(t, v2.ErrNilSentSignatureTracker, err) } func TestFactory_NewFactoryNilThrottlerShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -481,6 +499,7 @@ func TestFactory_NewFactoryNilThrottlerShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, nil, + nil, ) assert.Nil(t, fct) @@ -498,11 +517,11 @@ func TestFactory_NewFactoryShouldWork(t *testing.T) { func TestFactory_NewFactoryEmptyChainIDShouldFail(t *testing.T) { t.Parallel() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() container := testscommonConsensus.InitConsensusCore() worker := initWorker() - fct, err := bls.NewSubroundsFactory( + fct, err := v2.NewSubroundsFactory( container, consensusState, worker, @@ -511,6 +530,7 @@ func TestFactory_NewFactoryEmptyChainIDShouldFail(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, &dataRetrieverMocks.ThrottlerStub{}, + nil, ) assert.Nil(t, fct) @@ -521,7 +541,7 @@ func TestFactory_GenerateSubroundStartRoundShouldFailWhenNewSubroundFail(t *test t.Parallel() fct := *initFactory() - fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + fct.Worker().(*testscommonConsensus.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { return nil } @@ -546,7 +566,7 @@ func TestFactory_GenerateSubroundBlockShouldFailWhenNewSubroundFail(t *testing.T t.Parallel() fct := *initFactory() - fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + fct.Worker().(*testscommonConsensus.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { return nil } @@ -571,7 +591,7 @@ func TestFactory_GenerateSubroundSignatureShouldFailWhenNewSubroundFail(t *testi t.Parallel() fct := *initFactory() - fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + fct.Worker().(*testscommonConsensus.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { return nil } @@ -596,7 +616,7 @@ func TestFactory_GenerateSubroundEndRoundShouldFailWhenNewSubroundFail(t *testin t.Parallel() fct := *initFactory() - fct.Worker().(*mock.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { + fct.Worker().(*testscommonConsensus.SposWorkerMock).GetConsensusStateChangedChannelsCalled = func() chan bool { return nil } diff --git a/consensus/spos/bls/v2/constants.go b/consensus/spos/bls/v2/constants.go new file mode 100644 index 00000000000..ccfd6c27395 --- /dev/null +++ b/consensus/spos/bls/v2/constants.go @@ 
-0,0 +1,37 @@ +package v2 + +import ( + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("consensus/spos/bls") + +// waitingAllSigsMaxTimeThreshold specifies the max allocated time for waiting all signatures from the total time of the subround signature +const waitingAllSigsMaxTimeThreshold = 0.5 + +// processingThresholdPercent specifies the max allocated time for processing the block as a percentage of the total time of the round +const processingThresholdPercent = 85 + +// srStartStartTime specifies the start time, from the total time of the round, of Subround Start +const srStartStartTime = 0.0 + +// srStartEndTime specifies the end time, from the total time of the round, of Subround Start +const srStartEndTime = 0.05 + +// srBlockStartTime specifies the start time, from the total time of the round, of Subround Block +const srBlockStartTime = 0.05 + +// srBlockEndTime specifies the end time, from the total time of the round, of Subround Block +const srBlockEndTime = 0.25 + +// srSignatureStartTime specifies the start time, from the total time of the round, of Subround Signature +const srSignatureStartTime = 0.25 + +// srSignatureEndTime specifies the end time, from the total time of the round, of Subround Signature +const srSignatureEndTime = 0.85 + +// srEndStartTime specifies the start time, from the total time of the round, of Subround End +const srEndStartTime = 0.85 + +// srEndEndTime specifies the end time, from the total time of the round, of Subround End +const srEndEndTime = 0.95 diff --git a/consensus/spos/bls/v2/errors.go b/consensus/spos/bls/v2/errors.go new file mode 100644 index 00000000000..97c8e1eb685 --- /dev/null +++ b/consensus/spos/bls/v2/errors.go @@ -0,0 +1,6 @@ +package v2 + +import "errors" + +// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker +var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") diff --git a/consensus/spos/bls/export_test.go b/consensus/spos/bls/v2/export_test.go similarity index 95% rename from consensus/spos/bls/export_test.go rename to consensus/spos/bls/v2/export_test.go index e36bce4c94e..696fec6a98c 100644 --- a/consensus/spos/bls/export_test.go +++ b/consensus/spos/bls/v2/export_test.go @@ -1,4 +1,4 @@ -package bls +package v2 import ( "context" @@ -20,8 +20,6 @@ import ( ) const ProcessingThresholdPercent = processingThresholdPercent -const DefaultMaxNumOfMessageTypeAccepted = defaultMaxNumOfMessageTypeAccepted -const MaxNumOfMessageTypeSignatureAccepted = maxNumOfMessageTypeSignatureAccepted // factory @@ -49,7 +47,7 @@ func (fct *factory) ChronologyHandler() consensus.ChronologyHandler { } // ConsensusState gets the consensus state struct pointer -func (fct *factory) ConsensusState() *spos.ConsensusState { +func (fct *factory) ConsensusState() spos.ConsensusStateHandler { return fct.consensusState } @@ -130,8 +128,8 @@ func (fct *factory) Outport() outport.OutportHandler { // subroundStartRound -// SubroundStartRound defines a type for the subroundStartRound structure -type SubroundStartRound *subroundStartRound +// SubroundStartRound defines an alias for the subroundStartRound structure +type SubroundStartRound = *subroundStartRound // DoStartRoundJob method does the job of the subround StartRound func (sr *subroundStartRound) DoStartRoundJob() bool { @@ -160,8 +158,8 @@ func (sr *subroundStartRound) GetSentSignatureTracker() spos.SentSignaturesTrack // subroundBlock -// SubroundBlock defines a type for the subroundBlock structure -type
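// ---------------------------------------------------------------------------
// Editor's aside (illustrative sketch, not part of the diff): export_test.go
// above switches SubroundStartRound (and the other Subround* names) from a
// defined type, `type SubroundStartRound *subroundStartRound`, to a type
// alias, `type SubroundStartRound = *subroundStartRound`. The difference
// matters for tests: a defined type does not inherit the methods of the
// pointer type it is based on, while an alias is the same type and keeps them.
// The names below are invented purely to demonstrate that Go rule.
package main

import "fmt"

type inner struct{ n int }

func (i *inner) Double() int { return i.n * 2 }

// Defined is a new type: it has an empty method set of its own.
type Defined *inner

// Alias is just another name for *inner, so Double stays callable.
type Alias = *inner

func main() {
	v := &inner{n: 21}
	var d Defined = v
	var a Alias = v
	_ = d
	// d.Double() // does not compile: Defined has no method Double
	fmt.Println(a.Double()) // 42
}
// --------------------------------------------------------------- end aside --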
SubroundBlock *subroundBlock +// SubroundBlock defines an alias for the subroundBlock structure +type SubroundBlock = *subroundBlock // Blockchain gets the ChainHandler stored in the ConsensusCore func (sr *subroundBlock) BlockChain() data.ChainHandler { @@ -235,8 +233,8 @@ func (sr *subroundBlock) ReceivedBlockBodyAndHeader(cnsDta *consensus.Message) b // subroundSignature -// SubroundSignature defines a type for the subroundSignature structure -type SubroundSignature *subroundSignature +// SubroundSignature defines an alias to the subroundSignature structure +type SubroundSignature = *subroundSignature // DoSignatureJob method does the job of the subround Signature func (sr *subroundSignature) DoSignatureJob() bool { @@ -358,11 +356,6 @@ func (sr *subroundEndRound) GetSentSignatureTracker() spos.SentSignaturesTracker return sr.sentSignatureTracker } -// GetStringValue calls the unexported getStringValue function -func GetStringValue(messageType consensus.MessageType) string { - return getStringValue(messageType) -} - // ChangeEpoch calls the unexported changeEpoch function func (sr *subroundStartRound) ChangeEpoch(epoch uint32) { sr.changeEpoch(epoch) diff --git a/consensus/spos/bls/subroundBlock.go b/consensus/spos/bls/v2/subroundBlock.go similarity index 93% rename from consensus/spos/bls/subroundBlock.go rename to consensus/spos/bls/v2/subroundBlock.go index cec1c657c41..6e4a115c043 100644 --- a/consensus/spos/bls/subroundBlock.go +++ b/consensus/spos/bls/v2/subroundBlock.go @@ -1,4 +1,4 @@ -package bls +package v2 import ( "context" @@ -8,9 +8,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" ) // maxAllowedSizeInBytes defines how many bytes are allowed as payload in a message @@ -59,7 +61,7 @@ func checkNewSubroundBlockParams( return spos.ErrNilSubround } - if baseSubround.ConsensusState == nil { + if check.IfNil(baseSubround.ConsensusStateHandler) { return spos.ErrNilConsensusState } @@ -127,7 +129,7 @@ func (sr *subroundBlock) doBlockJob(ctx context.Context) bool { // placeholder for subroundBlock.doBlockJob script - sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(header, body, sr.RoundTimeStamp) + sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(header, body, sr.GetRoundTimeStamp()) return true } @@ -181,7 +183,7 @@ func (sr *subroundBlock) couldBeSentTogether(marshalizedBody []byte, marshalized } func (sr *subroundBlock) createBlock(header data.HeaderHandler) (data.HeaderHandler, data.BodyHandler, error) { - startTime := sr.RoundTimeStamp + startTime := sr.GetRoundTimeStamp() maxTime := time.Duration(sr.EndTime()) haveTimeInCurrentSubround := func() bool { return sr.RoundHandler().RemainingTime(startTime, maxTime) > 0 @@ -220,7 +222,7 @@ func (sr *subroundBlock) sendHeaderAndBlockBody( marshalizedHeader, []byte(leader), nil, - int(MtBlockBodyAndHeader), + int(bls.MtBlockBodyAndHeader), sr.RoundHandler().Index(), sr.ChainID(), nil, @@ -240,9 +242,9 @@ func (sr *subroundBlock) sendHeaderAndBlockBody( "nonce", headerHandler.GetNonce(), "hash", headerHash) - sr.Data = headerHash - sr.Body = bodyHandler - sr.Header = headerHandler + sr.SetData(headerHash) + sr.SetBody(bodyHandler) + sr.SetHeader(headerHandler) return true } @@ -265,7 
+267,7 @@ func (sr *subroundBlock) sendBlockBody( nil, []byte(leader), nil, - int(MtBlockBody), + int(bls.MtBlockBody), sr.RoundHandler().Index(), sr.ChainID(), nil, @@ -283,7 +285,7 @@ func (sr *subroundBlock) sendBlockBody( log.Debug("step 1: block body has been sent") - sr.Body = bodyHandler + sr.SetBody(bodyHandler) return true } @@ -315,8 +317,8 @@ func (sr *subroundBlock) sendBlockHeader( "nonce", headerHandler.GetNonce(), "hash", headerHash) - sr.Data = headerHash - sr.Header = headerHandler + sr.SetData(headerHash) + sr.SetHeader(headerHandler) return true } @@ -341,7 +343,7 @@ func (sr *subroundBlock) sendBlockHeaderBeforeEquivalentProofs( marshalledHeader, []byte(leader), nil, - int(MtBlockHeader), + int(bls.MtBlockHeader), sr.RoundHandler().Index(), sr.ChainID(), nil, @@ -361,8 +363,8 @@ func (sr *subroundBlock) sendBlockHeaderBeforeEquivalentProofs( "nonce", headerHandler.GetNonce(), "hash", headerHash) - sr.Data = headerHash - sr.Header = headerHandler + sr.SetData(headerHash) + sr.SetHeader(headerHandler) return true } @@ -523,11 +525,11 @@ func (sr *subroundBlock) receivedBlockBodyAndHeader(ctx context.Context, cnsDta header := sr.BlockProcessor().DecodeBlockHeader(cnsDta.Header) - sr.Data = cnsDta.BlockHeaderHash - sr.Body = sr.BlockProcessor().DecodeBlockBody(cnsDta.Body) - sr.Header = header + sr.SetData(cnsDta.BlockHeaderHash) + sr.SetBody(sr.BlockProcessor().DecodeBlockBody(cnsDta.Body)) + sr.SetHeader(header) - isInvalidData := check.IfNil(sr.Body) || sr.isInvalidHeaderOrData() + isInvalidData := check.IfNil(sr.GetBody()) || sr.isInvalidHeaderOrData() if isInvalidData { return false } @@ -535,7 +537,7 @@ func (sr *subroundBlock) receivedBlockBodyAndHeader(ctx context.Context, cnsDta sr.saveProofForPreviousHeaderIfNeeded() log.Debug("step 1: block body and header have been received", - "nonce", sr.Header.GetNonce(), + "nonce", sr.GetHeader().GetNonce(), "hash", cnsDta.BlockHeaderHash) sw.Start("processReceivedBlock") @@ -574,7 +576,7 @@ func (sr *subroundBlock) saveProofForPreviousHeaderIfNeeded() { return } - proof = sr.Header.GetPreviousProof() + proof = sr.GetHeader().GetPreviousProof() err = sr.EquivalentProofsPool().AddProof(proof) if err != nil { log.Debug("saveProofForPreviousHeaderIfNeeded: failed to add proof, %w", err) @@ -583,7 +585,7 @@ func (sr *subroundBlock) saveProofForPreviousHeaderIfNeeded() { } func (sr *subroundBlock) isInvalidHeaderOrData() bool { - return sr.Data == nil || check.IfNil(sr.Header) || sr.Header.CheckFieldsForNil() != nil + return sr.GetData() == nil || check.IfNil(sr.GetHeader()) || sr.GetHeader().CheckFieldsForNil() != nil } // receivedBlockBody method is called when a block body is received through the block body channel @@ -608,9 +610,9 @@ func (sr *subroundBlock) receivedBlockBody(ctx context.Context, cnsDta *consensu return false } - sr.Body = sr.BlockProcessor().DecodeBlockBody(cnsDta.Body) + sr.SetBody(sr.BlockProcessor().DecodeBlockBody(cnsDta.Body)) - if check.IfNil(sr.Body) { + if check.IfNil(sr.GetBody()) { return false } @@ -658,8 +660,8 @@ func (sr *subroundBlock) receivedBlockHeaderBeforeEquivalentProofs(ctx context.C header := sr.BlockProcessor().DecodeBlockHeader(cnsDta.Header) - sr.Data = cnsDta.BlockHeaderHash - sr.Header = header + sr.SetData(cnsDta.BlockHeaderHash) + sr.SetHeader(header) if sr.isInvalidHeaderOrData() { return false @@ -668,7 +670,7 @@ func (sr *subroundBlock) receivedBlockHeaderBeforeEquivalentProofs(ctx context.C sr.saveProofForPreviousHeaderIfNeeded() log.Debug("step 1: block header has been 
received", - "nonce", sr.Header.GetNonce(), + "nonce", sr.GetHeader().GetNonce(), "hash", cnsDta.BlockHeaderHash) blockProcessedWithSuccess := sr.processReceivedBlock(ctx, cnsDta.RoundIndex, cnsDta.PubKey) @@ -721,14 +723,14 @@ func (sr *subroundBlock) receivedBlockHeader(headerHandler data.HeaderHandler) { return } - sr.Data = sr.Hasher().Compute(string(marshalledHeader)) - sr.Header = headerHandler + sr.SetData(sr.Hasher().Compute(string(marshalledHeader))) + sr.SetHeader(headerHandler) sr.saveProofForPreviousHeaderIfNeeded() log.Debug("step 1: block header has been received", - "nonce", sr.Header.GetNonce(), - "hash", sr.Data) + "nonce", sr.GetHeader().GetNonce(), + "hash", sr.GetData()) sr.PeerHonestyHandler().ChangeScore( sr.Leader(), @@ -749,10 +751,10 @@ func (sr *subroundBlock) processReceivedBlock( round int64, senderPK []byte, ) bool { - if check.IfNil(sr.Body) { + if check.IfNil(sr.GetBody()) { return false } - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false } @@ -762,13 +764,13 @@ func (sr *subroundBlock) processReceivedBlock( sr.SetProcessingBlock(true) - shouldNotProcessBlock := sr.ExtendedCalled || round < sr.RoundHandler().Index() + shouldNotProcessBlock := sr.GetExtendedCalled() || round < sr.RoundHandler().Index() if shouldNotProcessBlock { log.Debug("canceled round, extended has been called or round index has been changed", "round", sr.RoundHandler().Index(), "subround", sr.Name(), "cnsDta round", round, - "extended called", sr.ExtendedCalled, + "extended called", sr.GetExtendedCalled(), ) return false } @@ -781,7 +783,7 @@ func (sr *subroundBlock) processBlock( roundIndex int64, pubkey []byte, ) bool { - startTime := sr.RoundTimeStamp + startTime := sr.GetRoundTimeStamp() maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 remainingTimeInCurrentRound := func() time.Duration { return sr.RoundHandler().RemainingTime(startTime, maxTime) @@ -791,8 +793,8 @@ func (sr *subroundBlock) processBlock( defer sr.computeSubroundProcessingMetric(metricStatTime, common.MetricProcessedProposedBlock) err := sr.BlockProcessor().ProcessBlock( - sr.Header, - sr.Body, + sr.GetHeader(), + sr.GetBody(), remainingTimeInCurrentRound, ) @@ -807,7 +809,7 @@ func (sr *subroundBlock) processBlock( if err != nil { sr.printCancelRoundLogMessage(ctx, err) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } @@ -819,7 +821,7 @@ func (sr *subroundBlock) processBlock( return false } - sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(sr.Header, sr.Body, sr.RoundTimeStamp) + sr.ConsensusCoreHandler.ScheduledProcessor().StartScheduledProcessing(sr.GetHeader(), sr.GetBody(), sr.GetRoundTimeStamp()) return true } @@ -849,7 +851,7 @@ func (sr *subroundBlock) computeSubroundProcessingMetric(startTime time.Time, me // doBlockConsensusCheck method checks if the consensus in the subround Block is achieved func (sr *subroundBlock) doBlockConsensusCheck() bool { - if sr.RoundCanceled { + if sr.GetRoundCanceled() { return false } diff --git a/consensus/spos/bls/subroundBlock_test.go b/consensus/spos/bls/v2/subroundBlock_test.go similarity index 87% rename from consensus/spos/bls/subroundBlock_test.go rename to consensus/spos/bls/v2/subroundBlock_test.go index d24713cd413..94b1bd1060c 100644 --- a/consensus/spos/bls/subroundBlock_test.go +++ b/consensus/spos/bls/v2/subroundBlock_test.go @@ -1,4 +1,4 @@ -package bls_test +package v2_test import ( "errors" @@ -18,8 +18,10 @@ import ( 
"github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" "github.com/multiversx/mx-chain-go/testscommon" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -62,21 +64,21 @@ func createDefaultHeader() *block.Header { } } -func defaultSubroundBlockFromSubround(sr *spos.Subround) (bls.SubroundBlock, error) { - srBlock, err := bls.NewSubroundBlock( +func defaultSubroundBlockFromSubround(sr *spos.Subround) (v2.SubroundBlock, error) { + srBlock, err := v2.NewSubroundBlock( sr, - bls.ProcessingThresholdPercent, - &mock.SposWorkerMock{}, + v2.ProcessingThresholdPercent, + &consensusMocks.SposWorkerMock{}, ) return srBlock, err } -func defaultSubroundBlockWithoutErrorFromSubround(sr *spos.Subround) bls.SubroundBlock { - srBlock, _ := bls.NewSubroundBlock( +func defaultSubroundBlockWithoutErrorFromSubround(sr *spos.Subround) v2.SubroundBlock { + srBlock, _ := v2.NewSubroundBlock( sr, - bls.ProcessingThresholdPercent, - &mock.SposWorkerMock{}, + v2.ProcessingThresholdPercent, + &consensusMocks.SposWorkerMock{}, ) return srBlock @@ -86,7 +88,7 @@ func initSubroundBlock( blockChain data.ChainHandler, container *consensusMocks.ConsensusCoreMock, appStatusHandler core.AppStatusHandler, -) bls.SubroundBlock { +) v2.SubroundBlock { if blockChain == nil { blockChain = &testscommon.ChainHandlerStub{ GetCurrentBlockHeaderCalled: func() data.HeaderHandler { @@ -105,7 +107,7 @@ func initSubroundBlock( } } - consensusState := initConsensusStateWithNodesCoordinator(container.NodesCoordinator()) + consensusState := initializers.InitConsensusStateWithNodesCoordinator(container.NodesCoordinator()) ch := make(chan bool, 1) container.SetBlockchain(blockChain) @@ -127,7 +129,7 @@ func createConsensusContainers() []*consensusMocks.ConsensusCoreMock { func initSubroundBlockWithBlockProcessor( bp *testscommon.BlockProcessorStub, container *consensusMocks.ConsensusCoreMock, -) bls.SubroundBlock { +) v2.SubroundBlock { blockChain := &testscommon.ChainHandlerStub{ GetGenesisHeaderCalled: func() data.HeaderHandler { return &block.Header{ @@ -143,7 +145,7 @@ func initSubroundBlockWithBlockProcessor( container.SetBlockchain(blockChain) container.SetBlockProcessor(blockProcessorMock) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -154,10 +156,10 @@ func initSubroundBlockWithBlockProcessor( func TestSubroundBlock_NewSubroundBlockNilSubroundShouldFail(t *testing.T) { t.Parallel() - srBlock, err := bls.NewSubroundBlock( + srBlock, err := v2.NewSubroundBlock( nil, - bls.ProcessingThresholdPercent, - &mock.SposWorkerMock{}, + v2.ProcessingThresholdPercent, + &consensusMocks.SposWorkerMock{}, ) assert.Nil(t, srBlock) assert.Equal(t, spos.ErrNilSubround, err) @@ -167,7 +169,7 @@ func TestSubroundBlock_NewSubroundBlockNilBlockchainShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() 
ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -183,7 +185,7 @@ func TestSubroundBlock_NewSubroundBlockNilBlockProcessorShouldFail(t *testing.T) t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -198,11 +200,11 @@ func TestSubroundBlock_NewSubroundBlockNilBlockProcessorShouldFail(t *testing.T) func TestSubroundBlock_NewSubroundBlockNilConsensusStateShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) - sr.ConsensusState = nil + sr.ConsensusStateHandler = nil srBlock, err := defaultSubroundBlockFromSubround(sr) assert.Nil(t, srBlock) @@ -213,7 +215,7 @@ func TestSubroundBlock_NewSubroundBlockNilHasherShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -228,7 +230,7 @@ func TestSubroundBlock_NewSubroundBlockNilMarshalizerShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -243,7 +245,7 @@ func TestSubroundBlock_NewSubroundBlockNilMultiSignerContainerShouldFail(t *test t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -258,7 +260,7 @@ func TestSubroundBlock_NewSubroundBlockNilRoundHandlerShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -273,7 +275,7 @@ func TestSubroundBlock_NewSubroundBlockNilShardCoordinatorShouldFail(t *testing. 
t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -288,7 +290,7 @@ func TestSubroundBlock_NewSubroundBlockNilSyncTimerShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) @@ -303,14 +305,14 @@ func TestSubroundBlock_NewSubroundBlockNilWorkerShouldFail(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) - srBlock, err := bls.NewSubroundBlock( + srBlock, err := v2.NewSubroundBlock( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, nil, ) assert.Nil(t, srBlock) @@ -321,7 +323,7 @@ func TestSubroundBlock_NewSubroundBlockShouldWork(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) srBlock, err := defaultSubroundBlockFromSubround(sr) @@ -335,14 +337,14 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { t.Run("not leader should return false", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) r := sr.DoBlockJob() assert.False(t, r) }) t.Run("round index lower than last committed block should return false", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) leader, err := sr.GetLeader() assert.Nil(t, err) @@ -354,7 +356,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { t.Run("leader job done should return false", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) container.SetRoundHandler(&testscommon.RoundHandlerMock{ IndexCalled: func() int64 { @@ -371,7 +373,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { t.Run("subround finished should return false", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) container.SetRoundHandler(&testscommon.RoundHandlerMock{ IndexCalled: func() int64 { @@ -389,7 +391,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { t.Run("create header error should return false", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, 
container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) container.SetRoundHandler(&testscommon.RoundHandlerMock{ IndexCalled: func() int64 { @@ -412,7 +414,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { t.Run("create block error should return false", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) container.SetRoundHandler(&testscommon.RoundHandlerMock{ IndexCalled: func() int64 { @@ -437,7 +439,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { t.Run("send block error should return false", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) container.SetRoundHandler(&testscommon.RoundHandlerMock{ IndexCalled: func() int64 { @@ -479,14 +481,14 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { } container.SetBlockchain(chainHandler) - consensusState := initConsensusStateWithNodesCoordinator(container.NodesCoordinator()) + consensusState := initializers.InitConsensusStateWithNodesCoordinator(container.NodesCoordinator()) ch := make(chan bool, 1) baseSr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) - srBlock, _ := bls.NewSubroundBlock( + srBlock, _ := v2.NewSubroundBlock( baseSr, - bls.ProcessingThresholdPercent, - &mock.SposWorkerMock{}, + v2.ProcessingThresholdPercent, + &consensusMocks.SposWorkerMock{}, ) sr := *srBlock @@ -532,7 +534,7 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { }, } container.SetBroadcastMessenger(bm) - container.SetRoundHandler(&mock.RoundHandlerMock{ + container.SetRoundHandler(&consensusMocks.RoundHandlerMock{ RoundIndex: 1, }) container.SetEquivalentProofsPool(&dataRetriever.ProofsPoolMock{ @@ -547,16 +549,16 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { r := sr.DoBlockJob() assert.True(t, r) - assert.Equal(t, uint64(1), sr.Header.GetNonce()) + assert.Equal(t, uint64(1), sr.GetHeader().GetNonce()) - proof := sr.Header.GetPreviousProof() + proof := sr.GetHeader().GetPreviousProof() assert.Equal(t, providedSignature, proof.GetAggregatedSignature()) assert.Equal(t, providedBitmap, proof.GetPubKeysBitmap()) }) t.Run("should work, equivalent messages flag not enabled", func(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) container.SetRoundHandler(&testscommon.RoundHandlerMock{ IndexCalled: func() int64 { @@ -575,12 +577,12 @@ func TestSubroundBlock_DoBlockJob(t *testing.T) { }, } container.SetBroadcastMessenger(bm) - container.SetRoundHandler(&mock.RoundHandlerMock{ + container.SetRoundHandler(&consensusMocks.RoundHandlerMock{ RoundIndex: 1, }) r := sr.DoBlockJob() assert.True(t, r) - assert.Equal(t, uint64(1), sr.Header.GetNonce()) + assert.Equal(t, uint64(1), sr.GetHeader().GetNonce()) }) } @@ -589,7 +591,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderDataAlreadySet(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := 
initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} @@ -598,7 +600,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderDataAlreadySet(t *testing.T) { assert.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) - sr.Data = []byte("some data") + sr.SetData([]byte("some data")) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) } @@ -607,14 +609,14 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderNodeNotLeaderInCurrentRound(t * t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[1]), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) } @@ -623,7 +625,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderCannotProcessJobDone(t *testing t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} @@ -632,7 +634,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderCannotProcessJobDone(t *testing assert.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) _ = sr.SetJobDone(leader, bls.SrBlock, true) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) @@ -650,7 +652,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderErrorDecoding(t *testing.T) { } container.SetBlockProcessor(blProc) - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} @@ -659,7 +661,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderErrorDecoding(t *testing.T) { assert.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) @@ -669,7 +671,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderBodyAlreadyReceived(t *testing. t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} @@ -678,8 +680,8 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderBodyAlreadyReceived(t *testing. 
assert.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) - sr.Data = nil - sr.Body = &block.Body{} + sr.SetData(nil) + sr.SetBody(&block.Body{}) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) @@ -689,7 +691,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderHeaderAlreadyReceived(t *testin t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{Nonce: 1} blkBody := &block.Body{} @@ -698,8 +700,8 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderHeaderAlreadyReceived(t *testin assert.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) - sr.Data = nil - sr.Header = &block.Header{Nonce: 1} + sr.SetData(nil) + sr.SetHeader(&block.Header{Nonce: 1}) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) } @@ -711,13 +713,13 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := createDefaultHeader() blkBody := &block.Body{} leader, err := sr.GetLeader() assert.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.True(t, r) }) @@ -725,13 +727,13 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{ Nonce: 1, } blkBody := &block.Body{} cnsMsg := createConsensusMessage(hdr, blkBody, []byte(sr.ConsensusGroup()[0]), bls.MtBlockBodyAndHeader) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.False(t, r) }) @@ -757,7 +759,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { return &block.HeaderV2{} }, } - sr := *initSubroundBlock(chainHandler, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(chainHandler, container, &statusHandler.AppStatusHandlerStub{}) blkBody := &block.Body{} hdr := &block.HeaderV2{ Header: createDefaultHeader(), @@ -774,7 +776,7 @@ func TestSubroundBlock_ReceivedBlockBodyAndHeaderOK(t *testing.T) { assert.Nil(t, err) cnsMsg := createConsensusMessage(hdr, blkBody, []byte(leader), bls.MtBlockBodyAndHeader) cnsMsg.SignatureShare = []byte("signature") - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedBlockBodyAndHeader(cnsMsg) assert.True(t, r) }) @@ -809,7 +811,7 @@ func createConsensusMessage(header data.HeaderHandler, body *block.Body, leader func TestSubroundBlock_ReceivedBlock(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) blockProcessorMock := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) blkBody := &block.Body{} blkBodyStr, _ := mock.MarshalizerMock{}.Marshal(blkBody) @@ -831,11 +833,11 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { currentPid, 
nil, ) - sr.Body = &block.Body{} + sr.SetBody(&block.Body{}) r := sr.ReceivedBlockBody(cnsMsg) assert.False(t, r) - sr.Body = nil + sr.SetBody(nil) cnsMsg.PubKey = []byte(sr.ConsensusGroup()[1]) r = sr.ReceivedBlockBody(cnsMsg) assert.False(t, r) @@ -873,12 +875,12 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { r = sr.ReceivedBlockHeaderBeforeEquivalentProofs(cnsMsg) assert.False(t, r) - sr.Data = nil - sr.Header = hdr + sr.SetData(nil) + sr.SetHeader(hdr) r = sr.ReceivedBlockHeaderBeforeEquivalentProofs(cnsMsg) assert.False(t, r) - sr.Header = nil + sr.SetHeader(nil) cnsMsg.PubKey = []byte(sr.ConsensusGroup()[1]) r = sr.ReceivedBlockHeaderBeforeEquivalentProofs(cnsMsg) assert.False(t, r) @@ -890,8 +892,8 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { sr.SetStatus(bls.SrBlock, spos.SsNotFinished) container.SetBlockProcessor(blockProcessorMock) - sr.Data = nil - sr.Header = nil + sr.SetData(nil) + sr.SetHeader(nil) hdr = createDefaultHeader() hdr.Nonce = 1 hdrStr, _ = mock.MarshalizerMock{}.Marshal(hdr) @@ -905,7 +907,7 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenBodyAndHeaderAreNotSet(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) leader, _ := sr.GetLeader() cnsMsg := consensus.NewConsensusMessage( nil, @@ -929,7 +931,7 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenBodyAndHeaderAre func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFails(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) blProcMock := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) err := errors.New("error process block") blProcMock.ProcessBlockCalled = func(data.HeaderHandler, data.BodyHandler, func() time.Duration) error { @@ -956,15 +958,15 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFail currentPid, nil, ) - sr.Header = hdr - sr.Body = blkBody + sr.SetHeader(hdr) + sr.SetBody(blkBody) assert.False(t, sr.ProcessReceivedBlock(cnsMsg)) } func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockReturnsInNextRound(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr := &block.Header{} blkBody := &block.Body{} blkBodyStr, _ := mock.MarshalizerMock{}.Marshal(blkBody) @@ -985,14 +987,14 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockRetu currentPid, nil, ) - sr.Header = hdr - sr.Body = blkBody + sr.SetHeader(hdr) + sr.SetBody(blkBody) blockProcessorMock := consensusMocks.InitBlockProcessorMock(container.Marshalizer()) blockProcessorMock.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return expectedErr } container.SetBlockProcessor(blockProcessorMock) - container.SetRoundHandler(&mock.RoundHandlerMock{RoundIndex: 1}) + container.SetRoundHandler(&consensusMocks.RoundHandlerMock{RoundIndex: 1}) assert.False(t, sr.ProcessReceivedBlock(cnsMsg)) } @@ -1001,7 +1003,7 
@@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnTrue(t *testing.T) { consensusContainers := createConsensusContainers() for _, container := range consensusContainers { - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) hdr, _ := container.BlockProcessor().CreateNewHeader(1, 1) hdr, blkBody, _ := container.BlockProcessor().CreateBlock(hdr, func() bool { return true }) @@ -1023,8 +1025,8 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnTrue(t *testing.T) { currentPid, nil, ) - sr.Header = hdr - sr.Body = blkBody + sr.SetHeader(hdr) + sr.SetBody(blkBody) assert.True(t, sr.ProcessReceivedBlock(cnsMsg)) } } @@ -1035,7 +1037,7 @@ func TestSubroundBlock_RemainingTimeShouldReturnNegativeValue(t *testing.T) { roundHandlerMock := initRoundHandlerMock() container.SetRoundHandler(roundHandlerMock) - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) remainingTimeInThisRound := func() time.Duration { roundStartTime := sr.RoundHandler().TimeStamp() currentTime := sr.SyncTimer().CurrentTime() @@ -1044,19 +1046,19 @@ func TestSubroundBlock_RemainingTimeShouldReturnNegativeValue(t *testing.T) { return remainingTime } - container.SetSyncTimer(&mock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + container.SetSyncTimer(&consensusMocks.SyncTimerMock{CurrentTimeCalled: func() time.Time { return time.Unix(0, 0).Add(roundTimeDuration * 84 / 100) }}) ret := remainingTimeInThisRound() assert.True(t, ret > 0) - container.SetSyncTimer(&mock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + container.SetSyncTimer(&consensusMocks.SyncTimerMock{CurrentTimeCalled: func() time.Time { return time.Unix(0, 0).Add(roundTimeDuration * 85 / 100) }}) ret = remainingTimeInThisRound() assert.True(t, ret == 0) - container.SetSyncTimer(&mock.SyncTimerMock{CurrentTimeCalled: func() time.Time { + container.SetSyncTimer(&consensusMocks.SyncTimerMock{CurrentTimeCalled: func() time.Time { return time.Unix(0, 0).Add(roundTimeDuration * 86 / 100) }}) ret = remainingTimeInThisRound() @@ -1066,15 +1068,15 @@ func TestSubroundBlock_RemainingTimeShouldReturnNegativeValue(t *testing.T) { func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) - sr.RoundCanceled = true + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr.SetRoundCanceled(true) assert.False(t, sr.DoBlockConsensusCheck()) } func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenSubroundIsFinished(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) sr.SetStatus(bls.SrBlock, spos.SsFinished) assert.True(t, sr.DoBlockConsensusCheck()) } @@ -1082,7 +1084,7 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenSubroundIsFinish func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenBlockIsReceivedReturnTrue(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, 
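// ---------------------------------------------------------------------------
// Editor's aside (illustrative sketch, not part of the diff):
// TestSubroundBlock_RemainingTimeShouldReturnNegativeValue above probes 84%,
// 85% and 86% of the round because processBlock (in subroundBlock.go earlier
// in this diff) only grants a processing budget of processingThresholdPercent
// = 85% of the round duration. The program below is a simplified stand-in for
// RoundHandler().RemainingTime, using the same 100ms roundTimeDuration as the
// tests; it reproduces the >0 / ==0 / <0 pattern the assertions expect.
package main

import (
	"fmt"
	"time"
)

const processingThresholdPercent = 85 // from constants.go in this diff

// remaining returns how much of the 85% processing budget is left at `now`
func remaining(roundStart, now time.Time, roundDuration time.Duration) time.Duration {
	maxTime := roundDuration * time.Duration(processingThresholdPercent) / 100
	return roundStart.Add(maxTime).Sub(now)
}

func main() {
	roundDuration := 100 * time.Millisecond
	start := time.Unix(0, 0)
	for _, pct := range []int{84, 85, 86} { // same probe points as the test
		now := start.Add(roundDuration * time.Duration(pct) / 100)
		fmt.Printf("at %d%% of the round, remaining budget: %v\n", pct, remaining(start, now, roundDuration))
	}
}
// --------------------------------------------------------------- end aside --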
&statusHandler.AppStatusHandlerStub{}) for i := 0; i < sr.Threshold(bls.SrBlock); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, true) } @@ -1092,14 +1094,14 @@ func TestSubroundBlock_DoBlockConsensusCheckShouldReturnTrueWhenBlockIsReceivedR func TestSubroundBlock_DoBlockConsensusCheckShouldReturnFalseWhenBlockIsReceivedReturnFalse(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) assert.False(t, sr.DoBlockConsensusCheck()) } func TestSubroundBlock_IsBlockReceived(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) for i := 0; i < len(sr.ConsensusGroup()); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, false) _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, false) @@ -1121,7 +1123,7 @@ func TestSubroundBlock_IsBlockReceived(t *testing.T) { func TestSubroundBlock_HaveTimeInCurrentSubroundShouldReturnTrue(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) haveTimeInCurrentSubound := func() bool { roundStartTime := sr.RoundHandler().TimeStamp() currentTime := sr.SyncTimer().CurrentTime() @@ -1130,14 +1132,14 @@ func TestSubroundBlock_HaveTimeInCurrentSubroundShouldReturnTrue(t *testing.T) { return time.Duration(remainingTime) > 0 } - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} roundHandlerMock.TimeDurationCalled = func() time.Duration { return 4000 * time.Millisecond } roundHandlerMock.TimeStampCalled = func() time.Time { return time.Unix(0, 0) } - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} timeElapsed := sr.EndTime() - 1 syncTimerMock.CurrentTimeCalled = func() time.Time { return time.Unix(0, timeElapsed) @@ -1151,7 +1153,7 @@ func TestSubroundBlock_HaveTimeInCurrentSubroundShouldReturnTrue(t *testing.T) { func TestSubroundBlock_HaveTimeInCurrentSuboundShouldReturnFalse(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) haveTimeInCurrentSubound := func() bool { roundStartTime := sr.RoundHandler().TimeStamp() currentTime := sr.SyncTimer().CurrentTime() @@ -1160,14 +1162,14 @@ func TestSubroundBlock_HaveTimeInCurrentSuboundShouldReturnFalse(t *testing.T) { return time.Duration(remainingTime) > 0 } - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} roundHandlerMock.TimeDurationCalled = func() time.Duration { return 4000 * time.Millisecond } roundHandlerMock.TimeStampCalled = func() time.Time { return time.Unix(0, 0) } - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} timeElapsed := sr.EndTime() + 1 syncTimerMock.CurrentTimeCalled = func() time.Time { return time.Unix(0, timeElapsed) @@ -1197,7 +1199,7 @@ func TestSubroundBlock_CreateHeaderNilCurrentHeader(t *testing.T) { consensusContainers := 
createConsensusContainers() for _, container := range consensusContainers { - sr := *initSubroundBlock(blockChain, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(blockChain, container, &statusHandler.AppStatusHandlerStub{}) _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(nil, nil) header, _ := sr.CreateHeader() header, body, _ := sr.CreateBlock(header) @@ -1228,7 +1230,7 @@ func TestSubroundBlock_CreateHeaderNilCurrentHeader(t *testing.T) { func TestSubroundBlock_CreateHeaderNotNilCurrentHeader(t *testing.T) { consensusContainers := createConsensusContainers() for _, container := range consensusContainers { - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(&block.Header{ Nonce: 1, }, []byte("root hash")) @@ -1281,7 +1283,7 @@ func TestSubroundBlock_CreateHeaderMultipleMiniBlocks(t *testing.T) { return shardHeader, &block.Body{}, nil } - sr := *initSubroundBlockWithBlockProcessor(bp, container) + sr := initSubroundBlockWithBlockProcessor(bp, container) container.SetBlockchain(&blockChainMock) header, _ := sr.CreateHeader() @@ -1312,7 +1314,7 @@ func TestSubroundBlock_CreateHeaderNilMiniBlocks(t *testing.T) { bp.CreateBlockCalled = func(header data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { return nil, nil, expectedErr } - sr := *initSubroundBlockWithBlockProcessor(bp, container) + sr := initSubroundBlockWithBlockProcessor(bp, container) _ = sr.BlockChain().SetCurrentBlockHeaderAndRootHash(&block.Header{ Nonce: 1, }, []byte("root hash")) @@ -1372,7 +1374,7 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDuration(t *testing.T) { return nil }, }) - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{ + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{ SetUInt64ValueHandler: func(key string, value uint64) { receivedValue = value }}) @@ -1398,8 +1400,8 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDuration(t *testing.T) { currentPid, nil, ) - sr.Header = hdr - sr.Body = blkBody + sr.SetHeader(hdr) + sr.SetBody(blkBody) minimumExpectedValue := uint64(delay * 100 / srDuration) _ = sr.ProcessReceivedBlock(cnsMsg) @@ -1422,11 +1424,11 @@ func TestSubroundBlock_ReceivedBlockComputeProcessDurationWithZeroDurationShould container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubroundForSRBlock(consensusState, ch, container, &statusHandler.AppStatusHandlerStub{}) - srBlock := *defaultSubroundBlockWithoutErrorFromSubround(sr) + srBlock := defaultSubroundBlockWithoutErrorFromSubround(sr) srBlock.ComputeSubroundProcessingMetric(time.Now(), "dummy") } @@ -1435,7 +1437,7 @@ func TestSubroundBlock_ReceivedBlockHeader(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) + sr := initSubroundBlock(nil, container, &statusHandler.AppStatusHandlerStub{}) // nil header sr.ReceivedBlockHeader(nil) @@ -1463,14 +1465,14 @@ func TestSubroundBlock_ReceivedBlockHeader(t *testing.T) { sr.SetLeader(defaultLeader) // consensus data already set - sr.Data = []byte("some data") + sr.SetData([]byte("some data")) sr.ReceivedBlockHeader(&testscommon.HeaderHandlerStub{}) - sr.Data = nil + 
sr.SetData(nil) // header already received - sr.Header = &testscommon.HeaderHandlerStub{} + sr.SetHeader(&testscommon.HeaderHandlerStub{}) sr.ReceivedBlockHeader(&testscommon.HeaderHandlerStub{}) - sr.Header = nil + sr.SetHeader(nil) // self job already done _ = sr.SetJobDone(sr.SelfPubKey(), sr.Current(), true) diff --git a/consensus/spos/bls/subroundEndRound.go b/consensus/spos/bls/v2/subroundEndRound.go similarity index 90% rename from consensus/spos/bls/subroundEndRound.go rename to consensus/spos/bls/v2/subroundEndRound.go index 6bd52cd8adc..554868bbcd1 100644 --- a/consensus/spos/bls/subroundEndRound.go +++ b/consensus/spos/bls/v2/subroundEndRound.go @@ -1,4 +1,4 @@ -package bls +package v2 import ( "bytes" @@ -17,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/headerCheck" ) @@ -83,7 +84,7 @@ func checkNewSubroundEndRoundParams( if baseSubround == nil { return spos.ErrNilSubround } - if baseSubround.ConsensusState == nil { + if check.IfNil(baseSubround.ConsensusStateHandler) { return spos.ErrNilConsensusState } @@ -102,13 +103,13 @@ func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsD if !sr.IsConsensusDataSet() { return false } - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false } // TODO[cleanup cns finality]: remove if statement isSenderAllowed := sr.IsNodeInConsensusGroup(messageSender) - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { isSenderAllowed = sr.IsNodeLeaderInCurrentRound(messageSender) } if !isSenderAllowed { // is NOT this node leader in current round? 
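
The hunks above consistently replace direct field access (sr.Header, sr.Body, sr.Data, baseSubround.ConsensusState) with accessor calls (sr.GetHeader()/sr.SetHeader(), sr.GetBody()/sr.SetBody(), sr.SetData(), check.IfNil(baseSubround.ConsensusStateHandler)). What follows is a minimal, self-contained sketch of that getter/setter pattern, not the actual spos.ConsensusState implementation; the stand-in type, field names and the RWMutex are assumptions, used only to illustrate why reads and writes get funneled through one place (for instance, so shared consensus state touched from several goroutines can be guarded consistently).

package main

import (
	"fmt"
	"sync"
)

// headerHandler stands in for data.HeaderHandler (hypothetical, illustration only).
type headerHandler interface {
	GetNonce() uint64
}

type header struct{ nonce uint64 }

func (h *header) GetNonce() uint64 { return h.nonce }

// consensusState is a stand-in for the state behind ConsensusStateHandler; subrounds
// no longer touch the field directly, only through the accessors below.
type consensusState struct {
	mut    sync.RWMutex
	header headerHandler
}

// SetHeader mirrors the sr.SetHeader(...) calls introduced in the hunks above.
func (cs *consensusState) SetHeader(h headerHandler) {
	cs.mut.Lock()
	cs.header = h
	cs.mut.Unlock()
}

// GetHeader mirrors the sr.GetHeader() calls introduced in the hunks above.
func (cs *consensusState) GetHeader() headerHandler {
	cs.mut.RLock()
	defer cs.mut.RUnlock()
	return cs.header
}

func main() {
	cs := &consensusState{}
	cs.SetHeader(&header{nonce: 37})
	fmt.Println("nonce:", cs.GetHeader().GetNonce())
}
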
@@ -123,7 +124,7 @@ func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsD // TODO[cleanup cns finality]: remove if isSelfSender := sr.IsNodeSelf(messageSender) || sr.IsKeyManagedBySelf([]byte(messageSender)) - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { isSelfSender = sr.IsSelfLeader() } if isSelfSender { @@ -139,7 +140,7 @@ func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsD } hasProof := sr.EquivalentProofsPool().HasProof(sr.ShardCoordinator().SelfId(), cnsDta.BlockHeaderHash) - if hasProof && sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if hasProof && sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return true } @@ -162,11 +163,11 @@ func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsD } func (sr *subroundEndRound) isBlockHeaderFinalInfoValid(cnsDta *consensus.Message) bool { - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false } - header := sr.Header.ShallowClone() + header := sr.GetHeader().ShallowClone() // TODO[cleanup cns finality]: remove this if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, header.GetEpoch()) { @@ -222,13 +223,13 @@ func (sr *subroundEndRound) receivedInvalidSignersInfo(_ context.Context, cnsDta if !sr.IsConsensusDataSet() { return false } - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false } // TODO[cleanup cns finality]: remove if statement isSenderAllowed := sr.IsNodeInConsensusGroup(messageSender) - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { isSenderAllowed = sr.IsNodeLeaderInCurrentRound(messageSender) } if !isSenderAllowed { // is NOT this node leader in current round? 
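
Both receivedBlockHeaderFinalInfo and receivedInvalidSignersInfo keep the same gate, now read through sr.GetHeader(): EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()). Before the flag activates, only the round leader is accepted as sender; once it is active, any member of the consensus group is. The sketch below is a simplified, self-contained illustration of that gating; the handler struct, the flag constant and the activation-epoch rule are hypothetical stand-ins for the node's real enable-epochs handling.

package main

import "fmt"

// flagName and enableEpochsHandler are hypothetical stand-ins for
// common.EquivalentMessagesFlag and the node's EnableEpochsHandler.
type flagName string

const equivalentMessagesFlag flagName = "EquivalentMessagesFlag"

type enableEpochsHandler struct {
	activationEpoch map[flagName]uint32
}

// IsFlagEnabledInEpoch assumes a simple "active from epoch X onwards" rule.
func (h *enableEpochsHandler) IsFlagEnabledInEpoch(f flagName, epoch uint32) bool {
	activation, ok := h.activationEpoch[f]
	return ok && epoch >= activation
}

// isSenderAllowed mimics the gating in receivedBlockHeaderFinalInfo: before the flag
// activates only the leader may send final info (the legacy path kept behind the
// TODO[cleanup cns finality] markers), afterwards any consensus group member may.
func isSenderAllowed(h *enableEpochsHandler, headerEpoch uint32, isLeader bool, inConsensusGroup bool) bool {
	if !h.IsFlagEnabledInEpoch(equivalentMessagesFlag, headerEpoch) {
		return isLeader
	}
	return inConsensusGroup
}

func main() {
	h := &enableEpochsHandler{activationEpoch: map[flagName]uint32{equivalentMessagesFlag: 10}}
	fmt.Println(isSenderAllowed(h, 5, false, true))  // false: flag not active yet, sender is not the leader
	fmt.Println(isSenderAllowed(h, 12, false, true)) // true: flag active, any consensus group member allowed
}
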
@@ -243,7 +244,7 @@ func (sr *subroundEndRound) receivedInvalidSignersInfo(_ context.Context, cnsDta // TODO[cleanup cns finality]: update this check isSelfSender := sr.IsNodeSelf(messageSender) || sr.IsKeyManagedBySelf([]byte(messageSender)) - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { isSelfSender = sr.IsSelfLeader() } if isSelfSender { @@ -347,12 +348,12 @@ func (sr *subroundEndRound) receivedHeader(headerHandler data.HeaderHandler) { // doEndRoundJob method does the job of the subround EndRound func (sr *subroundEndRound) doEndRoundJob(_ context.Context) bool { - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false } // TODO[cleanup cns finality]: remove this code block - isFlagEnabled := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) + isFlagEnabled := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) if !sr.IsSelfLeader() && !isFlagEnabled { if sr.IsSelfInConsensusGroup() { err := sr.prepareBroadcastBlockDataForValidator() @@ -403,15 +404,15 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { // broadcast header // TODO[cleanup cns finality]: remove this, header already broadcast during subroundBlock - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { - err = sr.BroadcastMessenger().BroadcastHeader(sr.Header, sender) + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { + err = sr.BroadcastMessenger().BroadcastHeader(sr.GetHeader(), sender) if err != nil { log.Warn("doEndRoundJobByLeader.BroadcastHeader", "error", err.Error()) } } startTime := time.Now() - err = sr.BlockProcessor().CommitBlock(sr.Header, sr.Body) + err = sr.BlockProcessor().CommitBlock(sr.GetHeader(), sr.GetBody()) elapsedTime := time.Since(startTime) if elapsedTime >= common.CommitMaxTime { log.Warn("doEndRoundJobByLeader.CommitBlock", "elapsed time", elapsedTime) @@ -425,7 +426,7 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { return false } - if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { err = sr.EquivalentProofsPool().AddProof(proof) if err != nil { log.Debug("doEndRoundJobByLeader.AddProof", "error", err) @@ -444,7 +445,7 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { log.Debug("doEndRoundJobByLeader.broadcastBlockDataLeader", "error", err.Error()) } - msg := fmt.Sprintf("Added proposed block with nonce %d in blockchain", sr.Header.GetNonce()) + msg := fmt.Sprintf("Added proposed block with nonce %d in blockchain", sr.GetHeader().GetNonce()) log.Debug(display.Headline(msg, sr.SyncTimer().FormattedCurrentTime(), "+")) sr.updateMetricsForLeader() @@ -453,7 +454,7 @@ func (sr *subroundEndRound) doEndRoundJobByLeader() bool { } func (sr *subroundEndRound) sendFinalInfo(sender []byte) (data.HeaderProofHandler, bool) { - bitmap := sr.GenerateBitmap(SrSignature) + bitmap := sr.GenerateBitmap(bls.SrSignature) err := sr.checkSignaturesValidity(bitmap) if err != nil { log.Debug("sendFinalInfo.checkSignaturesValidity", "error", err.Error()) @@ -468,14 +469,14 @@ func (sr *subroundEndRound) sendFinalInfo(sender 
[]byte) (data.HeaderProofHandle } // TODO[cleanup cns finality]: remove this code block - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { - err = sr.Header.SetPubKeysBitmap(bitmap) + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { + err = sr.GetHeader().SetPubKeysBitmap(bitmap) if err != nil { log.Debug("sendFinalInfo.SetPubKeysBitmap", "error", err.Error()) return nil, false } - err = sr.Header.SetSignature(sig) + err = sr.GetHeader().SetSignature(sig) if err != nil { log.Debug("sendFinalInfo.SetSignature", "error", err.Error()) return nil, false @@ -488,7 +489,7 @@ func (sr *subroundEndRound) sendFinalInfo(sender []byte) (data.HeaderProofHandle return nil, false } - err = sr.Header.SetLeaderSignature(leaderSignature) + err = sr.GetHeader().SetLeaderSignature(leaderSignature) if err != nil { log.Debug("sendFinalInfo.SetLeaderSignature", "error", err.Error()) return nil, false @@ -511,8 +512,8 @@ func (sr *subroundEndRound) sendFinalInfo(sender []byte) (data.HeaderProofHandle // broadcast header and final info section // TODO[cleanup cns finality]: remove leaderSigToBroadcast - leaderSigToBroadcast := sr.Header.GetLeaderSignature() - if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + leaderSigToBroadcast := sr.GetHeader().GetLeaderSignature() + if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { leaderSigToBroadcast = nil } @@ -524,15 +525,15 @@ func (sr *subroundEndRound) sendFinalInfo(sender []byte) (data.HeaderProofHandle PubKeysBitmap: bitmap, AggregatedSignature: sig, HeaderHash: sr.GetData(), - HeaderEpoch: sr.Header.GetEpoch(), - HeaderNonce: sr.Header.GetNonce(), - HeaderShardId: sr.Header.GetShardID(), + HeaderEpoch: sr.GetHeader().GetEpoch(), + HeaderNonce: sr.GetHeader().GetNonce(), + HeaderShardId: sr.GetHeader().GetShardID(), }, true } func (sr *subroundEndRound) shouldSendFinalInfo() bool { // TODO[cleanup cns finality]: remove this check - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return true } @@ -546,7 +547,7 @@ func (sr *subroundEndRound) shouldSendFinalInfo() bool { } func (sr *subroundEndRound) aggregateSigsAndHandleInvalidSigners(bitmap []byte) ([]byte, []byte, error) { - sig, err := sr.SigningHandler().AggregateSigs(bitmap, sr.Header.GetEpoch()) + sig, err := sr.SigningHandler().AggregateSigs(bitmap, sr.GetHeader().GetEpoch()) if err != nil { log.Debug("doEndRoundJobByLeader.AggregateSigs", "error", err.Error()) @@ -559,7 +560,7 @@ func (sr *subroundEndRound) aggregateSigsAndHandleInvalidSigners(bitmap []byte) return nil, nil, err } - err = sr.SigningHandler().Verify(sr.GetData(), bitmap, sr.Header.GetEpoch()) + err = sr.SigningHandler().Verify(sr.GetData(), bitmap, sr.GetHeader().GetEpoch()) if err != nil { log.Debug("doEndRoundJobByLeader.Verify", "error", err.Error()) @@ -587,10 +588,10 @@ func (sr *subroundEndRound) checkGoRoutinesThrottler(ctx context.Context) error // verifySignature implements parallel signature verification func (sr *subroundEndRound) verifySignature(i int, pk string, sigShare []byte) error { - err := sr.SigningHandler().VerifySignatureShare(uint16(i), sigShare, sr.GetData(), sr.Header.GetEpoch()) + err := 
sr.SigningHandler().VerifySignatureShare(uint16(i), sigShare, sr.GetData(), sr.GetHeader().GetEpoch()) if err != nil { log.Trace("VerifySignatureShare returned an error: ", err) - errSetJob := sr.SetJobDone(pk, SrSignature, false) + errSetJob := sr.SetJobDone(pk, bls.SrSignature, false) if errSetJob != nil { return errSetJob } @@ -616,12 +617,12 @@ func (sr *subroundEndRound) verifyNodesOnAggSigFail(ctx context.Context) ([]stri invalidPubKeys := make([]string, 0) pubKeys := sr.ConsensusGroup() - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return nil, spos.ErrNilHeader } for i, pk := range pubKeys { - isJobDone, err := sr.JobDone(pk, SrSignature) + isJobDone, err := sr.JobDone(pk, bls.SrSignature) if err != nil || !isJobDone { continue } @@ -708,10 +709,10 @@ func (sr *subroundEndRound) handleInvalidSignersOnAggSigFail() ([]byte, []byte, } func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, []byte, error) { - threshold := sr.Threshold(SrSignature) - numValidSigShares := sr.ComputeSize(SrSignature) + threshold := sr.Threshold(bls.SrSignature) + numValidSigShares := sr.ComputeSize(bls.SrSignature) - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return nil, nil, spos.ErrNilHeader } @@ -720,13 +721,13 @@ func (sr *subroundEndRound) computeAggSigOnValidNodes() ([]byte, []byte, error) spos.ErrInvalidNumSigShares, numValidSigShares, threshold) } - bitmap := sr.GenerateBitmap(SrSignature) + bitmap := sr.GenerateBitmap(bls.SrSignature) err := sr.checkSignaturesValidity(bitmap) if err != nil { return nil, nil, err } - sig, err := sr.SigningHandler().AggregateSigs(bitmap, sr.Header.GetEpoch()) + sig, err := sr.SigningHandler().AggregateSigs(bitmap, sr.GetHeader().GetEpoch()) if err != nil { return nil, nil, err } @@ -747,7 +748,7 @@ func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfoForKey(signature [] nil, pubKey, nil, - int(MtBlockHeaderFinalInfo), + int(bls.MtBlockHeaderFinalInfo), sr.RoundHandler().Index(), sr.ChainID(), bitmap, @@ -763,7 +764,7 @@ func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfoForKey(signature [] return false } - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { err = sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg) if err != nil { log.Debug("createAndBroadcastHeaderFinalInfoForKey.BroadcastConsensusMessage", "error", err.Error()) @@ -790,7 +791,7 @@ func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfoForKey(signature [] func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []byte) { // TODO[cleanup cns finality]: remove the leader check - isEquivalentMessagesFlagEnabled := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) + isEquivalentMessagesFlagEnabled := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) if !sr.IsSelfLeader() && !isEquivalentMessagesFlagEnabled { return } @@ -808,7 +809,7 @@ func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []by nil, sender, nil, - int(MtInvalidSigners), + int(bls.MtInvalidSigners), sr.RoundHandler().Index(), sr.ChainID(), nil, @@ -829,7 +830,7 @@ func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []by } func (sr *subroundEndRound) doEndRoundJobByParticipant(cnsDta *consensus.Message) bool { - if sr.RoundCanceled { + 
if sr.GetRoundCanceled() { return false } if !sr.IsConsensusDataSet() { @@ -853,13 +854,13 @@ func (sr *subroundEndRound) doEndRoundJobByParticipant(cnsDta *consensus.Message sr.SetProcessingBlock(true) - shouldNotCommitBlock := sr.ExtendedCalled || int64(header.GetRound()) < sr.RoundHandler().Index() + shouldNotCommitBlock := sr.GetExtendedCalled() || int64(header.GetRound()) < sr.RoundHandler().Index() if shouldNotCommitBlock { log.Debug("canceled round, extended has been called or round index has been changed", "round", sr.RoundHandler().Index(), "subround", sr.Name(), "header round", header.GetRound(), - "extended called", sr.ExtendedCalled, + "extended called", sr.GetExtendedCalled(), ) return false } @@ -874,7 +875,7 @@ func (sr *subroundEndRound) doEndRoundJobByParticipant(cnsDta *consensus.Message } startTime := time.Now() - err := sr.BlockProcessor().CommitBlock(header, sr.Body) + err := sr.BlockProcessor().CommitBlock(header, sr.GetBody()) elapsedTime := time.Since(startTime) if elapsedTime >= common.CommitMaxTime { log.Warn("doEndRoundJobByParticipant.CommitBlock", "elapsed time", elapsedTime) @@ -935,11 +936,11 @@ func (sr *subroundEndRound) haveConsensusHeaderWithFullInfo(cnsDta *consensus.Me return sr.isConsensusHeaderReceived() } - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false, nil } - header := sr.Header.ShallowClone() + header := sr.GetHeader().ShallowClone() // TODO[cleanup cns finality]: remove this if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, header.GetEpoch()) { err := header.SetPubKeysBitmap(cnsDta.PubKeysBitmap) @@ -964,11 +965,11 @@ func (sr *subroundEndRound) haveConsensusHeaderWithFullInfo(cnsDta *consensus.Me } func (sr *subroundEndRound) isConsensusHeaderReceived() (bool, data.HeaderHandler) { - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false, nil } - consensusHeaderHash, err := core.CalculateHash(sr.Marshalizer(), sr.Hasher(), sr.Header) + consensusHeaderHash, err := core.CalculateHash(sr.Marshalizer(), sr.Hasher(), sr.GetHeader()) if err != nil { log.Debug("isConsensusHeaderReceived: calculate consensus header hash", "error", err.Error()) return false, nil @@ -1015,7 +1016,7 @@ func (sr *subroundEndRound) isConsensusHeaderReceived() (bool, data.HeaderHandle } func (sr *subroundEndRound) signBlockHeader(leader []byte) ([]byte, error) { - headerClone := sr.Header.ShallowClone() + headerClone := sr.GetHeader().ShallowClone() err := headerClone.SetLeaderSignature(nil) if err != nil { return nil, err @@ -1038,16 +1039,16 @@ func (sr *subroundEndRound) updateMetricsForLeader() { func (sr *subroundEndRound) broadcastBlockDataLeader(sender []byte) error { // TODO[cleanup cns finality]: remove this method, block data was already broadcast during subroundBlock - if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return nil } - miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.Header, sr.Body) + miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.GetHeader(), sr.GetBody()) if err != nil { return err } - return sr.BroadcastMessenger().BroadcastBlockDataLeader(sr.Header, miniBlocks, transactions, sender) + return sr.BroadcastMessenger().BroadcastBlockDataLeader(sr.GetHeader(), miniBlocks, transactions, sender) } func (sr *subroundEndRound) 
setHeaderForValidator(header data.HeaderHandler) error { @@ -1067,14 +1068,14 @@ func (sr *subroundEndRound) prepareBroadcastBlockDataForValidator() error { return err } - go sr.BroadcastMessenger().PrepareBroadcastBlockDataValidator(sr.Header, miniBlocks, transactions, idx, pk) + go sr.BroadcastMessenger().PrepareBroadcastBlockDataValidator(sr.GetHeader(), miniBlocks, transactions, idx, pk) return nil } // doEndRoundConsensusCheck method checks if the consensus is achieved func (sr *subroundEndRound) doEndRoundConsensusCheck() bool { - if sr.RoundCanceled { + if sr.GetRoundCanceled() { return false } @@ -1089,7 +1090,7 @@ func (sr *subroundEndRound) checkSignaturesValidity(bitmap []byte) error { consensusGroup := sr.ConsensusGroup() signers := headerCheck.ComputeSignersPublicKeys(consensusGroup, bitmap) for _, pubKey := range signers { - isSigJobDone, err := sr.JobDone(pubKey, SrSignature) + isSigJobDone, err := sr.JobDone(pubKey, bls.SrSignature) if err != nil { return err } @@ -1104,7 +1105,7 @@ func (sr *subroundEndRound) checkSignaturesValidity(bitmap []byte) error { func (sr *subroundEndRound) hasProposerSignature(bitmap []byte) bool { // TODO[cleanup cns finality]: remove this check - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return true } @@ -1112,14 +1113,14 @@ func (sr *subroundEndRound) hasProposerSignature(bitmap []byte) bool { } func (sr *subroundEndRound) isOutOfTime() bool { - startTime := sr.RoundTimeStamp + startTime := sr.GetRoundTimeStamp() maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 if sr.RoundHandler().RemainingTime(startTime, maxTime) < 0 { log.Debug("canceled round, time is out", "round", sr.SyncTimer().FormattedCurrentTime(), sr.RoundHandler().Index(), "subround", sr.Name()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return true } @@ -1140,7 +1141,7 @@ func (sr *subroundEndRound) getIndexPkAndDataToBroadcast() (int, []byte, map[uin return -1, nil, nil, nil, err } - miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.Header, sr.Body) + miniBlocks, transactions, err := sr.BlockProcessor().MarshalizedDataToBroadcast(sr.GetHeader(), sr.GetBody()) if err != nil { return -1, nil, nil, nil, err } @@ -1169,7 +1170,7 @@ func (sr *subroundEndRound) getMinConsensusGroupIndexOfManagedKeys() int { func (sr *subroundEndRound) getSender() ([]byte, error) { // TODO[cleanup cns finality]: remove this code block - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { leader, errGetLeader := sr.GetLeader() if errGetLeader != nil { log.Debug("GetLeader", "error", errGetLeader) @@ -1193,7 +1194,7 @@ func (sr *subroundEndRound) getSender() ([]byte, error) { func (sr *subroundEndRound) waitForSignalSync() bool { // TODO[cleanup cns finality]: remove this - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return true } @@ -1238,7 +1239,7 @@ func (sr *subroundEndRound) waitAllSignatures() { return } - sr.WaitingAllSignaturesTimeOut = true + sr.SetWaitingAllSignaturesTimeOut(true) select { case 
sr.ConsensusChannel() <- true: @@ -1259,7 +1260,7 @@ func (sr *subroundEndRound) remainingTime() time.Duration { // is set on true for the subround Signature func (sr *subroundEndRound) receivedSignature(_ context.Context, cnsDta *consensus.Message) bool { // TODO[cleanup cns finality]: remove this check - if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if !sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return true } @@ -1305,7 +1306,7 @@ func (sr *subroundEndRound) receivedSignature(_ context.Context, cnsDta *consens return false } - err = sr.SetJobDone(node, SrSignature, true) + err = sr.SetJobDone(node, bls.SrSignature, true) if err != nil { log.Debug("receivedSignature.SetJobDone", "node", pkForLogs, @@ -1324,9 +1325,9 @@ func (sr *subroundEndRound) receivedSignature(_ context.Context, cnsDta *consens } func (sr *subroundEndRound) checkReceivedSignatures() bool { - threshold := sr.Threshold(SrSignature) - if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.Header) { - threshold = sr.FallbackThreshold(SrSignature) + threshold := sr.Threshold(bls.SrSignature) + if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.GetHeader()) { + threshold = sr.FallbackThreshold(bls.SrSignature) log.Warn("subroundEndRound.checkReceivedSignatures: fallback validation has been applied", "minimum number of signatures required", threshold, "actual number of signatures received", sr.getNumOfSignaturesCollected(), @@ -1336,9 +1337,9 @@ func (sr *subroundEndRound) checkReceivedSignatures() bool { areSignaturesCollected, numSigs := sr.areSignaturesCollected(threshold) areAllSignaturesCollected := numSigs == sr.ConsensusGroupSize() - isSignatureCollectionDone := areAllSignaturesCollected || (areSignaturesCollected && sr.WaitingAllSignaturesTimeOut) + isSignatureCollectionDone := areAllSignaturesCollected || (areSignaturesCollected && sr.GetWaitingAllSignaturesTimeOut()) - isSelfJobDone := sr.IsSelfJobDone(SrSignature) + isSelfJobDone := sr.IsSelfJobDone(bls.SrSignature) shouldStopWaitingSignatures := isSelfJobDone && isSignatureCollectionDone if shouldStopWaitingSignatures { @@ -1359,7 +1360,7 @@ func (sr *subroundEndRound) getNumOfSignaturesCollected() int { for i := 0; i < len(sr.ConsensusGroup()); i++ { node := sr.ConsensusGroup()[i] - isSignJobDone, err := sr.JobDone(node, SrSignature) + isSignJobDone, err := sr.JobDone(node, bls.SrSignature) if err != nil { log.Debug("getNumOfSignaturesCollected.JobDone", "node", node, diff --git a/consensus/spos/bls/subroundEndRound_test.go b/consensus/spos/bls/v2/subroundEndRound_test.go similarity index 91% rename from consensus/spos/bls/subroundEndRound_test.go rename to consensus/spos/bls/v2/subroundEndRound_test.go index b435b1e9f9b..f43d0e6024a 100644 --- a/consensus/spos/bls/subroundEndRound_test.go +++ b/consensus/spos/bls/v2/subroundEndRound_test.go @@ -1,4 +1,4 @@ -package bls_test +package v2_test import ( "bytes" @@ -22,12 +22,14 @@ import ( "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" dataRetrieverMocks "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/p2p/factory" 
"github.com/multiversx/mx-chain-go/testscommon" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" @@ -37,9 +39,9 @@ import ( func initSubroundEndRoundWithContainer( container *consensusMocks.ConsensusCoreMock, appStatusHandler core.AppStatusHandler, -) bls.SubroundEndRound { +) v2.SubroundEndRound { ch := make(chan bool, 1) - consensusState := initConsensusStateWithNodesCoordinator(container.NodesCoordinator()) + consensusState := initializers.InitConsensusStateWithNodesCoordinator(container.NodesCoordinator()) sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -55,16 +57,16 @@ func initSubroundEndRoundWithContainer( currentPid, appStatusHandler, ) - sr.Header = &block.HeaderV2{ + sr.SetHeader(&block.HeaderV2{ Header: createDefaultHeader(), - } + }) - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, appStatusHandler, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -76,7 +78,7 @@ func initSubroundEndRoundWithContainerAndConsensusState( appStatusHandler core.AppStatusHandler, consensusState *spos.ConsensusState, signatureThrottler core.Throttler, -) bls.SubroundEndRound { +) v2.SubroundEndRound { ch := make(chan bool, 1) sr, _ := spos.NewSubround( bls.SrSignature, @@ -93,28 +95,28 @@ func initSubroundEndRoundWithContainerAndConsensusState( currentPid, appStatusHandler, ) - sr.Header = &block.HeaderV2{ + sr.SetHeader(&block.HeaderV2{ Header: createDefaultHeader(), - } + }) - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, appStatusHandler, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, signatureThrottler, ) return srEndRound } -func initSubroundEndRound(appStatusHandler core.AppStatusHandler) bls.SubroundEndRound { +func initSubroundEndRound(appStatusHandler core.AppStatusHandler) v2.SubroundEndRound { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, appStatusHandler) - sr.Header = &block.HeaderV2{ + sr.SetHeader(&block.HeaderV2{ Header: createDefaultHeader(), - } + }) return sr } @@ -122,7 +124,7 @@ func TestNewSubroundEndRound(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( bls.SrSignature, @@ -143,12 +145,12 @@ func TestNewSubroundEndRound(t *testing.T) { t.Run("nil subround should error", func(t *testing.T) { t.Parallel() - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( nil, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -158,12 +160,12 @@ func TestNewSubroundEndRound(t *testing.T) { t.Run("nil app status handler should error", func(t *testing.T) { t.Parallel() - 
srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, nil, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -173,24 +175,24 @@ func TestNewSubroundEndRound(t *testing.T) { t.Run("nil sent signatures tracker should error", func(t *testing.T) { t.Parallel() - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, nil, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) assert.Nil(t, srEndRound) - assert.Equal(t, bls.ErrNilSentSignatureTracker, err) + assert.Equal(t, v2.ErrNilSentSignatureTracker, err) }) t.Run("nil worker should error", func(t *testing.T) { t.Parallel() - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, nil, @@ -206,7 +208,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing. t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -225,12 +227,12 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing. &statusHandler.AppStatusHandlerStub{}, ) container.SetBlockchain(nil) - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -242,7 +244,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *test t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -261,12 +263,12 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *test &statusHandler.AppStatusHandlerStub{}, ) container.SetBlockProcessor(nil) - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -278,7 +280,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *test t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -297,13 +299,13 @@ func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *test &statusHandler.AppStatusHandlerStub{}, ) - sr.ConsensusState = nil - srEndRound, err := bls.NewSubroundEndRound( + sr.ConsensusStateHandler = nil + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, 
&statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -315,7 +317,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -334,12 +336,12 @@ func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t &statusHandler.AppStatusHandlerStub{}, ) container.SetMultiSignerContainer(nil) - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -351,7 +353,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testin t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -370,12 +372,12 @@ func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testin &statusHandler.AppStatusHandlerStub{}, ) container.SetRoundHandler(nil) - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -387,7 +389,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -406,12 +408,12 @@ func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T &statusHandler.AppStatusHandlerStub{}, ) container.SetSyncTimer(nil) - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -423,7 +425,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilThrottlerShouldFail(t *testing.T t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -442,12 +444,12 @@ func TestSubroundEndRound_NewSubroundEndRoundNilThrottlerShouldFail(t *testing.T &statusHandler.AppStatusHandlerStub{}, ) - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, nil, ) @@ -459,7 +461,7 @@ func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { t.Parallel() container := 
consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -478,12 +480,12 @@ func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, ) - srEndRound, err := bls.NewSubroundEndRound( + srEndRound, err := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -496,7 +498,7 @@ func TestSubroundEndRound_DoEndRoundJobNilHeaderShouldFail(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = nil + sr.SetHeader(nil) r := sr.DoEndRoundJob() assert.False(t, r) @@ -514,7 +516,7 @@ func TestSubroundEndRound_DoEndRoundJobErrAggregatingSigShouldFail(t *testing.T) } container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) sr.SetSelfPubKey("A") @@ -539,7 +541,7 @@ func TestSubroundEndRound_DoEndRoundJobErrCommitBlockShouldFail(t *testing.T) { } container.SetBlockProcessor(blProcMock) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.False(t, r) @@ -553,14 +555,14 @@ func TestSubroundEndRound_DoEndRoundJobErrTimeIsOutShouldFail(t *testing.T) { sr.SetSelfPubKey("A") remainingTime := time.Millisecond - roundHandlerMock := &mock.RoundHandlerMock{ + roundHandlerMock := &consensusMocks.RoundHandlerMock{ RemainingTimeCalled: func(startTime time.Time, maxTime time.Duration) time.Duration { return remainingTime }, } container.SetRoundHandler(roundHandlerMock) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -584,7 +586,7 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastBlockOK(t *testing.T) { sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -618,7 +620,7 @@ func TestSubroundEndRound_DoEndRoundJobErrMarshalizedDataToBroadcastOK(t *testin sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -653,7 +655,7 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastMiniBlocksOK(t *testing.T) { sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -689,7 +691,7 @@ func TestSubroundEndRound_DoEndRoundJobErrBroadcastTransactionsOK(t *testing.T) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -710,7 +712,7 @@ func TestSubroundEndRound_DoEndRoundJobAllOK(t *testing.T) { sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJob() assert.True(t, r) @@ -738,18 +740,18 @@ func 
TestSubroundEndRound_CheckIfSignatureIsFilled(t *testing.T) { sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) sr.SetSelfPubKey("A") - sr.Header = &block.Header{Nonce: 5} + sr.SetHeader(&block.Header{Nonce: 5}) r := sr.DoEndRoundJob() assert.True(t, r) - assert.Equal(t, expectedSignature, sr.Header.GetLeaderSignature()) + assert.Equal(t, expectedSignature, sr.GetHeader().GetLeaderSignature()) } func TestSubroundEndRound_DoEndRoundConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { t.Parallel() sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) ok := sr.DoEndRoundConsensusCheck() assert.False(t, ok) @@ -798,7 +800,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_RoundCanceledShouldReturnFa t.Parallel() sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) cnsData := consensus.Message{} res := sr.DoEndRoundJobByParticipant(&cnsData) @@ -809,7 +811,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_ConsensusDataNotSetShouldRe t.Parallel() sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Data = nil + sr.SetData(nil) cnsData := consensus.Message{} res := sr.DoEndRoundJobByParticipant(&cnsData) @@ -846,7 +848,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_ConsensusHeaderNotReceivedS t.Parallel() sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = nil + sr.SetHeader(nil) // set previous as finished sr.SetStatus(2, spos.SsFinished) @@ -864,7 +866,7 @@ func TestSubroundEndRound_DoEndRoundJobByParticipant_ShouldReturnTrue(t *testing hdr := &block.Header{Nonce: 37} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = hdr + sr.SetHeader(hdr) sr.AddReceivedHeader(hdr) // set previous as finished @@ -883,7 +885,7 @@ func TestSubroundEndRound_IsConsensusHeaderReceived_NoReceivedHeadersShouldRetur hdr := &block.Header{Nonce: 37} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = hdr + sr.SetHeader(hdr) res, retHdr := sr.IsConsensusHeaderReceived() assert.False(t, res) @@ -897,7 +899,7 @@ func TestSubroundEndRound_IsConsensusHeaderReceived_HeaderNotReceivedShouldRetur hdrToSearchFor := &block.Header{Nonce: 38} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) sr.AddReceivedHeader(hdr) - sr.Header = hdrToSearchFor + sr.SetHeader(hdrToSearchFor) res, retHdr := sr.IsConsensusHeaderReceived() assert.False(t, res) @@ -909,7 +911,7 @@ func TestSubroundEndRound_IsConsensusHeaderReceivedShouldReturnTrue(t *testing.T hdr := &block.Header{Nonce: 37} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = hdr + sr.SetHeader(hdr) sr.AddReceivedHeader(hdr) res, retHdr := sr.IsConsensusHeaderReceived() @@ -921,7 +923,7 @@ func TestSubroundEndRound_HaveConsensusHeaderWithFullInfoNilHdrShouldNotWork(t * t.Parallel() sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = nil + sr.SetHeader(nil) cnsData := consensus.Message{} @@ -945,7 +947,7 @@ func TestSubroundEndRound_HaveConsensusHeaderWithFullInfoShouldWork(t *testing.T LeaderSignature: originalLeaderSig, } sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = &hdr + sr.SetHeader(&hdr) cnsData := consensus.Message{ PubKeysBitmap: newPubKeyBitMap, @@ -975,7 +977,7 @@ func TestSubroundEndRound_CreateAndBroadcastHeaderFinalInfoBroadcastShouldBeCall } 
container.SetBroadcastMessenger(messenger) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = &block.Header{LeaderSignature: leaderSigInHdr} + sr.SetHeader(&block.Header{LeaderSignature: leaderSigInHdr}) leader, err := sr.GetLeader() assert.Nil(t, err) @@ -997,7 +999,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { hdr := &block.Header{Nonce: 37} sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = hdr + sr.SetHeader(hdr) sr.AddReceivedHeader(hdr) sr.SetStatus(2, spos.SsFinished) @@ -1055,7 +1057,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { } ch := make(chan bool, 1) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -1072,16 +1074,16 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, ) - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) - srEndRound.Header = hdr + srEndRound.SetHeader(hdr) srEndRound.AddReceivedHeader(hdr) srEndRound.SetStatus(2, spos.SsFinished) @@ -1094,7 +1096,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { t.Parallel() sr := initSubroundEndRound(&statusHandler.AppStatusHandlerStub{}) - sr.Header = nil + sr.SetHeader(nil) cnsData := consensus.Message{ // apply the data which is mocked in consensus state so the checks will pass @@ -1125,7 +1127,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { BlockHeaderHash: []byte("X"), PubKey: []byte("A"), } - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) res := sr.ReceivedBlockHeaderFinalInfo(&cnsData) assert.False(t, res) }) @@ -1134,7 +1136,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Data = nil + sr.SetData(nil) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), PubKey: []byte("A"), @@ -1172,7 +1174,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Data = []byte("Y") + sr.SetData([]byte("Y")) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), PubKey: []byte("A"), @@ -1198,7 +1200,7 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { }) ch := make(chan bool, 1) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -1214,16 +1216,16 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfo(t *testing.T) { currentPid, &statusHandler.AppStatusHandlerStub{}, ) - sr.Header = &block.HeaderV2{ + sr.SetHeader(&block.HeaderV2{ Header: createDefaultHeader(), - } + }) - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - 
&mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -1252,7 +1254,7 @@ func TestSubroundEndRound_IsOutOfTimeShouldReturnTrue(t *testing.T) { // update roundHandler's mock, so it will calculate for real the duration container := consensusMocks.InitConsensusCore() - roundHandler := mock.RoundHandlerMock{RemainingTimeCalled: func(startTime time.Time, maxTime time.Duration) time.Duration { + roundHandler := consensusMocks.RoundHandlerMock{RemainingTimeCalled: func(startTime time.Time, maxTime time.Duration) time.Duration { currentTime := time.Now() elapsedTime := currentTime.Sub(startTime) remainingTime := maxTime - elapsedTime @@ -1262,7 +1264,7 @@ func TestSubroundEndRound_IsOutOfTimeShouldReturnTrue(t *testing.T) { container.SetRoundHandler(&roundHandler) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.RoundTimeStamp = time.Now().AddDate(0, 0, -1) + sr.SetRoundTimeStamp(time.Now().AddDate(0, 0, -1)) res := sr.IsOutOfTime() assert.True(t, res) @@ -1285,7 +1287,7 @@ func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerify container.SetHeaderSigVerifier(headerSigVerifier) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsDta := &consensus.Message{} - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) assert.False(t, isValid) } @@ -1307,7 +1309,7 @@ func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnFalseWhenVerify container.SetHeaderSigVerifier(headerSigVerifier) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsDta := &consensus.Message{} - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) assert.False(t, isValid) } @@ -1329,7 +1331,7 @@ func TestSubroundEndRound_IsBlockHeaderFinalInfoValidShouldReturnTrue(t *testing container.SetHeaderSigVerifier(headerSigVerifier) sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) cnsDta := &consensus.Message{} - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) isValid := sr.IsBlockHeaderFinalInfoValid(cnsDta) assert.True(t, isValid) } @@ -1351,7 +1353,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) leader, err := sr.GetLeader() require.Nil(t, err) _ = sr.SetJobDone(leader, bls.SrSignature, true) @@ -1375,7 +1377,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { }, } - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) leader, err := sr.GetLeader() require.Nil(t, err) _ = sr.SetJobDone(leader, bls.SrSignature, true) @@ -1409,7 +1411,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { } container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, true) _ = sr.SetJobDone(sr.ConsensusGroup()[2], bls.SrSignature, true) @@ -1452,7 +1454,7 @@ func TestVerifyNodesOnAggSigVerificationFail(t *testing.T) { } container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, true) invalidSigners, err 
:= sr.VerifyNodesOnAggSigFail(context.TODO()) @@ -1469,7 +1471,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) sr.SetThreshold(bls.SrEndRound, 2) _, _, err := sr.ComputeAggSigOnValidNodes() @@ -1489,7 +1491,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { } container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) for _, participant := range sr.ConsensusGroup() { _ = sr.SetJobDone(participant, bls.SrSignature, true) } @@ -1510,7 +1512,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { }, } container.SetSigningHandler(signingHandler) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) for _, participant := range sr.ConsensusGroup() { _ = sr.SetJobDone(participant, bls.SrSignature, true) } @@ -1524,7 +1526,7 @@ func TestComputeAddSigOnValidNodes(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) for _, participant := range sr.ConsensusGroup() { _ = sr.SetJobDone(participant, bls.SrSignature, true) } @@ -1559,7 +1561,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { }) ch := make(chan bool, 1) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -1575,16 +1577,16 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { currentPid, &statusHandler.AppStatusHandlerStub{}, ) - sr.Header = &block.HeaderV2{ + sr.SetHeader(&block.HeaderV2{ Header: createDefaultHeader(), - } + }) - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -1640,7 +1642,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { _ = sr.SetJobDone(sr.ConsensusGroup()[0], bls.SrSignature, true) _ = sr.SetJobDone(sr.ConsensusGroup()[1], bls.SrSignature, true) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJobByLeader() require.False(t, r) @@ -1693,7 +1695,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { _ = sr.SetJobDone(participant, bls.SrSignature, true) } - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) r := sr.DoEndRoundJobByLeader() require.True(t, r) @@ -1730,7 +1732,7 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { }) ch := make(chan bool, 1) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -1747,12 +1749,12 @@ func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, ) - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -1762,13 +1764,13 @@ 
func TestSubroundEndRound_DoEndRoundJobByLeader(t *testing.T) { _ = srEndRound.SetJobDone(participant, bls.SrSignature, true) } - srEndRound.Header = &block.HeaderV2{ + srEndRound.SetHeader(&block.HeaderV2{ Header: createDefaultHeader(), ScheduledRootHash: []byte("sch root hash"), ScheduledAccumulatedFees: big.NewInt(0), ScheduledDeveloperFees: big.NewInt(0), PreviousHeaderProof: nil, - } + }) r := srEndRound.DoEndRoundJobByLeader() require.True(t, r) @@ -1785,7 +1787,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.ConsensusState.Data = nil + sr.ConsensusStateHandler.SetData(nil) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), @@ -1801,7 +1803,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = nil + sr.SetHeader(nil) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), @@ -1854,7 +1856,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -1871,12 +1873,12 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, ) - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) @@ -1965,9 +1967,9 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { container := consensusMocks.InitConsensusCore() sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) - sr.Header = &block.HeaderV2{ + sr.SetHeader(&block.HeaderV2{ Header: createDefaultHeader(), - } + }) cnsData := consensus.Message{ BlockHeaderHash: []byte("X"), PubKey: []byte("A"), @@ -2222,7 +2224,7 @@ func TestSubroundEndRound_getMinConsensusGroupIndexOfManagedKeys(t *testing.T) { container := consensusMocks.InitConsensusCore() keysHandler := &testscommon.KeysHandlerStub{} ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) sr, _ := spos.NewSubround( bls.SrSignature, bls.SrEndRound, @@ -2239,12 +2241,12 @@ func TestSubroundEndRound_getMinConsensusGroupIndexOfManagedKeys(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, ) - srEndRound, _ := bls.NewSubroundEndRound( + srEndRound, _ := v2.NewSubroundEndRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMocks.ThrottlerStub{}, ) diff --git a/consensus/spos/bls/subroundSignature.go b/consensus/spos/bls/v2/subroundSignature.go similarity index 95% rename from consensus/spos/bls/subroundSignature.go rename to consensus/spos/bls/v2/subroundSignature.go index f08ab7c8e27..77c0b5a05eb 100644 --- 
a/consensus/spos/bls/subroundSignature.go +++ b/consensus/spos/bls/v2/subroundSignature.go @@ -1,4 +1,4 @@ -package bls +package v2 import ( "context" @@ -15,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls" ) const timeSpentBetweenChecks = time.Millisecond @@ -72,7 +73,7 @@ func checkNewSubroundSignatureParams( if baseSubround == nil { return spos.ErrNilSubround } - if baseSubround.ConsensusState == nil { + if check.IfNil(baseSubround.ConsensusStateHandler) { return spos.ErrNilConsensusState } @@ -86,13 +87,13 @@ func (sr *subroundSignature) doSignatureJob(ctx context.Context) bool { if !sr.CanDoSubroundJob(sr.Current()) { return false } - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { log.Error("doSignatureJob", "error", spos.ErrNilHeader) return false } isSelfSingleKeyLeader := sr.IsNodeLeaderInCurrentRound(sr.SelfPubKey()) && sr.ShouldConsiderSelfKeyInConsensus() - isFlagActive := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) + isFlagActive := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) isSelfSingleKeyInConsensusGroup := sr.IsNodeInConsensusGroup(sr.SelfPubKey()) && sr.ShouldConsiderSelfKeyInConsensus() if isSelfSingleKeyLeader || isSelfSingleKeyInConsensusGroup { if !sr.doSignatureJobForSingleKey(isSelfSingleKeyLeader, isFlagActive) { @@ -122,7 +123,7 @@ func (sr *subroundSignature) createAndSendSignatureMessage(signatureShare []byte nil, pkBytes, nil, - int(MtSignature), + int(bls.MtSignature), sr.RoundHandler().Index(), sr.ChainID(), nil, @@ -168,7 +169,7 @@ func (sr *subroundSignature) completeSignatureSubRound(pk string, shouldWaitForA // is set on true for the subround Signature func (sr *subroundSignature) receivedSignature(_ context.Context, cnsDta *consensus.Message) bool { // TODO[cleanup cns finality]: remove this method, received signatures will be handled on subroundEndRound - if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return true } @@ -238,7 +239,7 @@ func (sr *subroundSignature) receivedSignature(_ context.Context, cnsDta *consen // doSignatureConsensusCheck method checks if the consensus in the subround Signature is achieved func (sr *subroundSignature) doSignatureConsensusCheck() bool { - if sr.RoundCanceled { + if sr.GetRoundCanceled() { return false } @@ -246,7 +247,7 @@ func (sr *subroundSignature) doSignatureConsensusCheck() bool { return true } - if check.IfNil(sr.Header) { + if check.IfNil(sr.GetHeader()) { return false } @@ -260,14 +261,14 @@ func (sr *subroundSignature) doSignatureConsensusCheck() bool { } // TODO[cleanup cns finality]: simply return false and remove the rest of the method. 
This will be handled by subroundEndRound - if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) { + if sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) { return false } isSelfLeader := sr.IsSelfLeader() threshold := sr.Threshold(sr.Current()) - if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.Header) { + if sr.FallbackHeaderValidator().ShouldApplyFallbackValidation(sr.GetHeader()) { threshold = sr.FallbackThreshold(sr.Current()) log.Warn("subroundSignature.doSignatureConsensusCheck: fallback validation has been applied", "minimum number of signatures required", threshold, @@ -278,7 +279,7 @@ func (sr *subroundSignature) doSignatureConsensusCheck() bool { areSignaturesCollected, numSigs := sr.areSignaturesCollected(threshold) areAllSignaturesCollected := numSigs == sr.ConsensusGroupSize() - isSignatureCollectionDone := areAllSignaturesCollected || (areSignaturesCollected && sr.WaitingAllSignaturesTimeOut) + isSignatureCollectionDone := areAllSignaturesCollected || (areSignaturesCollected && sr.GetWaitingAllSignaturesTimeOut()) isJobDoneByLeader := isSelfLeader && isSignatureCollectionDone isSelfJobDone := sr.IsSelfJobDone(sr.Current()) @@ -346,7 +347,7 @@ func (sr *subroundSignature) waitAllSignatures() { return } - sr.WaitingAllSignaturesTimeOut = true + sr.SetWaitingAllSignaturesTimeOut(true) select { case sr.ConsensusChannel() <- true: @@ -412,14 +413,14 @@ func (sr *subroundSignature) doSignatureJobForManagedKeys(ctx context.Context) b func (sr *subroundSignature) sendSignatureForManagedKey(idx int, pk string) bool { isCurrentNodeMultiKeyLeader := sr.IsMultiKeyLeaderInCurrentRound() - isFlagActive := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.Header.GetEpoch()) + isFlagActive := sr.EnableEpochsHandler().IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, sr.GetHeader().GetEpoch()) pkBytes := []byte(pk) signatureShare, err := sr.SigningHandler().CreateSignatureShareForPublicKey( sr.GetData(), uint16(idx), - sr.Header.GetEpoch(), + sr.GetHeader().GetEpoch(), pkBytes, ) if err != nil { @@ -479,7 +480,7 @@ func (sr *subroundSignature) doSignatureJobForSingleKey(isSelfLeader bool, isFla signatureShare, err := sr.SigningHandler().CreateSignatureShareForPublicKey( sr.GetData(), uint16(selfIndex), - sr.Header.GetEpoch(), + sr.GetHeader().GetEpoch(), []byte(sr.SelfPubKey()), ) if err != nil { diff --git a/consensus/spos/bls/subroundSignature_test.go b/consensus/spos/bls/v2/subroundSignature_test.go similarity index 87% rename from consensus/spos/bls/subroundSignature_test.go rename to consensus/spos/bls/v2/subroundSignature_test.go index bb76513bfc7..bedacbcf163 100644 --- a/consensus/spos/bls/subroundSignature_test.go +++ b/consensus/spos/bls/v2/subroundSignature_test.go @@ -1,4 +1,4 @@ -package bls_test +package v2_test import ( "context" @@ -16,20 +16,21 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" - "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" dataRetrieverMock "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/testscommon" consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" 
"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) const setThresholdJobsDone = "threshold" -func initSubroundSignatureWithContainer(container *consensusMocks.ConsensusCoreMock) bls.SubroundSignature { - consensusState := initConsensusState() +func initSubroundSignatureWithContainer(container *consensusMocks.ConsensusCoreMock) v2.SubroundSignature { + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -48,18 +49,18 @@ func initSubroundSignatureWithContainer(container *consensusMocks.ConsensusCoreM &statusHandler.AppStatusHandlerStub{}, ) - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) return srSignature } -func initSubroundSignature() bls.SubroundSignature { +func initSubroundSignature() v2.SubroundSignature { container := consensusMocks.InitConsensusCore() return initSubroundSignatureWithContainer(container) } @@ -68,7 +69,7 @@ func TestNewSubroundSignature(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -90,11 +91,11 @@ func TestNewSubroundSignature(t *testing.T) { t.Run("nil subround should error", func(t *testing.T) { t.Parallel() - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( nil, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -104,7 +105,7 @@ func TestNewSubroundSignature(t *testing.T) { t.Run("nil worker should error", func(t *testing.T) { t.Parallel() - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, @@ -118,11 +119,11 @@ func TestNewSubroundSignature(t *testing.T) { t.Run("nil app status handler should error", func(t *testing.T) { t.Parallel() - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, nil, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -132,26 +133,26 @@ func TestNewSubroundSignature(t *testing.T) { t.Run("nil sent signatures tracker should error", func(t *testing.T) { t.Parallel() - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, nil, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) assert.Nil(t, srSignature) - assert.Equal(t, bls.ErrNilSentSignatureTracker, err) + assert.Equal(t, v2.ErrNilSentSignatureTracker, err) }) t.Run("nil signatureThrottler should error", func(t *testing.T) { t.Parallel() - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, nil, ) @@ -164,7 +165,7 @@ func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *te 
t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -183,12 +184,12 @@ func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *te &statusHandler.AppStatusHandlerStub{}, ) - sr.ConsensusState = nil - srSignature, err := bls.NewSubroundSignature( + sr.ConsensusStateHandler = nil + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -200,7 +201,7 @@ func TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -219,11 +220,11 @@ func TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) &statusHandler.AppStatusHandlerStub{}, ) container.SetHasher(nil) - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -235,7 +236,7 @@ func TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -254,11 +255,11 @@ func TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail &statusHandler.AppStatusHandlerStub{}, ) container.SetMultiSignerContainer(nil) - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -270,7 +271,7 @@ func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *test t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -290,11 +291,11 @@ func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *test ) container.SetRoundHandler(nil) - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -306,7 +307,7 @@ func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -325,11 +326,11 @@ func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing &statusHandler.AppStatusHandlerStub{}, ) container.SetSyncTimer(nil) - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, 
&statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -341,7 +342,7 @@ func TestSubroundSignature_NewSubroundSignatureNilAppStatusHandlerShouldFail(t * t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -360,11 +361,11 @@ func TestSubroundSignature_NewSubroundSignatureNilAppStatusHandlerShouldFail(t * &statusHandler.AppStatusHandlerStub{}, ) - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, nil, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -376,7 +377,7 @@ func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := spos.NewSubround( @@ -395,11 +396,11 @@ func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, ) - srSignature, err := bls.NewSubroundSignature( + srSignature, err := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -414,20 +415,20 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) - sr.Header = &block.Header{} - sr.Data = nil + sr.SetHeader(&block.Header{}) + sr.SetData(nil) r := sr.DoSignatureJob() assert.False(t, r) - sr.Data = []byte("X") + sr.SetData([]byte("X")) - sr.Header = nil + sr.SetHeader(nil) r = sr.DoSignatureJob() assert.False(t, r) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) err := errors.New("create signature share error") signingHandler := &consensusMocks.SigningHandlerStub{ @@ -469,14 +470,14 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { }, }) _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrSignature, false) - sr.RoundCanceled = false + sr.SetRoundCanceled(false) leader, err := sr.GetLeader() assert.Nil(t, err) sr.SetSelfPubKey(leader) r = sr.DoSignatureJob() assert.True(t, r) - assert.False(t, sr.RoundCanceled) + assert.False(t, sr.GetRoundCanceled()) }) t.Run("with equivalent messages flag active should work", func(t *testing.T) { t.Parallel() @@ -488,9 +489,9 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { }, } container.SetEnableEpochsHandler(enableEpochsHandler) - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) leader, err := sr.GetLeader() assert.Nil(t, err) sr.SetSelfPubKey(leader) @@ -503,7 +504,7 @@ func TestSubroundSignature_DoSignatureJob(t *testing.T) { r := sr.DoSignatureJob() assert.True(t, r) - assert.False(t, sr.RoundCanceled) + assert.False(t, sr.GetRoundCanceled()) assert.Nil(t, err) leaderJobDone, err := sr.JobDone(leader, bls.SrSignature) assert.NoError(t, err) @@ -519,7 +520,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { t.Parallel() 
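[Illustrative sketch, not part of the patch] The test updates in this and the surrounding hunks replace direct field writes on the consensus state (sr.Header = ..., sr.Data = ..., sr.RoundCanceled = ...) with the accessor methods used throughout the diff (SetHeader, SetData, SetRoundCanceled, GetRoundCanceled). The sketch below only demonstrates that accessor pattern; the example package, the stateHandler interface and the prepareRound helper are hypothetical names, and SetHeader is assumed here to accept a data.HeaderHandler.

package example

import (
    "github.com/multiversx/mx-chain-core-go/data"
    "github.com/multiversx/mx-chain-core-go/data/block"
)

// stateHandler captures only the accessors exercised by the updated tests.
// It is an illustration of the pattern, not the real ConsensusStateHandler contract.
type stateHandler interface {
    SetHeader(header data.HeaderHandler)
    SetData(d []byte)
    SetRoundCanceled(canceled bool)
    GetRoundCanceled() bool
}

// prepareRound mutates the consensus state exclusively through setters,
// mirroring how the migrated tests arrange their fixtures before calling
// the subround under test.
func prepareRound(sh stateHandler) bool {
    sh.SetHeader(&block.Header{})
    sh.SetData([]byte("X"))
    sh.SetRoundCanceled(false)
    return !sh.GetRoundCanceled()
}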
container := consensusMocks.InitConsensusCore() - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true @@ -546,7 +547,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { signatureSentForPks := make(map[string]struct{}) mutex := sync.Mutex{} - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{ @@ -556,16 +557,16 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { mutex.Unlock() }, }, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) - srSignature.Header = &block.Header{} - srSignature.Data = nil + srSignature.SetHeader(&block.Header{}) + srSignature.SetData(nil) r := srSignature.DoSignatureJob() assert.False(t, r) - sr.Data = []byte("X") + sr.SetData([]byte("X")) err := errors.New("create signature share error") signingHandler := &consensusMocks.SigningHandlerStub{ @@ -589,13 +590,13 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { assert.True(t, r) _ = sr.SetJobDone(sr.SelfPubKey(), bls.SrSignature, false) - sr.RoundCanceled = false + sr.SetRoundCanceled(false) leader, err := sr.GetLeader() assert.Nil(t, err) sr.SetSelfPubKey(leader) r = srSignature.DoSignatureJob() assert.True(t, r) - assert.False(t, sr.RoundCanceled) + assert.False(t, sr.GetRoundCanceled()) expectedMap := map[string]struct{}{ "A": {}, "B": {}, @@ -626,7 +627,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { }, } container.SetSigningHandler(signingHandler) - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true @@ -653,7 +654,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { signatureSentForPks := make(map[string]struct{}) mutex := sync.Mutex{} - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{ @@ -663,11 +664,11 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { mutex.Unlock() }, }, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) signaturesBroadcast := make(map[string]int) container.SetBroadcastMessenger(&consensusMocks.BroadcastMessengerMock{ BroadcastConsensusMessageCalled: func(message *consensus.Message) error { @@ -683,7 +684,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { r := srSignature.DoSignatureJob() assert.True(t, r) - assert.False(t, sr.RoundCanceled) + assert.False(t, sr.GetRoundCanceled()) assert.True(t, sr.IsSubroundFinished(bls.SrSignature)) for _, pk := range sr.ConsensusGroup() { @@ -733,7 +734,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { return make([]byte, 0), expErr }, }) - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true @@ -758,10 +759,10 @@ func TestSubroundSignature_SendSignature(t *testing.T) { currentPid, 
&statusHandler.AppStatusHandlerStub{}, ) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) signatureSentForPks := make(map[string]struct{}) - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{ @@ -769,7 +770,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { signatureSentForPks[string(pkBytes)] = struct{}{} }, }, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -800,7 +801,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { return fmt.Errorf("error") }, }) - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true @@ -825,10 +826,10 @@ func TestSubroundSignature_SendSignature(t *testing.T) { currentPid, &statusHandler.AppStatusHandlerStub{}, ) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) signatureSentForPks := make(map[string]struct{}) - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{ @@ -836,7 +837,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { signatureSentForPks[string(pkBytes)] = struct{}{} }, }, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -867,7 +868,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { return nil }, }) - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true @@ -892,11 +893,11 @@ func TestSubroundSignature_SendSignature(t *testing.T) { currentPid, &statusHandler.AppStatusHandlerStub{}, ) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) signatureSentForPks := make(map[string]struct{}) varCalled := false - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{ @@ -905,7 +906,7 @@ func TestSubroundSignature_SendSignature(t *testing.T) { varCalled = true }, }, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) @@ -935,7 +936,7 @@ func TestSubroundSignature_DoSignatureJobForManagedKeys(t *testing.T) { }, } container.SetSigningHandler(signingHandler) - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true @@ -962,7 +963,7 @@ func TestSubroundSignature_DoSignatureJobForManagedKeys(t *testing.T) { signatureSentForPks := make(map[string]struct{}) mutex := sync.Mutex{} - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{ @@ -972,11 +973,11 @@ func TestSubroundSignature_DoSignatureJobForManagedKeys(t *testing.T) { mutex.Unlock() }, }, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{}, ) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) signaturesBroadcast := make(map[string]int) 
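[Illustrative sketch, not part of the patch] The multikey assertions that follow register a broadcast-messenger stub and count, per managed public key, how many consensus messages were broadcast. The stripped-down sketch below shows only that counting pattern; broadcastCounter and its methods are hypothetical names, and the only assumption carried over from the hunks above is that each broadcast message exposes the signer's public key as a byte slice.

package example

import "sync"

// broadcastCounter tallies broadcast consensus messages per public key,
// the same bookkeeping the multikey tests perform inside their stub callback.
type broadcastCounter struct {
    mut    sync.Mutex
    counts map[string]int
}

func newBroadcastCounter() *broadcastCounter {
    return &broadcastCounter{counts: make(map[string]int)}
}

// onBroadcast records one broadcast for the signing key carried by the message;
// it is meant to be wired as the stub's broadcast callback.
func (bc *broadcastCounter) onBroadcast(pubKey []byte) error {
    bc.mut.Lock()
    bc.counts[string(pubKey)]++
    bc.mut.Unlock()
    return nil
}

// sentOncePerKey checks that every consensus-group key broadcast exactly once,
// which is what the expectedMap assertions in the tests verify.
func (bc *broadcastCounter) sentOncePerKey(group []string) bool {
    bc.mut.Lock()
    defer bc.mut.Unlock()
    for _, pk := range group {
        if bc.counts[pk] != 1 {
            return false
        }
    }
    return true
}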
container.SetBroadcastMessenger(&consensusMocks.BroadcastMessengerMock{ BroadcastConsensusMessageCalled: func(message *consensus.Message) error { @@ -1035,7 +1036,7 @@ func TestSubroundSignature_DoSignatureJobForManagedKeys(t *testing.T) { } container.SetEnableEpochsHandler(enableEpochsHandler) - consensusState := initConsensusStateWithKeysHandler( + consensusState := initializers.InitConsensusStateWithKeysHandler( &testscommon.KeysHandlerStub{ IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return true @@ -1060,11 +1061,11 @@ func TestSubroundSignature_DoSignatureJobForManagedKeys(t *testing.T) { &statusHandler.AppStatusHandlerStub{}, ) - srSignature, _ := bls.NewSubroundSignature( + srSignature, _ := v2.NewSubroundSignature( sr, &statusHandler.AppStatusHandlerStub{}, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensusMocks.SposWorkerMock{}, &dataRetrieverMock.ThrottlerStub{ CanProcessCalled: func() bool { return false @@ -1072,7 +1073,7 @@ func TestSubroundSignature_DoSignatureJobForManagedKeys(t *testing.T) { }, ) - sr.Header = &block.Header{} + sr.SetHeader(&block.Header{}) ctx, cancel := context.WithCancel(context.TODO()) cancel() r := srSignature.DoSignatureJobForManagedKeys(ctx) @@ -1086,10 +1087,10 @@ func TestSubroundSignature_ReceivedSignature(t *testing.T) { t.Parallel() container := consensusMocks.InitConsensusCore() - sr := *initSubroundSignatureWithContainer(container) + sr := initSubroundSignatureWithContainer(container) signature := []byte("signature") cnsMsg := consensus.NewConsensusMessage( - sr.Data, + sr.GetData(), signature, nil, nil, @@ -1105,16 +1106,16 @@ func TestSubroundSignature_ReceivedSignature(t *testing.T) { nil, ) - sr.Header = &block.Header{} - sr.Data = nil + sr.SetHeader(&block.Header{}) + sr.SetData(nil) r := sr.ReceivedSignature(cnsMsg) assert.False(t, r) - sr.Data = []byte("Y") + sr.SetData([]byte("Y")) r = sr.ReceivedSignature(cnsMsg) assert.False(t, r) - sr.Data = []byte("X") + sr.SetData([]byte("X")) r = sr.ReceivedSignature(cnsMsg) assert.False(t, r) @@ -1168,12 +1169,12 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { container := consensusMocks.InitConsensusCore() container.SetSigningHandler(signingHandler) - sr := *initSubroundSignatureWithContainer(container) - sr.Header = &block.Header{} + sr := initSubroundSignatureWithContainer(container) + sr.SetHeader(&block.Header{}) signature := []byte("signature") cnsMsg := consensus.NewConsensusMessage( - sr.Data, + sr.GetData(), signature, nil, nil, @@ -1189,15 +1190,15 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { nil, ) - sr.Data = nil + sr.SetData(nil) r := sr.ReceivedSignature(cnsMsg) assert.False(t, r) - sr.Data = []byte("Y") + sr.SetData([]byte("Y")) r = sr.ReceivedSignature(cnsMsg) assert.False(t, r) - sr.Data = []byte("X") + sr.SetData([]byte("X")) r = sr.ReceivedSignature(cnsMsg) assert.False(t, r) @@ -1229,7 +1230,7 @@ func TestSubroundSignature_ReceivedSignatureStoreShareFailed(t *testing.T) { func TestSubroundSignature_SignaturesCollected(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() for i := 0; i < len(sr.ConsensusGroup()); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrBlock, false) @@ -1258,15 +1259,15 @@ func TestSubroundSignature_SignaturesCollected(t *testing.T) { func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() - 
sr.RoundCanceled = true + sr := initSubroundSignature() + sr.SetRoundCanceled(true) assert.False(t, sr.DoSignatureConsensusCheck()) } func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSubroundIsFinished(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() sr.SetStatus(bls.SrSignature, spos.SsFinished) assert.True(t, sr.DoSignatureConsensusCheck()) } @@ -1274,21 +1275,21 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSubround func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenSignaturesCollectedReturnTrue(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() for i := 0; i < sr.Threshold(bls.SrSignature); i++ { _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) } - sr.Header = &block.HeaderV2{} + sr.SetHeader(&block.HeaderV2{}) assert.True(t, sr.DoSignatureConsensusCheck()) } func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenSignaturesCollectedReturnFalse(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() - sr.Header = &block.HeaderV2{Header: createDefaultHeader()} + sr := initSubroundSignature() + sr.SetHeader(&block.HeaderV2{Header: createDefaultHeader()}) assert.False(t, sr.DoSignatureConsensusCheck()) } @@ -1362,8 +1363,8 @@ func testSubroundSignatureDoSignatureConsensusCheck(args argTestSubroundSignatur return false }, }) - sr := *initSubroundSignatureWithContainer(container) - sr.WaitingAllSignaturesTimeOut = args.waitingAllSignaturesTimeOut + sr := initSubroundSignatureWithContainer(container) + sr.SetWaitingAllSignaturesTimeOut(args.waitingAllSignaturesTimeOut) if !args.flagActive { leader, err := sr.GetLeader() @@ -1379,7 +1380,7 @@ func testSubroundSignatureDoSignatureConsensusCheck(args argTestSubroundSignatur _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) } - sr.Header = &block.HeaderV2{} + sr.SetHeader(&block.HeaderV2{}) assert.Equal(t, args.expectedResult, sr.DoSignatureConsensusCheck()) } } @@ -1393,8 +1394,8 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnFalseWhenFallbac return false }, }) - sr := *initSubroundSignatureWithContainer(container) - sr.WaitingAllSignaturesTimeOut = false + sr := initSubroundSignatureWithContainer(container) + sr.SetWaitingAllSignaturesTimeOut(false) leader, err := sr.GetLeader() assert.Nil(t, err) @@ -1416,8 +1417,8 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenFallback return true }, }) - sr := *initSubroundSignatureWithContainer(container) - sr.WaitingAllSignaturesTimeOut = true + sr := initSubroundSignatureWithContainer(container) + sr.SetWaitingAllSignaturesTimeOut(true) leader, err := sr.GetLeader() assert.Nil(t, err) @@ -1427,20 +1428,20 @@ func TestSubroundSignature_DoSignatureConsensusCheckShouldReturnTrueWhenFallback _ = sr.SetJobDone(sr.ConsensusGroup()[i], bls.SrSignature, true) } - sr.Header = &block.HeaderV2{} + sr.SetHeader(&block.HeaderV2{}) assert.True(t, sr.DoSignatureConsensusCheck()) } func TestSubroundSignature_ReceivedSignatureReturnFalseWhenConsensusDataIsNotEqual(t *testing.T) { t.Parallel() - sr := *initSubroundSignature() + sr := initSubroundSignature() leader, err := sr.GetLeader() require.Nil(t, err) cnsMsg := consensus.NewConsensusMessage( - append(sr.Data, []byte("X")...), + append(sr.GetData(), []byte("X")...), []byte("signature"), nil, nil, @@ -1456,6 +1457,6 @@ func TestSubroundSignature_ReceivedSignatureReturnFalseWhenConsensusDataIsNotEqu nil, ) - 
sr.Header = &block.HeaderV2{} + sr.SetHeader(&block.HeaderV2{}) assert.False(t, sr.ReceivedSignature(cnsMsg)) } diff --git a/consensus/spos/bls/subroundStartRound.go b/consensus/spos/bls/v2/subroundStartRound.go similarity index 92% rename from consensus/spos/bls/subroundStartRound.go rename to consensus/spos/bls/v2/subroundStartRound.go index 6f8c6d03908..887532c02fa 100644 --- a/consensus/spos/bls/subroundStartRound.go +++ b/consensus/spos/bls/v2/subroundStartRound.go @@ -1,4 +1,4 @@ -package bls +package v2 import ( "context" @@ -71,7 +71,7 @@ func checkNewSubroundStartRoundParams( if baseSubround == nil { return spos.ErrNilSubround } - if baseSubround.ConsensusState == nil { + if check.IfNil(baseSubround.ConsensusStateHandler) { return spos.ErrNilConsensusState } @@ -96,25 +96,18 @@ func (sr *subroundStartRound) SetOutportHandler(outportHandler outport.OutportHa // doStartRoundJob method does the job of the subround StartRound func (sr *subroundStartRound) doStartRoundJob(_ context.Context) bool { sr.ResetConsensusState() - sr.RoundIndex = sr.RoundHandler().Index() - sr.RoundTimeStamp = sr.RoundHandler().TimeStamp() + sr.SetRoundIndex(sr.RoundHandler().Index()) + sr.SetRoundTimeStamp(sr.RoundHandler().TimeStamp()) topic := spos.GetConsensusTopicID(sr.ShardCoordinator()) sr.GetAntiFloodHandler().ResetForTopic(topic) - // reset the consensus messages, but still keep the proofs for current hash and previous hash - currentHash := sr.Blockchain().GetCurrentBlockHeaderHash() - prevHash := make([]byte, 0) - currentHeader := sr.Blockchain().GetCurrentBlockHeader() - if !check.IfNil(currentHeader) { - prevHash = currentHeader.GetPrevHash() - } - sr.worker.ResetConsensusMessages(currentHash, prevHash) + sr.worker.ResetConsensusMessages() return true } // doStartRoundConsensusCheck method checks if the consensus is achieved in the subround StartRound func (sr *subroundStartRound) doStartRoundConsensusCheck() bool { - if sr.RoundCanceled { + if sr.GetRoundCanceled() { return false } @@ -143,7 +136,7 @@ func (sr *subroundStartRound) initCurrentRound() bool { "round index", sr.RoundHandler().Index(), "error", err.Error()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } @@ -162,7 +155,7 @@ func (sr *subroundStartRound) initCurrentRound() bool { if err != nil { log.Debug("initCurrentRound.GetLeader", "error", err.Error()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } @@ -201,19 +194,19 @@ func (sr *subroundStartRound) initCurrentRound() bool { if err != nil { log.Debug("initCurrentRound.Reset", "error", err.Error()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } - startTime := sr.RoundTimeStamp + startTime := sr.GetRoundTimeStamp() maxTime := sr.RoundHandler().TimeDuration() * time.Duration(sr.processingThresholdPercentage) / 100 if sr.RoundHandler().RemainingTime(startTime, maxTime) < 0 { log.Debug("canceled round, time is out", "round", sr.SyncTimer().FormattedCurrentTime(), sr.RoundHandler().Index(), "subround", sr.Name()) - sr.RoundCanceled = true + sr.SetRoundCanceled(true) return false } @@ -286,7 +279,7 @@ func (sr *subroundStartRound) indexRoundIfNeeded(pubKeys []string) { BlockWasProposed: false, ShardId: shardId, Epoch: epoch, - Timestamp: uint64(sr.RoundTimeStamp.Unix()), + Timestamp: uint64(sr.GetRoundTimeStamp().Unix()), } roundsInfo := &outportcore.RoundsInfo{ ShardID: shardId, @@ -313,7 +306,7 @@ func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error leader, nextConsensusGroup, err := 
sr.GetNextConsensusGroup( randomSeed, - uint64(sr.RoundIndex), + uint64(sr.GetRoundIndex()), shardId, sr.NodesCoordinator(), currentHeader.GetEpoch(), @@ -362,5 +355,5 @@ func (sr *subroundStartRound) changeEpoch(currentEpoch uint32) { // NotifyOrder returns the notification order for a start of epoch event func (sr *subroundStartRound) NotifyOrder() uint32 { - return common.ConsensusOrder + return common.ConsensusStartRoundOrder } diff --git a/consensus/spos/bls/subroundStartRound_test.go b/consensus/spos/bls/v2/subroundStartRound_test.go similarity index 83% rename from consensus/spos/bls/subroundStartRound_test.go rename to consensus/spos/bls/v2/subroundStartRound_test.go index c87a678857d..28f063277c0 100644 --- a/consensus/spos/bls/subroundStartRound_test.go +++ b/consensus/spos/bls/v2/subroundStartRound_test.go @@ -1,4 +1,4 @@ -package bls_test +package v2_test import ( "fmt" @@ -9,9 +9,11 @@ import ( outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/stretchr/testify/require" + v2 "github.com/multiversx/mx-chain-go/consensus/spos/bls/v2" processMock "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/testscommon/bootstrapperStubs" "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/multiversx/mx-chain-go/testscommon/consensus/initializers" "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/stretchr/testify/assert" @@ -28,23 +30,23 @@ import ( var expErr = fmt.Errorf("expected error") -func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (bls.SubroundStartRound, error) { - startRound, err := bls.NewSubroundStartRound( +func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (v2.SubroundStartRound, error) { + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) return startRound, err } -func defaultWithoutErrorSubroundStartRoundFromSubround(sr *spos.Subround) bls.SubroundStartRound { - startRound, _ := bls.NewSubroundStartRound( +func defaultWithoutErrorSubroundStartRoundFromSubround(sr *spos.Subround) v2.SubroundStartRound { + startRound, _ := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) return startRound @@ -73,21 +75,21 @@ func defaultSubround( ) } -func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) bls.SubroundStartRound { - consensusState := initConsensusState() +func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) v2.SubroundStartRound { + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) - srStartRound, _ := bls.NewSubroundStartRound( + srStartRound, _ := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) return srStartRound } -func initSubroundStartRound() bls.SubroundStartRound { +func initSubroundStartRound() v2.SubroundStartRound { container := consensus.InitConsensusCore() return initSubroundStartRoundWithContainer(container) } @@ -96,7 +98,7 @@ func TestNewSubroundStartRound(t *testing.T) { t.Parallel() ch := make(chan bool, 1) - consensusState := initConsensusState() + 
consensusState := initializers.InitConsensusState() container := consensus.InitConsensusCore() sr, _ := spos.NewSubround( -1, @@ -117,11 +119,11 @@ func TestNewSubroundStartRound(t *testing.T) { t.Run("nil subround should error", func(t *testing.T) { t.Parallel() - srStartRound, err := bls.NewSubroundStartRound( + srStartRound, err := v2.NewSubroundStartRound( nil, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) assert.Nil(t, srStartRound) @@ -130,22 +132,22 @@ func TestNewSubroundStartRound(t *testing.T) { t.Run("nil sent signatures tracker should error", func(t *testing.T) { t.Parallel() - srStartRound, err := bls.NewSubroundStartRound( + srStartRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, nil, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) assert.Nil(t, srStartRound) - assert.Equal(t, bls.ErrNilSentSignatureTracker, err) + assert.Equal(t, v2.ErrNilSentSignatureTracker, err) }) t.Run("nil worker should error", func(t *testing.T) { t.Parallel() - srStartRound, err := bls.NewSubroundStartRound( + srStartRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, nil, ) @@ -160,7 +162,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilBlockChainShouldFail(t *test container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -176,7 +178,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilBootstrapperShouldFail(t *te container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -191,12 +193,12 @@ func TestSubroundStartRound_NewSubroundStartRoundNilConsensusStateShouldFail(t * t.Parallel() container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) - sr.ConsensusState = nil + sr.ConsensusStateHandler = nil srStartRound, err := defaultSubroundStartRoundFromSubround(sr) assert.Nil(t, srStartRound) @@ -208,7 +210,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilMultiSignerContainerShouldFa container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -224,7 +226,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilRoundHandlerShouldFail(t *te container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -240,7 +242,7 @@ func TestSubroundStartRound_NewSubroundStartRoundNilSyncTimerShouldFail(t *testi container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -256,7 +258,7 @@ func 
TestSubroundStartRound_NewSubroundStartRoundNilValidatorGroupSelectorShould container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -272,7 +274,7 @@ func TestSubroundStartRound_NewSubroundStartRoundShouldWork(t *testing.T) { container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) @@ -288,12 +290,12 @@ func TestSubroundStartRound_DoStartRoundShouldReturnTrue(t *testing.T) { container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) - srStartRound := *defaultWithoutErrorSubroundStartRoundFromSubround(sr) + srStartRound := defaultWithoutErrorSubroundStartRoundFromSubround(sr) r := srStartRound.DoStartRoundJob() assert.True(t, r) @@ -302,9 +304,9 @@ func TestSubroundStartRound_DoStartRoundShouldReturnTrue(t *testing.T) { func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenRoundIsCanceled(t *testing.T) { t.Parallel() - sr := *initSubroundStartRound() + sr := initSubroundStartRound() - sr.RoundCanceled = true + sr.SetRoundCanceled(true) ok := sr.DoStartRoundConsensusCheck() assert.False(t, ok) @@ -313,7 +315,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenRound func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenRoundIsFinished(t *testing.T) { t.Parallel() - sr := *initSubroundStartRound() + sr := initSubroundStartRound() sr.SetStatus(bls.SrStartRound, spos.SsFinished) @@ -331,7 +333,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenInitCu container := consensus.InitConsensusCore() container.SetBootStrapper(bootstrapperMock) - sr := *initSubroundStartRoundWithContainer(container) + sr := initSubroundStartRoundWithContainer(container) sentTrackerInterface := sr.GetSentSignatureTracker() sentTracker := sentTrackerInterface.(*testscommon.SentSignatureTrackerStub) startRoundCalled := false @@ -355,7 +357,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnFalseWhenInitC container.SetBootStrapper(bootstrapperMock) container.SetRoundHandler(initRoundHandlerMock()) - sr := *initSubroundStartRoundWithContainer(container) + sr := initSubroundStartRoundWithContainer(container) ok := sr.DoStartRoundConsensusCheck() assert.False(t, ok) @@ -372,7 +374,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetNodeStateNot container := consensus.InitConsensusCore() container.SetBootStrapper(bootstrapperMock) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.False(t, r) @@ -390,7 +392,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextCon container.SetValidatorGroupSelector(validatorGroupSelector) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.False(t, r) @@ -407,7 +409,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenMainMachineIsAct container := consensus.InitConsensusCore() 
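[Illustrative sketch, not part of the patch] The initCurrentRound changes earlier in this diff cancel the round once the remaining time falls below the processing threshold (maxTime = round duration * processingThresholdPercentage / 100, measured from the round timestamp), and the time-is-out and not-synchronized tests in this file exercise that path. The sketch below only restates that time-budget arithmetic; processingBudget and shouldCancelRound are hypothetical helper names.

package example

import "time"

// processingBudget returns how much of the round may be spent before the
// start-round step must cancel, following
// maxTime = roundDuration * thresholdPercent / 100 as in initCurrentRound.
func processingBudget(roundDuration time.Duration, thresholdPercent int) time.Duration {
    return roundDuration * time.Duration(thresholdPercent) / 100
}

// shouldCancelRound mirrors the cancellation condition: the round is abandoned
// once the time elapsed since the round timestamp exceeds the budget, i.e. the
// remaining time becomes negative.
func shouldCancelRound(roundTimeStamp time.Time, roundDuration time.Duration, thresholdPercent int) bool {
    maxTime := processingBudget(roundDuration, thresholdPercent)
    remaining := maxTime - time.Since(roundTimeStamp)
    return remaining < 0
}

For example, with a 6-second round and a threshold of 85% (the percentage value itself is just an illustration, not a claim about the configured constant), the budget is 5.1s; once more than that has elapsed since the round timestamp, the subround marks the round as canceled via SetRoundCanceled(true).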
container.SetNodeRedundancyHandler(nodeRedundancyMock) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.True(t, r) @@ -434,7 +436,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetLeaderErr(t container := consensus.InitConsensusCore() container.SetValidatorGroupSelector(validatorGroupSelector) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.False(t, r) @@ -444,13 +446,13 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrueWhenIsNotInTheConsen t.Parallel() container := consensus.InitConsensusCore() - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() consensusState.SetSelfPubKey(consensusState.SelfPubKey() + "X") ch := make(chan bool, 1) sr, _ := defaultSubround(consensusState, ch, container) - srStartRound := *defaultWithoutErrorSubroundStartRoundFromSubround(sr) + srStartRound := defaultWithoutErrorSubroundStartRoundFromSubround(sr) r := srStartRound.InitCurrentRound() assert.True(t, r) @@ -468,7 +470,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenTimeIsOut(t *te container := consensus.InitConsensusCore() container.SetRoundHandler(roundHandlerMock) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.False(t, r) @@ -486,7 +488,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrue(t *testing.T) { container := consensus.InitConsensusCore() container.SetBootStrapper(bootstrapperMock) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) r := srStartRound.InitCurrentRound() assert.True(t, r) @@ -505,12 +507,12 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { SetStringValueHandler: func(key string, value string) { if key == common.MetricConsensusState { wasCalled = true - assert.Equal(t, value, "not in consensus group") + assert.Equal(t, "not in consensus group", value) } }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) consensusState.SetSelfPubKey("not in consensus") sr, _ := spos.NewSubround( -1, @@ -528,11 +530,11 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { appStatusHandler, ) - srStartRound, _ := bls.NewSubroundStartRound( + srStartRound, _ := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) srStartRound.Check() assert.True(t, wasCalled) @@ -562,7 +564,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) consensusState.SetSelfPubKey("B") sr, _ := spos.NewSubround( -1, @@ -580,11 +582,11 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { appStatusHandler, ) - srStartRound, _ := bls.NewSubroundStartRound( + srStartRound, _ := v2.NewSubroundStartRound( sr, - 
bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) srStartRound.Check() assert.True(t, wasCalled) @@ -601,7 +603,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { SetStringValueHandler: func(key string, value string) { if key == common.MetricConsensusState { wasCalled = true - assert.Equal(t, value, "participant") + assert.Equal(t, "participant", value) } }, IncrementHandler: func(key string) { @@ -611,7 +613,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { return string(pkBytes) == consensusState.SelfPubKey() } @@ -631,11 +633,11 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { appStatusHandler, ) - srStartRound, _ := bls.NewSubroundStartRound( + srStartRound, _ := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) srStartRound.Check() assert.True(t, wasCalled) @@ -653,15 +655,15 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { SetStringValueHandler: func(key string, value string) { if key == common.MetricConsensusState { wasMetricConsensusStateCalled = true - assert.Equal(t, value, "proposer") + assert.Equal(t, "proposer", value) } if key == common.MetricConsensusRoundState { cntMetricConsensusRoundStateCalled++ switch cntMetricConsensusRoundStateCalled { case 1: - assert.Equal(t, value, "") + assert.Equal(t, "", value) case 2: - assert.Equal(t, value, "proposed") + assert.Equal(t, "proposed", value) default: assert.Fail(t, "should have been called only twice") } @@ -674,7 +676,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) leader, _ := consensusState.GetLeader() consensusState.SetSelfPubKey(leader) sr, _ := spos.NewSubround( @@ -693,11 +695,11 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { appStatusHandler, ) - srStartRound, _ := bls.NewSubroundStartRound( + srStartRound, _ := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) srStartRound.Check() assert.True(t, wasMetricConsensusStateCalled) @@ -716,15 +718,15 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { SetStringValueHandler: func(key string, value string) { if key == common.MetricConsensusState { wasMetricConsensusStateCalled = true - assert.Equal(t, value, "proposer") + assert.Equal(t, "proposer", value) } if key == common.MetricConsensusRoundState { cntMetricConsensusRoundStateCalled++ switch cntMetricConsensusRoundStateCalled { case 1: - assert.Equal(t, value, "") + assert.Equal(t, "", value) case 2: - assert.Equal(t, value, "proposed") + assert.Equal(t, "proposed", value) default: assert.Fail(t, "should have been called only twice") } @@ -737,7 +739,7 @@ func 
TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { }, } ch := make(chan bool, 1) - consensusState := initConsensusStateWithKeysHandler(keysHandler) + consensusState := initializers.InitConsensusStateWithKeysHandler(keysHandler) leader, _ := consensusState.GetLeader() consensusState.SetSelfPubKey(leader) keysHandler.IsKeyManagedByCurrentNodeCalled = func(pkBytes []byte) bool { @@ -759,11 +761,11 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { appStatusHandler, ) - srStartRound, _ := bls.NewSubroundStartRound( + srStartRound, _ := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) srStartRound.Check() assert.True(t, wasMetricConsensusStateCalled) @@ -774,7 +776,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { func buildDefaultSubround(container spos.ConsensusCoreHandler) *spos.Subround { ch := make(chan bool, 1) - consensusState := initConsensusState() + consensusState := initializers.InitConsensusState() sr, _ := spos.NewSubround( -1, bls.SrStartRound, @@ -808,11 +810,11 @@ func TestSubroundStartRound_GenerateNextConsensusGroupShouldErrNilHeader(t *test container.SetBlockchain(chainHandlerMock) sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) @@ -835,11 +837,11 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenResetErr(t *tes container.SetSigningHandler(signingHandlerMock) sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) @@ -871,11 +873,11 @@ func TestSubroundStartRound_IndexRoundIfNeededFailShardIdForEpoch(t *testing.T) sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) @@ -915,11 +917,11 @@ func TestSubroundStartRound_IndexRoundIfNeededFailGetValidatorsIndexes(t *testin sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) @@ -954,11 +956,11 @@ func TestSubroundStartRound_IndexRoundIfNeededShouldFullyWork(t *testing.T) { sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) @@ -997,11 +999,11 @@ func TestSubroundStartRound_IndexRoundIfNeededDifferentShardIdFail(t *testing.T) sr := buildDefaultSubround(container) - 
startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) @@ -1049,11 +1051,11 @@ func TestSubroundStartRound_changeEpoch(t *testing.T) { sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) startRound.ChangeEpoch(1) @@ -1078,11 +1080,11 @@ func TestSubroundStartRound_changeEpoch(t *testing.T) { sr := buildDefaultSubround(container) - startRound, err := bls.NewSubroundStartRound( + startRound, err := v2.NewSubroundStartRound( sr, - bls.ProcessingThresholdPercent, + v2.ProcessingThresholdPercent, &testscommon.SentSignatureTrackerStub{}, - &mock.SposWorkerMock{}, + &consensus.SposWorkerMock{}, ) require.Nil(t, err) startRound.ChangeEpoch(1) @@ -1105,7 +1107,7 @@ func TestSubroundStartRound_GenerateNextConsensusGroupShouldReturnErr(t *testing container := consensus.InitConsensusCore() container.SetValidatorGroupSelector(validatorGroupSelector) - srStartRound := *initSubroundStartRoundWithContainer(container) + srStartRound := initSubroundStartRoundWithContainer(container) err2 := srStartRound.GenerateNextConsensusGroup(0) diff --git a/consensus/spos/consensusCoreValidator_test.go b/consensus/spos/consensusCoreValidator_test.go index d35e83c4acb..5594b831311 100644 --- a/consensus/spos/consensusCoreValidator_test.go +++ b/consensus/spos/consensusCoreValidator_test.go @@ -25,9 +25,9 @@ func initConsensusDataContainer() *ConsensusCore { chronologyHandlerMock := consensusMocks.InitChronologyHandlerMock() multiSignerMock := cryptoMocks.NewMultiSigner() hasherMock := &hashingMocks.HasherMock{} - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensusMocks.RoundHandlerMock{} shardCoordinatorMock := mock.ShardCoordinatorMock{} - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} validatorGroupSelector := &shardingMocks.NodesCoordinatorMock{} antifloodHandler := &mock.P2PAntifloodHandlerStub{} peerHonestyHandler := &testscommon.PeerHonestyHandlerStub{} diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index fa806d9c840..a7a8ee3de65 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -320,6 +320,11 @@ func (cns *ConsensusState) GetData() []byte { return cns.Data } +// SetData sets the Data of the consensusState +func (cns *ConsensusState) SetData(data []byte) { + cns.Data = data +} + // IsMultiKeyLeaderInCurrentRound method checks if one of the nodes which are controlled by this instance // is leader in the current round func (cns *ConsensusState) IsMultiKeyLeaderInCurrentRound() bool { @@ -343,9 +348,9 @@ func (cns *ConsensusState) IsLeaderJobDone(currentSubroundId int) bool { return cns.IsJobDone(leader, currentSubroundId) } -// isMultiKeyJobDone method returns true if all the nodes controlled by this instance finished the current job for +// IsMultiKeyJobDone method returns true if all the nodes controlled by this instance finished the current job for // the current subround and false otherwise -func (cns *ConsensusState) isMultiKeyJobDone(currentSubroundId int) bool { +func (cns 
*ConsensusState) IsMultiKeyJobDone(currentSubroundId int) bool { for _, validator := range cns.consensusGroup { if !cns.keysHandler.IsKeyManagedByCurrentNode([]byte(validator)) { continue @@ -368,7 +373,7 @@ func (cns *ConsensusState) IsSelfJobDone(currentSubroundID int) bool { multiKeyJobDone := true if cns.IsMultiKeyInConsensusGroup() { - multiKeyJobDone = cns.isMultiKeyJobDone(currentSubroundID) + multiKeyJobDone = cns.IsMultiKeyJobDone(currentSubroundID) } return selfJobDone && multiKeyJobDone @@ -384,3 +389,78 @@ func (cns *ConsensusState) GetMultikeyRedundancyStepInReason() string { func (cns *ConsensusState) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) { cns.keysHandler.ResetRoundsWithoutReceivedMessages(pkBytes, pid) } + +// GetRoundCanceled returns the state of the current round +func (cns *ConsensusState) GetRoundCanceled() bool { + return cns.RoundCanceled +} + +// SetRoundCanceled sets the state of the current round +func (cns *ConsensusState) SetRoundCanceled(roundCanceled bool) { + cns.RoundCanceled = roundCanceled +} + +// GetRoundIndex returns the index of the current round +func (cns *ConsensusState) GetRoundIndex() int64 { + return cns.RoundIndex +} + +// SetRoundIndex sets the index of the current round +func (cns *ConsensusState) SetRoundIndex(roundIndex int64) { + cns.RoundIndex = roundIndex +} + +// GetRoundTimeStamp returns the time stamp of the current round +func (cns *ConsensusState) GetRoundTimeStamp() time.Time { + return cns.RoundTimeStamp +} + +// SetRoundTimeStamp sets the time stamp of the current round +func (cns *ConsensusState) SetRoundTimeStamp(roundTimeStamp time.Time) { + cns.RoundTimeStamp = roundTimeStamp +} + +// GetExtendedCalled returns the state of the extended called +func (cns *ConsensusState) GetExtendedCalled() bool { + return cns.ExtendedCalled +} + +// SetExtendedCalled sets the state of the extended called +func (cns *ConsensusState) SetExtendedCalled(extendedCalled bool) { + cns.ExtendedCalled = extendedCalled +} + +// GetBody returns the body of the current round +func (cns *ConsensusState) GetBody() data.BodyHandler { + return cns.Body +} + +// SetBody sets the body of the current round +func (cns *ConsensusState) SetBody(body data.BodyHandler) { + cns.Body = body +} + +// GetHeader returns the header of the current round +func (cns *ConsensusState) GetHeader() data.HeaderHandler { + return cns.Header +} + +// GetWaitingAllSignaturesTimeOut returns the state of the waiting all signatures time out +func (cns *ConsensusState) GetWaitingAllSignaturesTimeOut() bool { + return cns.WaitingAllSignaturesTimeOut +} + +// SetWaitingAllSignaturesTimeOut sets the state of the waiting all signatures time out +func (cns *ConsensusState) SetWaitingAllSignaturesTimeOut(waitingAllSignaturesTimeOut bool) { + cns.WaitingAllSignaturesTimeOut = waitingAllSignaturesTimeOut +} + +// SetHeader sets the header of the current round +func (cns *ConsensusState) SetHeader(header data.HeaderHandler) { + cns.Header = header +} + +// IsInterfaceNil returns true if there is no value under the interface +func (cns *ConsensusState) IsInterfaceNil() bool { + return cns == nil +} diff --git a/consensus/spos/export_test.go b/consensus/spos/export_test.go index 73634ae2af5..1ad0bbc67d5 100644 --- a/consensus/spos/export_test.go +++ b/consensus/spos/export_test.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/consensus" 
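With these accessors in place, callers no longer reach into the exported fields of ConsensusState; round bookkeeping goes through getters and setters, which is what lets the struct sit behind the ConsensusStateHandler interface introduced below. An illustrative fragment only (cns stands for any value exposing the new accessors, and the time import is assumed):

    // cancel the round and record when that happened
    cns.SetRoundCanceled(true)
    cns.SetRoundTimeStamp(time.Now())

    // readers observe the same state through the getters
    if cns.GetRoundCanceled() {
        cns.SetRoundIndex(cns.GetRoundIndex() + 1)
    }
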
"github.com/multiversx/mx-chain-go/process" ) @@ -271,13 +272,3 @@ func (cmv *consensusMessageValidator) GetNumOfMessageTypeForPublicKey(pk []byte, func (cmv *consensusMessageValidator) ResetConsensusMessages() { cmv.resetConsensusMessages() } - -// IsSelfLeaderInCurrentRound - -func (sr *Subround) IsSelfLeaderInCurrentRound() bool { - return sr.isSelfLeaderInCurrentRound() -} - -// IsMultiKeyJobDone - -func (cns *ConsensusState) IsMultiKeyJobDone(currentSubroundId int) bool { - return cns.isMultiKeyJobDone(currentSubroundId) -} diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 028852c3116..e294ca96212 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" cryptoCommon "github.com/multiversx/mx-chain-go/common/crypto" "github.com/multiversx/mx-chain-go/consensus" @@ -118,7 +119,7 @@ type WorkerHandler interface { // ReceivedHeader method is a wired method through which worker will receive headers from network ReceivedHeader(headerHandler data.HeaderHandler, headerHash []byte) // ResetConsensusMessages resets at the start of each round all the previous consensus messages received and equivalent messages, keeping the provided proofs - ResetConsensusMessages(currentHash []byte, prevHash []byte) + ResetConsensusMessages() // IsInterfaceNil returns true if there is no value under the interface IsInterfaceNil() bool } @@ -169,3 +170,99 @@ type EquivalentMessagesDebugger interface { DeleteEquivalentMessage(headerHash []byte) IsInterfaceNil() bool } + +// ConsensusStateHandler encapsulates all needed data for the Consensus +type ConsensusStateHandler interface { + ResetConsensusState() + AddReceivedHeader(headerHandler data.HeaderHandler) + GetReceivedHeaders() []data.HeaderHandler + AddMessageWithSignature(key string, message p2p.MessageP2P) + GetMessageWithSignature(key string) (p2p.MessageP2P, bool) + IsNodeLeaderInCurrentRound(node string) bool + GetLeader() (string, error) + GetNextConsensusGroup( + randomSource []byte, + round uint64, + shardId uint32, + nodesCoordinator nodesCoordinator.NodesCoordinator, + epoch uint32, + ) (string, []string, error) + IsConsensusDataSet() bool + IsConsensusDataEqual(data []byte) bool + IsJobDone(node string, currentSubroundId int) bool + IsSubroundFinished(subroundID int) bool + IsNodeSelf(node string) bool + IsBlockBodyAlreadyReceived() bool + IsHeaderAlreadyReceived() bool + CanDoSubroundJob(currentSubroundId int) bool + CanProcessReceivedMessage(cnsDta *consensus.Message, currentRoundIndex int64, currentSubroundId int) bool + GenerateBitmap(subroundId int) []byte + ProcessingBlock() bool + SetProcessingBlock(processingBlock bool) + GetData() []byte + SetData(data []byte) + IsMultiKeyLeaderInCurrentRound() bool + IsLeaderJobDone(currentSubroundId int) bool + IsMultiKeyJobDone(currentSubroundId int) bool + IsSelfJobDone(currentSubroundID int) bool + GetMultikeyRedundancyStepInReason() string + ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) + GetRoundCanceled() bool + SetRoundCanceled(state bool) + GetRoundIndex() int64 + SetRoundIndex(roundIndex int64) + GetRoundTimeStamp() time.Time + SetRoundTimeStamp(roundTimeStamp time.Time) + GetExtendedCalled() bool + GetBody() data.BodyHandler + SetBody(body data.BodyHandler) + GetHeader() data.HeaderHandler + SetHeader(header 
data.HeaderHandler) + GetWaitingAllSignaturesTimeOut() bool + SetWaitingAllSignaturesTimeOut(bool) + RoundConsensusHandler + RoundStatusHandler + RoundThresholdHandler + IsInterfaceNil() bool +} + +// RoundConsensusHandler encapsulates the methods needed for a consensus round +type RoundConsensusHandler interface { + ConsensusGroupIndex(pubKey string) (int, error) + SelfConsensusGroupIndex() (int, error) + SetEligibleList(eligibleList map[string]struct{}) + ConsensusGroup() []string + SetConsensusGroup(consensusGroup []string) + SetLeader(leader string) + ConsensusGroupSize() int + SetConsensusGroupSize(consensusGroupSize int) + SelfPubKey() string + SetSelfPubKey(selfPubKey string) + JobDone(key string, subroundId int) (bool, error) + SetJobDone(key string, subroundId int, value bool) error + SelfJobDone(subroundId int) (bool, error) + IsNodeInConsensusGroup(node string) bool + IsNodeInEligibleList(node string) bool + ComputeSize(subroundId int) int + ResetRoundState() + IsMultiKeyInConsensusGroup() bool + IsKeyManagedBySelf(pkBytes []byte) bool + IncrementRoundsWithoutReceivedMessages(pkBytes []byte) + GetKeysHandler() consensus.KeysHandler + Leader() string +} + +// RoundStatusHandler encapsulates the methods needed for the status of a subround +type RoundStatusHandler interface { + Status(subroundId int) SubroundStatus + SetStatus(subroundId int, subroundStatus SubroundStatus) + ResetRoundStatus() +} + +// RoundThresholdHandler encapsulates the methods needed for the round consensus threshold +type RoundThresholdHandler interface { + Threshold(subroundId int) int + SetThreshold(subroundId int, threshold int) + FallbackThreshold(subroundId int) int + SetFallbackThreshold(subroundId int, threshold int) +} diff --git a/consensus/spos/roundConsensus.go b/consensus/spos/roundConsensus.go index cda20e33224..503eb0b2a2a 100644 --- a/consensus/spos/roundConsensus.go +++ b/consensus/spos/roundConsensus.go @@ -234,3 +234,8 @@ func (rcns *roundConsensus) IsKeyManagedBySelf(pkBytes []byte) bool { func (rcns *roundConsensus) IncrementRoundsWithoutReceivedMessages(pkBytes []byte) { rcns.keysHandler.IncrementRoundsWithoutReceivedMessages(pkBytes) } + +// GetKeysHandler returns the keysHandler instance +func (rcns *roundConsensus) GetKeysHandler() consensus.KeysHandler { + return rcns.keysHandler +} diff --git a/consensus/spos/roundStatus.go b/consensus/spos/roundStatus.go index 8517396904a..7d3b67fdc15 100644 --- a/consensus/spos/roundStatus.go +++ b/consensus/spos/roundStatus.go @@ -5,7 +5,7 @@ import ( ) // SubroundStatus defines the type used to refer the state of the current subround -type SubroundStatus int +type SubroundStatus = int const ( // SsNotFinished defines the un-finished state of the subround diff --git a/consensus/spos/scheduledProcessor_test.go b/consensus/spos/scheduledProcessor_test.go index 7316209921b..ed1f95287a2 100644 --- a/consensus/spos/scheduledProcessor_test.go +++ b/consensus/spos/scheduledProcessor_test.go @@ -8,9 +8,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/consensus" + "github.com/stretchr/testify/require" ) @@ -30,7 +32,7 @@ func TestNewScheduledProcessorWrapper_NilSyncTimerShouldErr(t *testing.T) { args := ScheduledProcessorWrapperArgs{ SyncTimer: nil, 
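The block above splits the consensus state surface into ConsensusStateHandler plus the embedded RoundConsensusHandler, RoundStatusHandler and RoundThresholdHandler, so consumers can depend on behaviour rather than on *spos.ConsensusState (Subround itself switches to embedding this interface later in the diff). Turning SubroundStatus into an alias for int keeps existing int-based status values and mocks compiling. A hedged sketch of a consumer written against the new interface; the package, helper and its name are illustrative, not part of this change:

    package example

    import "github.com/multiversx/mx-chain-go/consensus/spos"

    // markSubroundDone only needs the behaviour exposed by the interface,
    // not the concrete consensus state implementation.
    func markSubroundDone(state spos.ConsensusStateHandler, subroundID int) {
        if state.IsSubroundFinished(subroundID) {
            return
        }
        // SetStatus comes from the embedded RoundStatusHandler; because
        // SubroundStatus is now an alias of int, mocks that declare the
        // parameter as plain int still satisfy the interface.
        state.SetStatus(subroundID, spos.SsFinished)
    }
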
Processor: &testscommon.BlockProcessorStub{}, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, err := NewScheduledProcessorWrapper(args) @@ -42,9 +44,9 @@ func TestNewScheduledProcessorWrapper_NilBlockProcessorShouldErr(t *testing.T) { t.Parallel() args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{}, + SyncTimer: &consensus.SyncTimerMock{}, Processor: nil, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, err := NewScheduledProcessorWrapper(args) @@ -56,7 +58,7 @@ func TestNewScheduledProcessorWrapper_NilRoundTimeDurationHandlerShouldErr(t *te t.Parallel() args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{}, + SyncTimer: &consensus.SyncTimerMock{}, Processor: &testscommon.BlockProcessorStub{}, RoundTimeDurationHandler: nil, } @@ -70,9 +72,9 @@ func TestNewScheduledProcessorWrapper_NilBlockProcessorOK(t *testing.T) { t.Parallel() args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{}, + SyncTimer: &consensus.SyncTimerMock{}, Processor: &testscommon.BlockProcessorStub{}, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, err := NewScheduledProcessorWrapper(args) @@ -85,14 +87,14 @@ func TestScheduledProcessorWrapper_IsProcessedOKEarlyExit(t *testing.T) { called := atomic.Flag{} args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{ + SyncTimer: &consensus.SyncTimerMock{ CurrentTimeCalled: func() time.Time { called.SetValue(true) return time.Now() }, }, Processor: &testscommon.BlockProcessorStub{}, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, err := NewScheduledProcessorWrapper(args) @@ -112,13 +114,13 @@ func TestScheduledProcessorWrapper_IsProcessedOKEarlyExit(t *testing.T) { func defaultScheduledProcessorWrapperArgs() ScheduledProcessorWrapperArgs { return ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{ + SyncTimer: &consensus.SyncTimerMock{ CurrentTimeCalled: func() time.Time { return time.Now() }, }, Processor: &testscommon.BlockProcessorStub{}, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } } @@ -227,9 +229,9 @@ func TestScheduledProcessorWrapper_StatusGetterAndSetter(t *testing.T) { t.Parallel() args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{}, + SyncTimer: &consensus.SyncTimerMock{}, Processor: &testscommon.BlockProcessorStub{}, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, _ := NewScheduledProcessorWrapper(args) @@ -250,14 +252,14 @@ func TestScheduledProcessorWrapper_StartScheduledProcessingHeaderV1ProcessingOK( processScheduledCalled := atomic.Flag{} args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{}, + SyncTimer: &consensus.SyncTimerMock{}, Processor: &testscommon.BlockProcessorStub{ ProcessScheduledBlockCalled: func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { processScheduledCalled.SetValue(true) return nil }, }, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, _ := NewScheduledProcessorWrapper(args) @@ -276,14 +278,14 @@ func TestScheduledProcessorWrapper_StartScheduledProcessingHeaderV2ProcessingWit 
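The mock relocation applied throughout this diff shows up here as well: RoundHandlerMock and SyncTimerMock now come from testscommon/consensus, and the consensus/mock import is dropped where it is no longer needed. Condensed from the hunks above, with the import alias used by this file:

    import "github.com/multiversx/mx-chain-go/testscommon/consensus"

    args := ScheduledProcessorWrapperArgs{
        SyncTimer:                &consensus.SyncTimerMock{},
        Processor:                &testscommon.BlockProcessorStub{},
        RoundTimeDurationHandler: &consensus.RoundHandlerMock{},
    }
    sp, err := NewScheduledProcessorWrapper(args)
    require.Nil(t, err)
    require.NotNil(t, sp)
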
processScheduledCalled := atomic.Flag{} args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{}, + SyncTimer: &consensus.SyncTimerMock{}, Processor: &testscommon.BlockProcessorStub{ ProcessScheduledBlockCalled: func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { processScheduledCalled.SetValue(true) return errors.New("processing error") }, }, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, _ := NewScheduledProcessorWrapper(args) @@ -304,14 +306,14 @@ func TestScheduledProcessorWrapper_StartScheduledProcessingHeaderV2ProcessingOK( processScheduledCalled := atomic.Flag{} args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{}, + SyncTimer: &consensus.SyncTimerMock{}, Processor: &testscommon.BlockProcessorStub{ ProcessScheduledBlockCalled: func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { processScheduledCalled.SetValue(true) return nil }, }, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } sp, _ := NewScheduledProcessorWrapper(args) @@ -333,7 +335,7 @@ func TestScheduledProcessorWrapper_StartScheduledProcessingHeaderV2ForceStopped( processScheduledCalled := atomic.Flag{} args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{ + SyncTimer: &consensus.SyncTimerMock{ CurrentTimeCalled: func() time.Time { return time.Now() }, @@ -350,7 +352,7 @@ func TestScheduledProcessorWrapper_StartScheduledProcessingHeaderV2ForceStopped( } }, }, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } spw, err := NewScheduledProcessorWrapper(args) @@ -374,7 +376,7 @@ func TestScheduledProcessorWrapper_StartScheduledProcessingHeaderV2ForceStopAfte processScheduledCalled := atomic.Flag{} args := ScheduledProcessorWrapperArgs{ - SyncTimer: &mock.SyncTimerMock{ + SyncTimer: &consensus.SyncTimerMock{ CurrentTimeCalled: func() time.Time { return time.Now() }, @@ -386,7 +388,7 @@ func TestScheduledProcessorWrapper_StartScheduledProcessingHeaderV2ForceStopAfte return nil }, }, - RoundTimeDurationHandler: &mock.RoundHandlerMock{}, + RoundTimeDurationHandler: &consensus.RoundHandlerMock{}, } spw, err := NewScheduledProcessorWrapper(args) diff --git a/consensus/spos/sposFactory/sposFactory.go b/consensus/spos/sposFactory/sposFactory.go index 0dd5e10011b..bb2d409a97f 100644 --- a/consensus/spos/sposFactory/sposFactory.go +++ b/consensus/spos/sposFactory/sposFactory.go @@ -12,48 +12,10 @@ import ( "github.com/multiversx/mx-chain-go/consensus/broadcast" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" - "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" ) -// GetSubroundsFactory returns a subrounds factory depending on the given parameter -func GetSubroundsFactory( - consensusDataContainer spos.ConsensusCoreHandler, - consensusState *spos.ConsensusState, - worker spos.WorkerHandler, - consensusType string, - appStatusHandler core.AppStatusHandler, - outportHandler outport.OutportHandler, - sentSignatureTracker spos.SentSignaturesTracker, - chainID []byte, - currentPid core.PeerID, - signatureThrottler core.Throttler, -) (spos.SubroundsFactory, error) { - switch consensusType { - case blsConsensusType: - subRoundFactoryBls, err := bls.NewSubroundsFactory( - 
consensusDataContainer, - consensusState, - worker, - chainID, - currentPid, - appStatusHandler, - sentSignatureTracker, - signatureThrottler, - ) - if err != nil { - return nil, err - } - - subRoundFactoryBls.SetOutportHandler(outportHandler) - - return subRoundFactoryBls, nil - default: - return nil, ErrInvalidConsensusType - } -} - // GetConsensusCoreFactory returns a consensus service depending on the given parameter func GetConsensusCoreFactory(consensusType string) (spos.ConsensusService, error) { switch consensusType { @@ -89,7 +51,7 @@ func GetBroadcastMessenger( LeaderCacheSize: maxDelayCacheSize, ValidatorCacheSize: maxDelayCacheSize, AlarmScheduler: alarmScheduler, - Config: config, + Config: config, } delayedBroadcaster, err := broadcast.NewDelayedBlockBroadcaster(dbbArgs) diff --git a/consensus/spos/sposFactory/sposFactory_test.go b/consensus/spos/sposFactory/sposFactory_test.go index 1e17d29f03f..1c05ff64c6f 100644 --- a/consensus/spos/sposFactory/sposFactory_test.go +++ b/consensus/spos/sposFactory/sposFactory_test.go @@ -12,14 +12,10 @@ import ( "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" - dataRetrieverMocks "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/testscommon" - testscommonConsensus "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" - "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/pool" - statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" ) var currentPid = core.PeerID("pid") @@ -42,102 +38,6 @@ func TestGetConsensusCoreFactory_BlsShouldWork(t *testing.T) { assert.False(t, check.IfNil(csf)) } -func TestGetSubroundsFactory_BlsNilConsensusCoreShouldErr(t *testing.T) { - t.Parallel() - - worker := &mock.SposWorkerMock{} - consensusType := consensus.BlsConsensusType - statusHandler := statusHandlerMock.NewAppStatusHandlerMock() - chainID := []byte("chain-id") - indexer := &outport.OutportStub{} - sf, err := sposFactory.GetSubroundsFactory( - nil, - &spos.ConsensusState{}, - worker, - consensusType, - statusHandler, - indexer, - &testscommon.SentSignatureTrackerStub{}, - chainID, - currentPid, - &dataRetrieverMocks.ThrottlerStub{}, - ) - - assert.Nil(t, sf) - assert.Equal(t, spos.ErrNilConsensusCore, err) -} - -func TestGetSubroundsFactory_BlsNilStatusHandlerShouldErr(t *testing.T) { - t.Parallel() - - consensusCore := testscommonConsensus.InitConsensusCore() - worker := &mock.SposWorkerMock{} - consensusType := consensus.BlsConsensusType - chainID := []byte("chain-id") - indexer := &outport.OutportStub{} - sf, err := sposFactory.GetSubroundsFactory( - consensusCore, - &spos.ConsensusState{}, - worker, - consensusType, - nil, - indexer, - &testscommon.SentSignatureTrackerStub{}, - chainID, - currentPid, - &dataRetrieverMocks.ThrottlerStub{}, - ) - - assert.Nil(t, sf) - assert.Equal(t, spos.ErrNilAppStatusHandler, err) -} - -func TestGetSubroundsFactory_BlsShouldWork(t *testing.T) { - t.Parallel() - - consensusCore := testscommonConsensus.InitConsensusCore() - worker := &mock.SposWorkerMock{} - consensusType := consensus.BlsConsensusType - statusHandler := statusHandlerMock.NewAppStatusHandlerMock() - chainID := []byte("chain-id") - indexer := &outport.OutportStub{} - sf, err := 
sposFactory.GetSubroundsFactory( - consensusCore, - &spos.ConsensusState{}, - worker, - consensusType, - statusHandler, - indexer, - &testscommon.SentSignatureTrackerStub{}, - chainID, - currentPid, - &dataRetrieverMocks.ThrottlerStub{}, - ) - assert.Nil(t, err) - assert.False(t, check.IfNil(sf)) -} - -func TestGetSubroundsFactory_InvalidConsensusTypeShouldErr(t *testing.T) { - t.Parallel() - - consensusType := "invalid" - sf, err := sposFactory.GetSubroundsFactory( - nil, - nil, - nil, - consensusType, - nil, - nil, - nil, - nil, - currentPid, - &dataRetrieverMocks.ThrottlerStub{}, - ) - - assert.Nil(t, sf) - assert.Equal(t, sposFactory.ErrInvalidConsensusType, err) -} - func TestGetBroadcastMessenger_ShardShouldWork(t *testing.T) { t.Parallel() diff --git a/consensus/spos/subround.go b/consensus/spos/subround.go index 1f06191a2c5..00b2c55fe6c 100644 --- a/consensus/spos/subround.go +++ b/consensus/spos/subround.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/consensus" ) @@ -22,7 +23,7 @@ const ( // situation of the Subround and Check function will decide if in this Subround the consensus is achieved type Subround struct { ConsensusCoreHandler - *ConsensusState + ConsensusStateHandler previous int current int @@ -50,7 +51,7 @@ func NewSubround( startTime int64, endTime int64, name string, - consensusState *ConsensusState, + consensusState ConsensusStateHandler, consensusStateChangedChannel chan bool, executeStoredMessages func(), container ConsensusCoreHandler, @@ -72,7 +73,7 @@ func NewSubround( sr := Subround{ ConsensusCoreHandler: container, - ConsensusState: consensusState, + ConsensusStateHandler: consensusState, previous: previous, current: current, next: next, @@ -93,7 +94,7 @@ func NewSubround( } func checkNewSubroundParams( - state *ConsensusState, + state ConsensusStateHandler, consensusStateChangedChannel chan bool, executeStoredMessages func(), container ConsensusCoreHandler, @@ -150,7 +151,7 @@ func (sr *Subround) DoWork(ctx context.Context, roundHandler consensus.RoundHand } case <-time.After(roundHandler.RemainingTime(startTime, maxTime)): if sr.Extend != nil { - sr.RoundCanceled = true + sr.SetRoundCanceled(true) sr.Extend(sr.current) } @@ -211,7 +212,7 @@ func (sr *Subround) ConsensusChannel() chan bool { // GetAssociatedPid returns the associated PeerID to the provided public key bytes func (sr *Subround) GetAssociatedPid(pkBytes []byte) core.PeerID { - return sr.keysHandler.GetAssociatedPid(pkBytes) + return sr.GetKeysHandler().GetAssociatedPid(pkBytes) } // ShouldConsiderSelfKeyInConsensus returns true if current machine is the main one, or it is a backup machine but the main @@ -235,11 +236,11 @@ func (sr *Subround) IsSelfInConsensusGroup() bool { // IsSelfLeader returns true is the current node is leader is single key or in // multi-key mode func (sr *Subround) IsSelfLeader() bool { - return sr.isSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() + return sr.IsSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() } -// isSelfLeaderInCurrentRound method checks if the current node is leader in the current round -func (sr *Subround) isSelfLeaderInCurrentRound() bool { +// IsSelfLeaderInCurrentRound method checks if the current node is leader in the current round +func (sr *Subround) IsSelfLeaderInCurrentRound() bool { return sr.IsNodeLeaderInCurrentRound(sr.SelfPubKey()) && sr.ShouldConsiderSelfKeyInConsensus() } @@ -249,7 +250,7 
@@ func (sr *Subround) GetLeaderStartRoundMessage() string { if sr.IsMultiKeyLeaderInCurrentRound() { return multiKeyStartMsg } - if sr.isSelfLeaderInCurrentRound() { + if sr.IsSelfLeaderInCurrentRound() { return singleKeyStartMsg } diff --git a/consensus/spos/subround_test.go b/consensus/spos/subround_test.go index 2e28b9a0a9d..cd54782643c 100644 --- a/consensus/spos/subround_test.go +++ b/consensus/spos/subround_test.go @@ -9,6 +9,9 @@ import ( "github.com/multiversx/mx-chain-core-go/core" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/consensus/mock" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" @@ -16,8 +19,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/consensus" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var chainID = []byte("chain ID") @@ -594,7 +595,7 @@ func TestSubround_DoWorkShouldReturnFalseWhenJobFunctionIsNotSet(t *testing.T) { } maxTime := time.Now().Add(100 * time.Millisecond) - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus.RoundHandlerMock{} roundHandlerMock.RemainingTimeCalled = func(time.Time, time.Duration) time.Duration { return time.Until(maxTime) } @@ -632,7 +633,7 @@ func TestSubround_DoWorkShouldReturnFalseWhenCheckFunctionIsNotSet(t *testing.T) sr.Check = nil maxTime := time.Now().Add(100 * time.Millisecond) - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus.RoundHandlerMock{} roundHandlerMock.RemainingTimeCalled = func(time.Time, time.Duration) time.Duration { return time.Until(maxTime) } @@ -681,7 +682,7 @@ func testDoWork(t *testing.T, checkDone bool, shouldWork bool) { } maxTime := time.Now().Add(100 * time.Millisecond) - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus.RoundHandlerMock{} roundHandlerMock.RemainingTimeCalled = func(time.Time, time.Duration) time.Duration { return time.Until(maxTime) } @@ -728,7 +729,7 @@ func TestSubround_DoWorkShouldReturnTrueWhenJobIsDoneAndConsensusIsDoneAfterAWhi } maxTime := time.Now().Add(2000 * time.Millisecond) - roundHandlerMock := &mock.RoundHandlerMock{} + roundHandlerMock := &consensus.RoundHandlerMock{} roundHandlerMock.RemainingTimeCalled = func(time.Time, time.Duration) time.Duration { return time.Until(maxTime) } diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index c7ec3124701..dffa665c6b9 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -751,7 +751,7 @@ func (wrk *Worker) Close() error { } // ResetConsensusMessages resets at the start of each round all the previous consensus messages received and equivalent messages, keeping the provided proofs -func (wrk *Worker) ResetConsensusMessages(currentHash []byte, prevHash []byte) { +func (wrk *Worker) ResetConsensusMessages() { wrk.consensusMessageValidator.resetConsensusMessages() wrk.equivalentMessagesDebugger.ResetEquivalentMessages() } diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index b9eada158f8..5fa1355f9e0 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -82,7 +82,7 @@ func createDefaultWorkerArgs(appStatusHandler core.AppStatusHandler) *spos.Worke return nil }, } - syncTimerMock := &mock.SyncTimerMock{} + syncTimerMock := &consensusMocks.SyncTimerMock{} hasher := 
&hashingMocks.HasherMock{} blsService, _ := bls.NewConsensusService() poolAdder := cache.NewCacherMock() @@ -149,8 +149,8 @@ func initWorker(appStatusHandler core.AppStatusHandler) *spos.Worker { return sposWorker } -func initRoundHandlerMock() *mock.RoundHandlerMock { - return &mock.RoundHandlerMock{ +func initRoundHandlerMock() *consensusMocks.RoundHandlerMock { + return &consensusMocks.RoundHandlerMock{ RoundIndex: 0, TimeStampCalled: func() time.Time { return time.Unix(0, 0) @@ -797,7 +797,7 @@ func testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( }, }) - wrk.SetRoundHandler(&mock.RoundHandlerMock{ + wrk.SetRoundHandler(&consensusMocks.RoundHandlerMock{ RoundIndex: 0, TimeDurationCalled: func() time.Duration { return roundDuration diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index eb7887d20da..32092341f10 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/blacklist" "github.com/multiversx/mx-chain-go/consensus/chronology" "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/consensus/spos/bls/proxy" "github.com/multiversx/mx-chain-go/consensus/spos/debug" "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" "github.com/multiversx/mx-chain-go/dataRetriever" @@ -279,29 +280,30 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { return nil, err } - fct, err := sposFactory.GetSubroundsFactory( - consensusDataContainer, - consensusState, - cc.worker, - ccf.config.Consensus.Type, - ccf.statusCoreComponents.AppStatusHandler(), - ccf.statusComponents.OutportHandler(), - ccf.processComponents.SentSignaturesTracker(), - []byte(ccf.coreComponents.ChainID()), - ccf.networkComponents.NetworkMessenger().ID(), - signatureThrottler, - ) + subroundsHandlerArgs := &proxy.SubroundsHandlerArgs{ + Chronology: cc.chronology, + ConsensusCoreHandler: consensusDataContainer, + ConsensusState: consensusState, + Worker: cc.worker, + SignatureThrottler: signatureThrottler, + AppStatusHandler: ccf.statusCoreComponents.AppStatusHandler(), + OutportHandler: ccf.statusComponents.OutportHandler(), + SentSignatureTracker: ccf.processComponents.SentSignaturesTracker(), + EnableEpochsHandler: ccf.coreComponents.EnableEpochsHandler(), + ChainID: []byte(ccf.coreComponents.ChainID()), + CurrentPid: ccf.networkComponents.NetworkMessenger().ID(), + } + + subroundsHandler, err := proxy.NewSubroundsHandler(subroundsHandlerArgs) if err != nil { return nil, err } - err = fct.GenerateSubrounds() + err = subroundsHandler.Start(epoch) if err != nil { return nil, err } - cc.chronology.StartRounds() - err = ccf.addCloserInstances(cc.chronology, cc.bootstrapper, cc.worker, ccf.coreComponents.SyncTimer()) if err != nil { return nil, err diff --git a/factory/consensus/consensusComponents_test.go b/factory/consensus/consensusComponents_test.go index de4aeff58ed..c5e2e0450b9 100644 --- a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -840,28 +840,6 @@ func TestConsensusComponentsFactory_Create(t *testing.T) { require.True(t, strings.Contains(err.Error(), "signing handler")) require.Nil(t, cc) }) - t.Run("GetSubroundsFactory failure should error", func(t *testing.T) { - t.Parallel() - - args := createMockConsensusComponentsFactoryArgs() - statusCoreCompStub, ok := 
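The component factory now goes through the bls proxy instead of sposFactory.GetSubroundsFactory: it assembles SubroundsHandlerArgs from the chronology, consensus core, state and worker, then calls Start(epoch), which appears to take over both the subround generation and the chronology start that were previously explicit (the dedicated cc.chronology.StartRounds() call is removed). Condensed from the hunk above:

    subroundsHandler, err := proxy.NewSubroundsHandler(&proxy.SubroundsHandlerArgs{
        Chronology:           cc.chronology,
        ConsensusCoreHandler: consensusDataContainer,
        ConsensusState:       consensusState,
        Worker:               cc.worker,
        SignatureThrottler:   signatureThrottler,
        AppStatusHandler:     ccf.statusCoreComponents.AppStatusHandler(),
        OutportHandler:       ccf.statusComponents.OutportHandler(),
        SentSignatureTracker: ccf.processComponents.SentSignaturesTracker(),
        EnableEpochsHandler:  ccf.coreComponents.EnableEpochsHandler(),
        ChainID:              []byte(ccf.coreComponents.ChainID()),
        CurrentPid:           ccf.networkComponents.NetworkMessenger().ID(),
    })
    if err != nil {
        return nil, err
    }

    // Start is epoch-aware; per the wiring above it replaces the previous
    // GenerateSubrounds call and the explicit chronology StartRounds call.
    err = subroundsHandler.Start(epoch)
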
args.StatusCoreComponents.(*factoryMocks.StatusCoreComponentsStub) - require.True(t, ok) - cnt := 0 - statusCoreCompStub.AppStatusHandlerCalled = func() core.AppStatusHandler { - cnt++ - if cnt > 4 { - return nil - } - return &statusHandler.AppStatusHandlerStub{} - } - ccf, _ := consensusComp.NewConsensusComponentsFactory(args) - require.NotNil(t, ccf) - - cc, err := ccf.Create() - require.Error(t, err) - require.True(t, strings.Contains(err.Error(), "AppStatusHandler")) - require.Nil(t, cc) - }) t.Run("addCloserInstances failure should error", func(t *testing.T) { t.Parallel() diff --git a/factory/interface.go b/factory/interface.go index 0bbc16f1982..762271f934b 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -14,6 +14,8 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" crypto "github.com/multiversx/mx-chain-crypto-go" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-go/cmd/node/factory" "github.com/multiversx/mx-chain-go/common" cryptoCommon "github.com/multiversx/mx-chain-go/common/crypto" @@ -37,7 +39,6 @@ import ( "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/update" "github.com/multiversx/mx-chain-go/vm" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) // EpochStartNotifier defines which actions should be done for handling new epoch's events @@ -398,7 +399,7 @@ type ConsensusWorker interface { // DisplayStatistics method displays statistics of worker at the end of the round DisplayStatistics() // ResetConsensusMessages resets at the start of each round all the previous consensus messages received and equivalent messages, keeping the provided proofs - ResetConsensusMessages(currentHash []byte, prevHash []byte) + ResetConsensusMessages() // ReceivedHeader method is a wired method through which worker will receive headers from network ReceivedHeader(headerHandler data.HeaderHandler, headerHash []byte) // IsInterfaceNil returns true if there is no value under the interface diff --git a/keysManagement/managedPeersHolder.go b/keysManagement/managedPeersHolder.go index 8156b64c8eb..39f80f6bbaf 100644 --- a/keysManagement/managedPeersHolder.go +++ b/keysManagement/managedPeersHolder.go @@ -12,10 +12,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" crypto "github.com/multiversx/mx-chain-crypto-go" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/redundancy/common" - logger "github.com/multiversx/mx-chain-logger-go" ) var log = logger.GetOrCreate("keysManagement") diff --git a/node/mock/throttlerStub.go b/testscommon/common/throttlerStub.go similarity index 98% rename from node/mock/throttlerStub.go rename to testscommon/common/throttlerStub.go index 24ab94c45c3..f4f5e0a34d0 100644 --- a/node/mock/throttlerStub.go +++ b/testscommon/common/throttlerStub.go @@ -1,4 +1,4 @@ -package mock +package common // ThrottlerStub - type ThrottlerStub struct { diff --git a/testscommon/consensus/consensusStateMock.go b/testscommon/consensus/consensusStateMock.go index 943b0f5b5b4..dae02a0323c 100644 --- a/testscommon/consensus/consensusStateMock.go +++ b/testscommon/consensus/consensusStateMock.go @@ -1,111 +1,608 @@ package consensus import ( + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + 
"github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) // ConsensusStateMock - type ConsensusStateMock struct { - ResetConsensusStateCalled func() - IsNodeLeaderInCurrentRoundCalled func(node string) bool - IsSelfLeaderInCurrentRoundCalled func() bool - GetLeaderCalled func() (string, error) - GetNextConsensusGroupCalled func(randomSource string, vgs nodesCoordinator.NodesCoordinator) ([]string, error) - IsConsensusDataSetCalled func() bool - IsConsensusDataEqualCalled func(data []byte) bool - IsJobDoneCalled func(node string, currentSubroundId int) bool - IsSelfJobDoneCalled func(currentSubroundId int) bool - IsCurrentSubroundFinishedCalled func(currentSubroundId int) bool - IsNodeSelfCalled func(node string) bool - IsBlockBodyAlreadyReceivedCalled func() bool - IsHeaderAlreadyReceivedCalled func() bool - CanDoSubroundJobCalled func(currentSubroundId int) bool - CanProcessReceivedMessageCalled func(cnsDta consensus.Message, currentRoundIndex int32, currentSubroundId int) bool - GenerateBitmapCalled func(subroundId int) []byte - ProcessingBlockCalled func() bool - SetProcessingBlockCalled func(processingBlock bool) - ConsensusGroupSizeCalled func() int - SetThresholdCalled func(subroundId int, threshold int) + ResetConsensusStateCalled func() + IsNodeLeaderInCurrentRoundCalled func(node string) bool + IsSelfLeaderInCurrentRoundCalled func() bool + GetLeaderCalled func() (string, error) + GetNextConsensusGroupCalled func(randomSource []byte, round uint64, shardId uint32, nodesCoordinator nodesCoordinator.NodesCoordinator, epoch uint32) (string, []string, error) + IsConsensusDataSetCalled func() bool + IsConsensusDataEqualCalled func(data []byte) bool + IsJobDoneCalled func(node string, currentSubroundId int) bool + IsSelfJobDoneCalled func(currentSubroundId int) bool + IsCurrentSubroundFinishedCalled func(currentSubroundId int) bool + IsNodeSelfCalled func(node string) bool + IsBlockBodyAlreadyReceivedCalled func() bool + IsHeaderAlreadyReceivedCalled func() bool + CanDoSubroundJobCalled func(currentSubroundId int) bool + CanProcessReceivedMessageCalled func(cnsDta *consensus.Message, currentRoundIndex int64, currentSubroundId int) bool + GenerateBitmapCalled func(subroundId int) []byte + ProcessingBlockCalled func() bool + SetProcessingBlockCalled func(processingBlock bool) + ConsensusGroupSizeCalled func() int + SetThresholdCalled func(subroundId int, threshold int) + AddReceivedHeaderCalled func(headerHandler data.HeaderHandler) + GetReceivedHeadersCalled func() []data.HeaderHandler + AddMessageWithSignatureCalled func(key string, message p2p.MessageP2P) + GetMessageWithSignatureCalled func(key string) (p2p.MessageP2P, bool) + IsSubroundFinishedCalled func(subroundID int) bool + GetDataCalled func() []byte + SetDataCalled func(data []byte) + IsMultiKeyLeaderInCurrentRoundCalled func() bool + IsLeaderJobDoneCalled func(currentSubroundId int) bool + IsMultiKeyJobDoneCalled func(currentSubroundId int) bool + GetMultikeyRedundancyStepInReasonCalled func() string + ResetRoundsWithoutReceivedMessagesCalled func(pkBytes []byte, pid core.PeerID) + GetRoundCanceledCalled func() bool + SetRoundCanceledCalled func(state bool) + GetRoundIndexCalled func() int64 + SetRoundIndexCalled func(roundIndex int64) + GetRoundTimeStampCalled func() time.Time + SetRoundTimeStampCalled func(roundTimeStamp time.Time) + GetExtendedCalledCalled func() bool + GetBodyCalled func() data.BodyHandler + SetBodyCalled 
func(body data.BodyHandler) + GetHeaderCalled func() data.HeaderHandler + SetHeaderCalled func(header data.HeaderHandler) + GetWaitingAllSignaturesTimeOutCalled func() bool + SetWaitingAllSignaturesTimeOutCalled func(b bool) + ConsensusGroupIndexCalled func(pubKey string) (int, error) + SelfConsensusGroupIndexCalled func() (int, error) + SetEligibleListCalled func(eligibleList map[string]struct{}) + ConsensusGroupCalled func() []string + SetConsensusGroupCalled func(consensusGroup []string) + SetLeaderCalled func(leader string) + SetConsensusGroupSizeCalled func(consensusGroupSize int) + SelfPubKeyCalled func() string + SetSelfPubKeyCalled func(selfPubKey string) + JobDoneCalled func(key string, subroundId int) (bool, error) + SetJobDoneCalled func(key string, subroundId int, value bool) error + SelfJobDoneCalled func(subroundId int) (bool, error) + IsNodeInConsensusGroupCalled func(node string) bool + IsNodeInEligibleListCalled func(node string) bool + ComputeSizeCalled func(subroundId int) int + ResetRoundStateCalled func() + IsMultiKeyInConsensusGroupCalled func() bool + IsKeyManagedBySelfCalled func(pkBytes []byte) bool + IncrementRoundsWithoutReceivedMessagesCalled func(pkBytes []byte) + GetKeysHandlerCalled func() consensus.KeysHandler + LeaderCalled func() string + StatusCalled func(subroundId int) int + SetStatusCalled func(subroundId int, subroundStatus int) + ResetRoundStatusCalled func() + ThresholdCalled func(subroundId int) int + FallbackThresholdCalled func(subroundId int) int + SetFallbackThresholdCalled func(subroundId int, threshold int) +} + +// AddReceivedHeader - +func (cnsm *ConsensusStateMock) AddReceivedHeader(headerHandler data.HeaderHandler) { + if cnsm.AddReceivedHeaderCalled != nil { + cnsm.AddReceivedHeaderCalled(headerHandler) + } +} + +// GetReceivedHeaders - +func (cnsm *ConsensusStateMock) GetReceivedHeaders() []data.HeaderHandler { + if cnsm.GetReceivedHeadersCalled != nil { + return cnsm.GetReceivedHeadersCalled() + } + return nil +} + +// AddMessageWithSignature - +func (cnsm *ConsensusStateMock) AddMessageWithSignature(key string, message p2p.MessageP2P) { + if cnsm.AddMessageWithSignatureCalled != nil { + cnsm.AddMessageWithSignatureCalled(key, message) + } +} + +// GetMessageWithSignature - +func (cnsm *ConsensusStateMock) GetMessageWithSignature(key string) (p2p.MessageP2P, bool) { + if cnsm.GetMessageWithSignatureCalled != nil { + return cnsm.GetMessageWithSignatureCalled(key) + } + return nil, false +} + +// IsSubroundFinished - +func (cnsm *ConsensusStateMock) IsSubroundFinished(subroundID int) bool { + if cnsm.IsSubroundFinishedCalled != nil { + return cnsm.IsSubroundFinishedCalled(subroundID) + } + return false +} + +// GetData - +func (cnsm *ConsensusStateMock) GetData() []byte { + if cnsm.GetDataCalled != nil { + return cnsm.GetDataCalled() + } + return nil +} + +// SetData - +func (cnsm *ConsensusStateMock) SetData(data []byte) { + if cnsm.SetDataCalled != nil { + cnsm.SetDataCalled(data) + } +} + +// IsMultiKeyLeaderInCurrentRound - +func (cnsm *ConsensusStateMock) IsMultiKeyLeaderInCurrentRound() bool { + if cnsm.IsMultiKeyLeaderInCurrentRoundCalled != nil { + return cnsm.IsMultiKeyLeaderInCurrentRoundCalled() + } + return false +} + +// IsLeaderJobDone - +func (cnsm *ConsensusStateMock) IsLeaderJobDone(currentSubroundId int) bool { + if cnsm.IsLeaderJobDoneCalled != nil { + return cnsm.IsLeaderJobDoneCalled(currentSubroundId) + } + return false +} + +// IsMultiKeyJobDone - +func (cnsm *ConsensusStateMock) IsMultiKeyJobDone(currentSubroundId 
int) bool { + if cnsm.IsMultiKeyJobDoneCalled != nil { + return cnsm.IsMultiKeyJobDoneCalled(currentSubroundId) + } + return false +} + +// GetMultikeyRedundancyStepInReason - +func (cnsm *ConsensusStateMock) GetMultikeyRedundancyStepInReason() string { + if cnsm.GetMultikeyRedundancyStepInReasonCalled != nil { + return cnsm.GetMultikeyRedundancyStepInReasonCalled() + } + return "" +} + +// ResetRoundsWithoutReceivedMessages - +func (cnsm *ConsensusStateMock) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) { + if cnsm.ResetRoundsWithoutReceivedMessagesCalled != nil { + cnsm.ResetRoundsWithoutReceivedMessagesCalled(pkBytes, pid) + } +} + +// GetRoundCanceled - +func (cnsm *ConsensusStateMock) GetRoundCanceled() bool { + if cnsm.GetRoundCanceledCalled != nil { + return cnsm.GetRoundCanceledCalled() + } + return false +} + +// SetRoundCanceled - +func (cnsm *ConsensusStateMock) SetRoundCanceled(state bool) { + if cnsm.SetRoundCanceledCalled != nil { + cnsm.SetRoundCanceledCalled(state) + } +} + +// GetRoundIndex - +func (cnsm *ConsensusStateMock) GetRoundIndex() int64 { + if cnsm.GetRoundIndexCalled != nil { + return cnsm.GetRoundIndexCalled() + } + return 0 +} + +// SetRoundIndex - +func (cnsm *ConsensusStateMock) SetRoundIndex(roundIndex int64) { + if cnsm.SetRoundIndexCalled != nil { + cnsm.SetRoundIndexCalled(roundIndex) + } +} + +// GetRoundTimeStamp - +func (cnsm *ConsensusStateMock) GetRoundTimeStamp() time.Time { + if cnsm.GetRoundTimeStampCalled != nil { + return cnsm.GetRoundTimeStampCalled() + } + return time.Time{} +} + +// SetRoundTimeStamp - +func (cnsm *ConsensusStateMock) SetRoundTimeStamp(roundTimeStamp time.Time) { + if cnsm.SetRoundTimeStampCalled != nil { + cnsm.SetRoundTimeStampCalled(roundTimeStamp) + } +} + +// GetExtendedCalled - +func (cnsm *ConsensusStateMock) GetExtendedCalled() bool { + if cnsm.GetExtendedCalledCalled != nil { + return cnsm.GetExtendedCalledCalled() + } + return false +} + +// GetBody - +func (cnsm *ConsensusStateMock) GetBody() data.BodyHandler { + if cnsm.GetBodyCalled != nil { + return cnsm.GetBodyCalled() + } + return nil +} + +// SetBody - +func (cnsm *ConsensusStateMock) SetBody(body data.BodyHandler) { + if cnsm.SetBodyCalled != nil { + cnsm.SetBodyCalled(body) + } +} + +// GetHeader - +func (cnsm *ConsensusStateMock) GetHeader() data.HeaderHandler { + if cnsm.GetHeaderCalled != nil { + return cnsm.GetHeaderCalled() + } + return nil +} + +// SetHeader - +func (cnsm *ConsensusStateMock) SetHeader(header data.HeaderHandler) { + if cnsm.SetHeaderCalled != nil { + cnsm.SetHeaderCalled(header) + } +} + +// GetWaitingAllSignaturesTimeOut - +func (cnsm *ConsensusStateMock) GetWaitingAllSignaturesTimeOut() bool { + if cnsm.GetWaitingAllSignaturesTimeOutCalled != nil { + return cnsm.GetWaitingAllSignaturesTimeOutCalled() + } + return false +} + +// SetWaitingAllSignaturesTimeOut - +func (cnsm *ConsensusStateMock) SetWaitingAllSignaturesTimeOut(b bool) { + if cnsm.SetWaitingAllSignaturesTimeOutCalled != nil { + cnsm.SetWaitingAllSignaturesTimeOutCalled(b) + } +} + +// ConsensusGroupIndex - +func (cnsm *ConsensusStateMock) ConsensusGroupIndex(pubKey string) (int, error) { + if cnsm.ConsensusGroupIndexCalled != nil { + return cnsm.ConsensusGroupIndexCalled(pubKey) + } + return 0, nil +} + +// SelfConsensusGroupIndex - +func (cnsm *ConsensusStateMock) SelfConsensusGroupIndex() (int, error) { + if cnsm.SelfConsensusGroupIndexCalled != nil { + return cnsm.SelfConsensusGroupIndexCalled() + } + return 0, nil +} + +// SetEligibleList - 
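Every method on the expanded ConsensusStateMock follows the same defensive pattern: invoke the corresponding *Called hook only when it is set and fall back to a zero value otherwise, so a test that configures a handful of hooks no longer panics on the ones it left nil. Reduced to one representative method plus an assumed usage site (consensusMocks is an assumed import alias for testscommon/consensus):

    // GetRoundIndex -
    func (cnsm *ConsensusStateMock) GetRoundIndex() int64 {
        if cnsm.GetRoundIndexCalled != nil {
            return cnsm.GetRoundIndexCalled()
        }
        return 0 // zero value keeps partially configured mocks usable
    }

    // in a test, only the hooks that matter are provided
    stateMock := &consensusMocks.ConsensusStateMock{
        GetRoundIndexCalled: func() int64 { return 42 },
    }
    _ = stateMock.GetRoundCanceled() // returns false instead of panicking
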
+func (cnsm *ConsensusStateMock) SetEligibleList(eligibleList map[string]struct{}) { + if cnsm.SetEligibleListCalled != nil { + cnsm.SetEligibleListCalled(eligibleList) + } +} + +// ConsensusGroup - +func (cnsm *ConsensusStateMock) ConsensusGroup() []string { + if cnsm.ConsensusGroupCalled != nil { + return cnsm.ConsensusGroupCalled() + } + return nil +} + +// SetConsensusGroup - +func (cnsm *ConsensusStateMock) SetConsensusGroup(consensusGroup []string) { + if cnsm.SetConsensusGroupCalled != nil { + cnsm.SetConsensusGroupCalled(consensusGroup) + } +} + +// SetLeader - +func (cnsm *ConsensusStateMock) SetLeader(leader string) { + if cnsm.SetLeaderCalled != nil { + cnsm.SetLeaderCalled(leader) + } +} + +// SetConsensusGroupSize - +func (cnsm *ConsensusStateMock) SetConsensusGroupSize(consensusGroupSize int) { + if cnsm.SetConsensusGroupSizeCalled != nil { + cnsm.SetConsensusGroupSizeCalled(consensusGroupSize) + } +} + +// SelfPubKey - +func (cnsm *ConsensusStateMock) SelfPubKey() string { + if cnsm.SelfPubKeyCalled != nil { + return cnsm.SelfPubKeyCalled() + } + return "" +} + +// SetSelfPubKey - +func (cnsm *ConsensusStateMock) SetSelfPubKey(selfPubKey string) { + if cnsm.SetSelfPubKeyCalled != nil { + cnsm.SetSelfPubKeyCalled(selfPubKey) + } +} + +// JobDone - +func (cnsm *ConsensusStateMock) JobDone(key string, subroundId int) (bool, error) { + if cnsm.JobDoneCalled != nil { + return cnsm.JobDoneCalled(key, subroundId) + } + return false, nil +} + +// SetJobDone - +func (cnsm *ConsensusStateMock) SetJobDone(key string, subroundId int, value bool) error { + if cnsm.SetJobDoneCalled != nil { + return cnsm.SetJobDoneCalled(key, subroundId, value) + } + return nil +} + +// SelfJobDone - +func (cnsm *ConsensusStateMock) SelfJobDone(subroundId int) (bool, error) { + if cnsm.SelfJobDoneCalled != nil { + return cnsm.SelfJobDoneCalled(subroundId) + } + return false, nil +} + +// IsNodeInConsensusGroup - +func (cnsm *ConsensusStateMock) IsNodeInConsensusGroup(node string) bool { + if cnsm.IsNodeInConsensusGroupCalled != nil { + return cnsm.IsNodeInConsensusGroupCalled(node) + } + return false +} + +// IsNodeInEligibleList - +func (cnsm *ConsensusStateMock) IsNodeInEligibleList(node string) bool { + if cnsm.IsNodeInEligibleListCalled != nil { + return cnsm.IsNodeInEligibleListCalled(node) + } + return false +} + +// ComputeSize - +func (cnsm *ConsensusStateMock) ComputeSize(subroundId int) int { + if cnsm.ComputeSizeCalled != nil { + return cnsm.ComputeSizeCalled(subroundId) + } + return 0 +} + +// ResetRoundState - +func (cnsm *ConsensusStateMock) ResetRoundState() { + if cnsm.ResetRoundStateCalled != nil { + cnsm.ResetRoundStateCalled() + } +} + +// IsMultiKeyInConsensusGroup - +func (cnsm *ConsensusStateMock) IsMultiKeyInConsensusGroup() bool { + if cnsm.IsMultiKeyInConsensusGroupCalled != nil { + return cnsm.IsMultiKeyInConsensusGroupCalled() + } + return false +} + +// IsKeyManagedBySelf - +func (cnsm *ConsensusStateMock) IsKeyManagedBySelf(pkBytes []byte) bool { + if cnsm.IsKeyManagedBySelfCalled != nil { + return cnsm.IsKeyManagedBySelfCalled(pkBytes) + } + return false +} + +// IncrementRoundsWithoutReceivedMessages - +func (cnsm *ConsensusStateMock) IncrementRoundsWithoutReceivedMessages(pkBytes []byte) { + if cnsm.IncrementRoundsWithoutReceivedMessagesCalled != nil { + cnsm.IncrementRoundsWithoutReceivedMessagesCalled(pkBytes) + } +} + +// GetKeysHandler - +func (cnsm *ConsensusStateMock) GetKeysHandler() consensus.KeysHandler { + if cnsm.GetKeysHandlerCalled != nil { + return 
cnsm.GetKeysHandlerCalled() + } + return nil +} + +// Leader - +func (cnsm *ConsensusStateMock) Leader() string { + if cnsm.LeaderCalled != nil { + return cnsm.LeaderCalled() + } + return "" +} + +// Status - +func (cnsm *ConsensusStateMock) Status(subroundId int) int { + if cnsm.StatusCalled != nil { + return cnsm.StatusCalled(subroundId) + } + return 0 +} + +// SetStatus - +func (cnsm *ConsensusStateMock) SetStatus(subroundId int, subroundStatus int) { + if cnsm.SetStatusCalled != nil { + cnsm.SetStatusCalled(subroundId, subroundStatus) + } +} + +// ResetRoundStatus - +func (cnsm *ConsensusStateMock) ResetRoundStatus() { + if cnsm.ResetRoundStatusCalled != nil { + cnsm.ResetRoundStatusCalled() + } +} + +// Threshold - +func (cnsm *ConsensusStateMock) Threshold(subroundId int) int { + if cnsm.ThresholdCalled != nil { + return cnsm.ThresholdCalled(subroundId) + } + return 0 +} + +// FallbackThreshold - +func (cnsm *ConsensusStateMock) FallbackThreshold(subroundId int) int { + if cnsm.FallbackThresholdCalled != nil { + return cnsm.FallbackThresholdCalled(subroundId) + } + return 0 +} + +func (cnsm *ConsensusStateMock) SetFallbackThreshold(subroundId int, threshold int) { + if cnsm.SetFallbackThresholdCalled != nil { + cnsm.SetFallbackThresholdCalled(subroundId, threshold) + } } // ResetConsensusState - func (cnsm *ConsensusStateMock) ResetConsensusState() { - cnsm.ResetConsensusStateCalled() + if cnsm.ResetConsensusStateCalled != nil { + cnsm.ResetConsensusStateCalled() + } } // IsNodeLeaderInCurrentRound - func (cnsm *ConsensusStateMock) IsNodeLeaderInCurrentRound(node string) bool { - return cnsm.IsNodeLeaderInCurrentRoundCalled(node) + if cnsm.IsNodeLeaderInCurrentRoundCalled != nil { + return cnsm.IsNodeLeaderInCurrentRoundCalled(node) + } + return false } // IsSelfLeaderInCurrentRound - func (cnsm *ConsensusStateMock) IsSelfLeaderInCurrentRound() bool { - return cnsm.IsSelfLeaderInCurrentRoundCalled() + if cnsm.IsSelfLeaderInCurrentRoundCalled != nil { + return cnsm.IsSelfLeaderInCurrentRoundCalled() + } + return false } // GetLeader - func (cnsm *ConsensusStateMock) GetLeader() (string, error) { - return cnsm.GetLeaderCalled() + if cnsm.GetLeaderCalled != nil { + return cnsm.GetLeaderCalled() + } + return "", nil } // GetNextConsensusGroup - func (cnsm *ConsensusStateMock) GetNextConsensusGroup( - randomSource string, - vgs nodesCoordinator.NodesCoordinator, -) ([]string, error) { - return cnsm.GetNextConsensusGroupCalled(randomSource, vgs) + randomSource []byte, + round uint64, + shardId uint32, + nodesCoordinator nodesCoordinator.NodesCoordinator, + epoch uint32, +) (string, []string, error) { + if cnsm.GetNextConsensusGroupCalled != nil { + return cnsm.GetNextConsensusGroupCalled(randomSource, round, shardId, nodesCoordinator, epoch) + } + return "", nil, nil } // IsConsensusDataSet - func (cnsm *ConsensusStateMock) IsConsensusDataSet() bool { - return cnsm.IsConsensusDataSetCalled() + if cnsm.IsConsensusDataSetCalled != nil { + return cnsm.IsConsensusDataSetCalled() + } + return false } // IsConsensusDataEqual - func (cnsm *ConsensusStateMock) IsConsensusDataEqual(data []byte) bool { - return cnsm.IsConsensusDataEqualCalled(data) + if cnsm.IsConsensusDataEqualCalled != nil { + return cnsm.IsConsensusDataEqualCalled(data) + } + return false } // IsJobDone - func (cnsm *ConsensusStateMock) IsJobDone(node string, currentSubroundId int) bool { - return cnsm.IsJobDoneCalled(node, currentSubroundId) + if cnsm.IsJobDoneCalled != nil { + return cnsm.IsJobDoneCalled(node, 
 
 // IsSelfJobDone -
 func (cnsm *ConsensusStateMock) IsSelfJobDone(currentSubroundId int) bool {
-	return cnsm.IsSelfJobDoneCalled(currentSubroundId)
+	if cnsm.IsSelfJobDoneCalled != nil {
+		return cnsm.IsSelfJobDoneCalled(currentSubroundId)
+	}
+	return false
 }
 
 // IsCurrentSubroundFinished -
 func (cnsm *ConsensusStateMock) IsCurrentSubroundFinished(currentSubroundId int) bool {
-	return cnsm.IsCurrentSubroundFinishedCalled(currentSubroundId)
+	if cnsm.IsCurrentSubroundFinishedCalled != nil {
+		return cnsm.IsCurrentSubroundFinishedCalled(currentSubroundId)
+	}
+	return false
 }
 
 // IsNodeSelf -
 func (cnsm *ConsensusStateMock) IsNodeSelf(node string) bool {
-	return cnsm.IsNodeSelfCalled(node)
+	if cnsm.IsNodeSelfCalled != nil {
+		return cnsm.IsNodeSelfCalled(node)
+	}
+	return false
 }
 
 // IsBlockBodyAlreadyReceived -
 func (cnsm *ConsensusStateMock) IsBlockBodyAlreadyReceived() bool {
-	return cnsm.IsBlockBodyAlreadyReceivedCalled()
+	if cnsm.IsBlockBodyAlreadyReceivedCalled != nil {
+		return cnsm.IsBlockBodyAlreadyReceivedCalled()
+	}
+	return false
 }
 
 // IsHeaderAlreadyReceived -
 func (cnsm *ConsensusStateMock) IsHeaderAlreadyReceived() bool {
-	return cnsm.IsHeaderAlreadyReceivedCalled()
+	if cnsm.IsHeaderAlreadyReceivedCalled != nil {
+		return cnsm.IsHeaderAlreadyReceivedCalled()
+	}
+	return false
 }
 
 // CanDoSubroundJob -
 func (cnsm *ConsensusStateMock) CanDoSubroundJob(currentSubroundId int) bool {
-	return cnsm.CanDoSubroundJobCalled(currentSubroundId)
+	if cnsm.CanDoSubroundJobCalled != nil {
+		return cnsm.CanDoSubroundJobCalled(currentSubroundId)
+	}
+	return false
 }
 
 // CanProcessReceivedMessage -
 func (cnsm *ConsensusStateMock) CanProcessReceivedMessage(
-	cnsDta consensus.Message,
-	currentRoundIndex int32,
+	cnsDta *consensus.Message,
+	currentRoundIndex int64,
 	currentSubroundId int,
 ) bool {
 	return cnsm.CanProcessReceivedMessageCalled(cnsDta, currentRoundIndex, currentSubroundId)
@@ -113,25 +610,43 @@ func (cnsm *ConsensusStateMock) CanProcessReceivedMessage(
 
 // GenerateBitmap -
 func (cnsm *ConsensusStateMock) GenerateBitmap(subroundId int) []byte {
-	return cnsm.GenerateBitmapCalled(subroundId)
+	if cnsm.GenerateBitmapCalled != nil {
+		return cnsm.GenerateBitmapCalled(subroundId)
+	}
+	return nil
 }
 
 // ProcessingBlock -
 func (cnsm *ConsensusStateMock) ProcessingBlock() bool {
-	return cnsm.ProcessingBlockCalled()
+	if cnsm.ProcessingBlockCalled != nil {
+		return cnsm.ProcessingBlockCalled()
+	}
+	return false
 }
 
 // SetProcessingBlock -
 func (cnsm *ConsensusStateMock) SetProcessingBlock(processingBlock bool) {
-	cnsm.SetProcessingBlockCalled(processingBlock)
+	if cnsm.SetProcessingBlockCalled != nil {
+		cnsm.SetProcessingBlockCalled(processingBlock)
+	}
 }
 
 // ConsensusGroupSize -
 func (cnsm *ConsensusStateMock) ConsensusGroupSize() int {
-	return cnsm.ConsensusGroupSizeCalled()
+	if cnsm.ConsensusGroupSizeCalled != nil {
+		return cnsm.ConsensusGroupSizeCalled()
+	}
+	return 0
 }
 
 // SetThreshold -
 func (cnsm *ConsensusStateMock) SetThreshold(subroundId int, threshold int) {
-	cnsm.SetThresholdCalled(subroundId, threshold)
+	if cnsm.SetThresholdCalled != nil {
+		cnsm.SetThresholdCalled(subroundId, threshold)
+	}
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (cnsm *ConsensusStateMock) IsInterfaceNil() bool {
+	return cnsm == nil
 }
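Reviewer note: with the nil guards above, ConsensusStateMock only forwards to a *Called handler when one is set and otherwise returns the zero value, so tests no longer panic on unconfigured methods. A minimal usage sketch, illustrative only and not part of this diff (it assumes the mock is exported from testscommon/consensus and that testify is available, as elsewhere in the test suite):

package consensusmocks_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus"
)

// TestConsensusStateMock_NilSafeDefaults configures only the handler the test
// cares about and relies on zero-value fallbacks for the rest.
func TestConsensusStateMock_NilSafeDefaults(t *testing.T) {
	stateMock := &consensusMocks.ConsensusStateMock{
		LeaderCalled: func() string {
			return "leader-pub-key"
		},
	}

	// the configured handler is forwarded
	require.Equal(t, "leader-pub-key", stateMock.Leader())

	// unset handlers fall back to zero values instead of panicking
	require.Nil(t, stateMock.ConsensusGroup())
	require.False(t, stateMock.IsNodeInConsensusGroup("any-node"))
	require.Equal(t, 0, stateMock.ConsensusGroupSize())
}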
diff --git a/testscommon/consensus/initializers/initializers.go b/testscommon/consensus/initializers/initializers.go
new file mode 100644
index 00000000000..aa3381281de
--- /dev/null
+++ b/testscommon/consensus/initializers/initializers.go
@@ -0,0 +1,156 @@
+package initializers
+
+import (
+	crypto "github.com/multiversx/mx-chain-crypto-go"
+	"golang.org/x/exp/slices"
+
+	"github.com/multiversx/mx-chain-go/consensus"
+	"github.com/multiversx/mx-chain-go/consensus/spos"
+	"github.com/multiversx/mx-chain-go/sharding/nodesCoordinator"
+	"github.com/multiversx/mx-chain-go/testscommon"
+)
+
+func createEligibleList(size int) []string {
+	eligibleList := make([]string, 0)
+	for i := 0; i < size; i++ {
+		eligibleList = append(eligibleList, string([]byte{byte(i + 65)}))
+	}
+	return eligibleList
+}
+
+// CreateEligibleListFromMap creates a list of eligible nodes from a map of private keys
+func CreateEligibleListFromMap(mapKeys map[string]crypto.PrivateKey) []string {
+	eligibleList := make([]string, 0, len(mapKeys))
+	for key := range mapKeys {
+		eligibleList = append(eligibleList, key)
+	}
+	slices.Sort(eligibleList)
+	return eligibleList
+}
+
+// InitConsensusStateWithNodesCoordinator creates a consensus state with a nodes coordinator
+func InitConsensusStateWithNodesCoordinator(validatorsGroupSelector nodesCoordinator.NodesCoordinator) *spos.ConsensusState {
+	return initConsensusStateWithKeysHandlerAndNodesCoordinator(&testscommon.KeysHandlerStub{}, validatorsGroupSelector)
+}
+
+// InitConsensusState creates a consensus state
+func InitConsensusState() *spos.ConsensusState {
+	return InitConsensusStateWithKeysHandler(&testscommon.KeysHandlerStub{})
+}
+
+// InitConsensusStateWithArgs creates a consensus state with the given arguments
+func InitConsensusStateWithArgs(keysHandler consensus.KeysHandler, mapKeys map[string]crypto.PrivateKey) *spos.ConsensusState {
+	return initConsensusStateWithKeysHandlerWithGroupSizeWithRealKeys(keysHandler, mapKeys)
+}
+
+// InitConsensusStateWithKeysHandler creates a consensus state with a keys handler
+func InitConsensusStateWithKeysHandler(keysHandler consensus.KeysHandler) *spos.ConsensusState {
+	consensusGroupSize := 9
+	return initConsensusStateWithKeysHandlerWithGroupSize(keysHandler, consensusGroupSize)
+}
+
+func initConsensusStateWithKeysHandlerAndNodesCoordinator(keysHandler consensus.KeysHandler, validatorsGroupSelector nodesCoordinator.NodesCoordinator) *spos.ConsensusState {
+	leader, consensusValidators, _ := validatorsGroupSelector.GetConsensusValidatorsPublicKeys([]byte("randomness"), 0, 0, 0)
+	eligibleNodesPubKeys := make(map[string]struct{})
+	for _, key := range consensusValidators {
+		eligibleNodesPubKeys[key] = struct{}{}
+	}
+	return createConsensusStateWithNodes(eligibleNodesPubKeys, consensusValidators, leader, keysHandler)
+}
+
+// InitConsensusStateWithArgsVerifySignature creates a consensus state with the given arguments for signature verification
+func InitConsensusStateWithArgsVerifySignature(keysHandler consensus.KeysHandler, keys []string) *spos.ConsensusState {
+	numberOfKeys := len(keys)
+	eligibleNodesPubKeys := make(map[string]struct{}, numberOfKeys)
+	for _, key := range keys {
+		eligibleNodesPubKeys[key] = struct{}{}
+	}
+
+	indexLeader := 1
+	rcns, _ := spos.NewRoundConsensus(
+		eligibleNodesPubKeys,
+		numberOfKeys,
+		keys[indexLeader],
+		keysHandler,
+	)
+	rcns.SetConsensusGroup(keys)
+	rcns.ResetRoundState()
+
+	pBFTThreshold := numberOfKeys*2/3 + 1
+	pBFTFallbackThreshold := numberOfKeys*1/2 + 1
+	rthr := spos.NewRoundThreshold()
+	rthr.SetThreshold(1, 1)
+	rthr.SetThreshold(2, pBFTThreshold)
+	rthr.SetFallbackThreshold(1, 1)
+	rthr.SetFallbackThreshold(2, pBFTFallbackThreshold)
+
+	rstatus := spos.NewRoundStatus()
+	rstatus.ResetRoundStatus()
+	cns := spos.NewConsensusState(
+		rcns,
+		rthr,
+		rstatus,
+	)
+	cns.Data = []byte("X")
+	cns.RoundIndex = 0
+
+	return cns
+}
+
+func initConsensusStateWithKeysHandlerWithGroupSize(keysHandler consensus.KeysHandler, consensusGroupSize int) *spos.ConsensusState {
+	eligibleList := createEligibleList(consensusGroupSize)
+
+	eligibleNodesPubKeys := make(map[string]struct{})
+	for _, key := range eligibleList {
+		eligibleNodesPubKeys[key] = struct{}{}
+	}
+
+	return createConsensusStateWithNodes(eligibleNodesPubKeys, eligibleList, eligibleList[0], keysHandler)
+}
+
+func initConsensusStateWithKeysHandlerWithGroupSizeWithRealKeys(keysHandler consensus.KeysHandler, mapKeys map[string]crypto.PrivateKey) *spos.ConsensusState {
+	eligibleList := CreateEligibleListFromMap(mapKeys)
+
+	eligibleNodesPubKeys := make(map[string]struct{}, len(eligibleList))
+	for _, key := range eligibleList {
+		eligibleNodesPubKeys[key] = struct{}{}
+	}
+
+	return createConsensusStateWithNodes(eligibleNodesPubKeys, eligibleList, eligibleList[0], keysHandler)
+}
+
+func createConsensusStateWithNodes(eligibleNodesPubKeys map[string]struct{}, consensusValidators []string, leader string, keysHandler consensus.KeysHandler) *spos.ConsensusState {
+	consensusGroupSize := len(consensusValidators)
+	rcns, _ := spos.NewRoundConsensus(
+		eligibleNodesPubKeys,
+		consensusGroupSize,
+		consensusValidators[1],
+		keysHandler,
+	)
+
+	rcns.SetConsensusGroup(consensusValidators)
+	rcns.SetLeader(leader)
+	rcns.ResetRoundState()
+
+	pBFTThreshold := consensusGroupSize*2/3 + 1
+	pBFTFallbackThreshold := consensusGroupSize*1/2 + 1
+
+	rthr := spos.NewRoundThreshold()
+	rthr.SetThreshold(1, 1)
+	rthr.SetThreshold(2, pBFTThreshold)
+	rthr.SetFallbackThreshold(1, 1)
+	rthr.SetFallbackThreshold(2, pBFTFallbackThreshold)
+
+	rstatus := spos.NewRoundStatus()
+	rstatus.ResetRoundStatus()
+
+	cns := spos.NewConsensusState(
+		rcns,
+		rthr,
+		rstatus,
+	)
+
+	cns.Data = []byte("X")
+	cns.RoundIndex = 0
+	return cns
+}
diff --git a/testscommon/consensus/mockTestInitializer.go b/testscommon/consensus/mockTestInitializer.go
index 2962a577d34..4cdd7174618 100644
--- a/testscommon/consensus/mockTestInitializer.go
+++ b/testscommon/consensus/mockTestInitializer.go
@@ -167,7 +167,9 @@ func InitConsensusCore() *ConsensusCoreMock {
 func InitConsensusCoreWithMultiSigner(multiSigner crypto.MultiSigner) *ConsensusCoreMock {
 	blockChain := &testscommon.ChainHandlerStub{
 		GetGenesisHeaderCalled: func() data.HeaderHandler {
-			return &block.Header{}
+			return &block.Header{
+				RandSeed: []byte("randSeed"),
+			}
 		},
 	}
 	marshalizerMock := mock.MarshalizerMock{}
@@ -181,9 +183,9 @@ func InitConsensusCoreWithMultiSigner(multiSigner crypto.MultiSigner) *Consensus
 
 	chronologyHandlerMock := InitChronologyHandlerMock()
 	hasherMock := &hashingMocks.HasherMock{}
-	roundHandlerMock := &mock.RoundHandlerMock{}
+	roundHandlerMock := &RoundHandlerMock{}
 	shardCoordinatorMock := mock.ShardCoordinatorMock{}
-	syncTimerMock := &mock.SyncTimerMock{}
+	syncTimerMock := &SyncTimerMock{}
 	validatorGroupSelector := &shardingMocks.NodesCoordinatorMock{
 		ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (nodesCoordinator.Validator, []nodesCoordinator.Validator, error) {
 			defaultSelectionChances := uint32(1)
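Reviewer note: the new initializers package centralizes the consensus-state fixtures that several consensus tests previously built by hand; with the default group size of 9 used by InitConsensusState, the subround-2 pBFT threshold computed above is 9*2/3 + 1 = 7 and the fallback threshold is 9*1/2 + 1 = 5. A short usage sketch, illustrative only (it assumes spos.ConsensusState exposes the ConsensusGroupSize, Threshold and FallbackThreshold accessors that the mock above mirrors, and that testify is available):

package initializers_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/multiversx/mx-chain-go/testscommon/consensus/initializers"
)

// TestInitConsensusState_Defaults checks the values hard-coded by the helper:
// Data "X", round index 0, group size 9 and the derived pBFT thresholds.
func TestInitConsensusState_Defaults(t *testing.T) {
	cns := initializers.InitConsensusState()

	require.Equal(t, []byte("X"), cns.Data)
	require.Zero(t, cns.RoundIndex)
	require.Equal(t, 9, cns.ConsensusGroupSize())
	require.Equal(t, 7, cns.Threshold(2))         // 9*2/3 + 1
	require.Equal(t, 5, cns.FallbackThreshold(2)) // 9*1/2 + 1
}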
diff --git a/consensus/mock/rounderMock.go b/testscommon/consensus/rounderMock.go
similarity index 98%
rename from consensus/mock/rounderMock.go
rename to testscommon/consensus/rounderMock.go
index 6a0625932a1..bb463f38c33 100644
--- a/consensus/mock/rounderMock.go
+++ b/testscommon/consensus/rounderMock.go
@@ -1,4 +1,4 @@
-package mock
+package consensus
 
 import (
 	"time"
diff --git a/consensus/mock/sposWorkerMock.go b/testscommon/consensus/sposWorkerMock.go
similarity index 76%
rename from consensus/mock/sposWorkerMock.go
rename to testscommon/consensus/sposWorkerMock.go
index d254b827b57..3aa127287de 100644
--- a/consensus/mock/sposWorkerMock.go
+++ b/testscommon/consensus/sposWorkerMock.go
@@ -1,10 +1,11 @@
-package mock
+package consensus
 
 import (
 	"context"
 
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-core-go/data"
+
 	"github.com/multiversx/mx-chain-go/consensus"
 	"github.com/multiversx/mx-chain-go/p2p"
 )
@@ -27,13 +28,15 @@ type SposWorkerMock struct {
 	DisplayStatisticsCalled      func()
 	ReceivedHeaderCalled         func(headerHandler data.HeaderHandler, headerHash []byte)
 	SetAppStatusHandlerCalled    func(ash core.AppStatusHandler) error
-	ResetConsensusMessagesCalled func(currentHash []byte, prevHash []byte)
+	ResetConsensusMessagesCalled func()
 }
 
 // AddReceivedMessageCall -
 func (sposWorkerMock *SposWorkerMock) AddReceivedMessageCall(messageType consensus.MessageType, receivedMessageCall func(ctx context.Context, cnsDta *consensus.Message) bool) {
-	sposWorkerMock.AddReceivedMessageCallCalled(messageType, receivedMessageCall)
+	if sposWorkerMock.AddReceivedMessageCallCalled != nil {
+		sposWorkerMock.AddReceivedMessageCallCalled(messageType, receivedMessageCall)
+	}
 }
 
 // AddReceivedHeaderHandler -
@@ -45,32 +48,49 @@ func (sposWorkerMock *SposWorkerMock) AddReceivedHeaderHandler(handler func(data
 
 // RemoveAllReceivedMessagesCalls -
 func (sposWorkerMock *SposWorkerMock) RemoveAllReceivedMessagesCalls() {
-	sposWorkerMock.RemoveAllReceivedMessagesCallsCalled()
+	if sposWorkerMock.RemoveAllReceivedMessagesCallsCalled != nil {
+		sposWorkerMock.RemoveAllReceivedMessagesCallsCalled()
+	}
 }
 
 // ProcessReceivedMessage -
 func (sposWorkerMock *SposWorkerMock) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error {
-	return sposWorkerMock.ProcessReceivedMessageCalled(message)
+	if sposWorkerMock.ProcessReceivedMessageCalled != nil {
+		return sposWorkerMock.ProcessReceivedMessageCalled(message)
+	}
+	return nil
 }
 
 // SendConsensusMessage -
 func (sposWorkerMock *SposWorkerMock) SendConsensusMessage(cnsDta *consensus.Message) bool {
-	return sposWorkerMock.SendConsensusMessageCalled(cnsDta)
+	if sposWorkerMock.SendConsensusMessageCalled != nil {
+		return sposWorkerMock.SendConsensusMessageCalled(cnsDta)
+	}
+	return false
 }
 
 // Extend -
 func (sposWorkerMock *SposWorkerMock) Extend(subroundId int) {
-	sposWorkerMock.ExtendCalled(subroundId)
+	if sposWorkerMock.ExtendCalled != nil {
+		sposWorkerMock.ExtendCalled(subroundId)
+	}
 }
 
 // GetConsensusStateChangedChannel -
 func (sposWorkerMock *SposWorkerMock) GetConsensusStateChangedChannel() chan bool {
-	return sposWorkerMock.GetConsensusStateChangedChannelsCalled()
+	if sposWorkerMock.GetConsensusStateChangedChannelsCalled != nil {
+		return sposWorkerMock.GetConsensusStateChangedChannelsCalled()
+	}
+
+	return nil
}
 
 // BroadcastBlock -
 func (sposWorkerMock *SposWorkerMock) BroadcastBlock(body data.BodyHandler, header data.HeaderHandler) error {
-	return sposWorkerMock.GetBroadcastBlockCalled(body, header)
+	if sposWorkerMock.GetBroadcastBlockCalled != nil {
+		return sposWorkerMock.GetBroadcastBlockCalled(body, header)
+	}
+	return nil
 }
 
 // ExecuteStoredMessages -
@@ -104,9 +124,9 @@ func (sposWorkerMock *SposWorkerMock) StartWorking() {
 }
 
 // ResetConsensusMessages -
-func (sposWorkerMock *SposWorkerMock) ResetConsensusMessages(currentHash []byte, prevHash []byte) {
+func (sposWorkerMock *SposWorkerMock) ResetConsensusMessages() {
 	if sposWorkerMock.ResetConsensusMessagesCalled != nil {
-		sposWorkerMock.ResetConsensusMessagesCalled(currentHash, prevHash)
+		sposWorkerMock.ResetConsensusMessagesCalled()
 	}
 }
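Reviewer note: ResetConsensusMessages drops its (currentHash, prevHash) parameters here and the relocated SposWorkerMock becomes nil-safe, in line with the ConsensusStateMock changes above. A brief sketch of how a test might exercise it, illustrative only (it assumes the consensusMocks import alias used elsewhere in this PR and the availability of testify):

package spos_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	consensusMocks "github.com/multiversx/mx-chain-go/testscommon/consensus"
)

// TestSposWorkerMock_ResetConsensusMessages verifies the parameterless handler is
// forwarded when set, and that an unconfigured mock stays a safe no-op.
func TestSposWorkerMock_ResetConsensusMessages(t *testing.T) {
	resetCalled := false
	worker := &consensusMocks.SposWorkerMock{
		ResetConsensusMessagesCalled: func() {
			resetCalled = true
		},
	}

	worker.ResetConsensusMessages()
	require.True(t, resetCalled)

	// no handlers configured: calls fall back to zero values instead of panicking
	emptyWorker := &consensusMocks.SposWorkerMock{}
	require.False(t, emptyWorker.SendConsensusMessage(nil))
	require.Nil(t, emptyWorker.ProcessReceivedMessage(nil, "", nil))
}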
diff --git a/consensus/mock/syncTimerMock.go b/testscommon/consensus/syncTimerMock.go
similarity index 98%
rename from consensus/mock/syncTimerMock.go
rename to testscommon/consensus/syncTimerMock.go
index 2fa41d42341..32b92bbe33b 100644
--- a/consensus/mock/syncTimerMock.go
+++ b/testscommon/consensus/syncTimerMock.go
@@ -1,4 +1,4 @@
-package mock
+package consensus
 
 import (
 	"time"
diff --git a/testscommon/dataRetriever/poolFactory.go b/testscommon/dataRetriever/poolFactory.go
index df416a9f56a..b631e6d4ba2 100644
--- a/testscommon/dataRetriever/poolFactory.go
+++ b/testscommon/dataRetriever/poolFactory.go
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/multiversx/mx-chain-core-go/marshal"
+
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/dataRetriever"
 	"github.com/multiversx/mx-chain-go/dataRetriever/dataPool"
@@ -242,6 +243,7 @@ func CreatePoolsHolderWithTxPool(txPool dataRetriever.ShardedDataCacherNotifier)
 		PeerAuthentications: peerAuthPool,
 		Heartbeats:          heartbeatPool,
 		ValidatorsInfo:      validatorsInfo,
+		Proofs:              &ProofsPoolMock{},
 	}
 	holder, err := dataPool.NewDataPool(dataPoolArgs)
 	panicIfError("CreatePoolsHolderWithTxPool", err)