diff --git a/pkg/protocol/block_dispatcher.go b/pkg/protocol/block_dispatcher.go index c7e76e1ee..6988bd39d 100644 --- a/pkg/protocol/block_dispatcher.go +++ b/pkg/protocol/block_dispatcher.go @@ -277,7 +277,7 @@ func (b *BlockDispatcher) processWarpSyncResponse(commitmentID iotago.Commitment } targetEngine.Reset() - // Once all blocks are booked we + // Once all blocks are booked and their weight propagated we // 1. Mark all transactions as accepted // 2. Mark all blocks as accepted // 3. Force commitment of the slot @@ -298,6 +298,17 @@ func (b *BlockDispatcher) processWarpSyncResponse(commitmentID iotago.Commitment b.protocol.HandleError(ierrors.Errorf("producedCommitment ID mismatch: %s != %s", producedCommitment.ID(), commitmentID)) return } + + // 5. We add all blocks as root blocks. We can only do it after the commitment of the slot because otherwise + // confirmation of the blocks can't be properly propagated (as it skips propagation to root blocks). + for slotCommitmentID, blockIDsForCommitment := range blockIDsBySlotCommitmentID { + for _, blockID := range blockIDsForCommitment { + // We need to make sure that we add all blocks as root blocks because we don't know which blocks are root blocks without + // blocks from future slots. We're committing the current slot which then leads to the eviction of the blocks from the + // block cache and thus if not root blocks no block in the next slot can become solid. + targetEngine.EvictionState.AddRootBlock(blockID, slotCommitmentID) + } + } } blockBookedFunc := func(_ bool, _ bool) { @@ -336,7 +347,7 @@ func (b *BlockDispatcher) processWarpSyncResponse(commitmentID iotago.Commitment return nil } - for slotCommitmentID, blockIDsForCommitment := range blockIDsBySlotCommitmentID { + for _, blockIDsForCommitment := range blockIDsBySlotCommitmentID { for _, blockID := range blockIDsForCommitment { block, _ := targetEngine.BlockDAG.GetOrRequestBlock(blockID) if block == nil { // this should never happen as we're requesting the blocks for this slot so it can't be evicted. @@ -344,12 +355,7 @@ func (b *BlockDispatcher) processWarpSyncResponse(commitmentID iotago.Commitment continue } - // We need to make sure that we add all blocks as root blocks because we don't know which blocks are root blocks without - // blocks from future slots. We're committing the current slot which then leads to the eviction of the blocks from the - // block cache and thus if not root blocks no block in the next slot can become solid. - targetEngine.EvictionState.AddRootBlock(blockID, slotCommitmentID) - - block.Booked().OnUpdate(blockBookedFunc) + block.WeightPropagated().OnUpdate(blockBookedFunc) } } diff --git a/pkg/protocol/commitment_verifier.go b/pkg/protocol/commitment_verifier.go index 2172c92e7..371eaf8a0 100644 --- a/pkg/protocol/commitment_verifier.go +++ b/pkg/protocol/commitment_verifier.go @@ -5,7 +5,6 @@ import ( "github.com/iotaledger/hive.go/ds" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore/mapdb" - "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/iota-core/pkg/model" "github.com/iotaledger/iota-core/pkg/protocol/engine" "github.com/iotaledger/iota-core/pkg/protocol/engine/accounts" @@ -14,15 +13,27 @@ import ( ) type CommitmentVerifier struct { - engine *engine.Engine - cumulativeWeight uint64 - validatorAccountsAtFork map[iotago.AccountID]*accounts.AccountData + engine *engine.Engine + lastCommonSlotBeforeFork iotago.SlotIndex + + // cumulativeWeight is the cumulative weight of the verified commitments. 
It is updated after each verification. + cumulativeWeight uint64 + + // epoch is the epoch of the currently verified commitment. Initially, it is set to the epoch of the last common commitment before the fork. + epoch iotago.EpochIndex + + // validatorAccountsData is the accounts data of the validators for the current epoch as known at lastCommonSlotBeforeFork. + // Initially, it is set to the accounts data of the validators for the epoch of the last common commitment before the fork. + validatorAccountsData map[iotago.AccountID]*accounts.AccountData } func NewCommitmentVerifier(mainEngine *engine.Engine, lastCommonCommitmentBeforeFork *model.Commitment) (*CommitmentVerifier, error) { - committeeAtForkingPoint, exists := mainEngine.SybilProtection.SeatManager().CommitteeInSlot(lastCommonCommitmentBeforeFork.Slot()) + apiForSlot := mainEngine.APIForSlot(lastCommonCommitmentBeforeFork.Slot()) + epoch := apiForSlot.TimeProvider().EpochFromSlot(lastCommonCommitmentBeforeFork.Slot()) + + committeeAtForkingPoint, exists := mainEngine.SybilProtection.SeatManager().CommitteeInEpoch(epoch) if !exists { - return nil, ierrors.Errorf("committee in slot %d does not exist", lastCommonCommitmentBeforeFork.Slot()) + return nil, ierrors.Errorf("committee in epoch %d of last commonCommitment slot %d before fork does not exist", epoch, lastCommonCommitmentBeforeFork.Slot()) } accountsAtForkingPoint, err := committeeAtForkingPoint.Accounts() @@ -30,11 +41,17 @@ func NewCommitmentVerifier(mainEngine *engine.Engine, lastCommonCommitmentBefore return nil, ierrors.Wrapf(err, "failed to get accounts from committee for slot %d", lastCommonCommitmentBeforeFork.Slot()) } + validatorAccountsDataAtForkingPoint, err := mainEngine.Ledger.PastAccounts(accountsAtForkingPoint.IDs(), lastCommonCommitmentBeforeFork.Slot()) + if err != nil { + return nil, ierrors.Wrapf(err, "failed to get past accounts for slot %d", lastCommonCommitmentBeforeFork.Slot()) + } + return &CommitmentVerifier{ - engine: mainEngine, - cumulativeWeight: lastCommonCommitmentBeforeFork.CumulativeWeight(), - validatorAccountsAtFork: lo.PanicOnErr(mainEngine.Ledger.PastAccounts(accountsAtForkingPoint.IDs(), lastCommonCommitmentBeforeFork.Slot())), - // TODO: what happens if the committee rotated after the fork? + engine: mainEngine, + cumulativeWeight: lastCommonCommitmentBeforeFork.CumulativeWeight(), + lastCommonSlotBeforeFork: lastCommonCommitmentBeforeFork.Slot(), + epoch: epoch, + validatorAccountsData: validatorAccountsDataAtForkingPoint, }, nil } @@ -58,19 +75,41 @@ func (c *CommitmentVerifier) verifyCommitment(commitment *model.Commitment, atte return nil, 0, ierrors.Errorf("invalid merkle proof for attestations for commitment %s", commitment.ID()) } - // 2. Verify attestations. + // 2. Update validatorAccountsData if fork happened across epoch boundaries. + // We try to use the latest accounts data (at lastCommonSlotBeforeFork) for the current epoch. + // This is necessary because the committee might have rotated at the epoch boundary and different validators might be part of it. + // In case anything goes wrong we keep using previously known accounts data (initially set to the accounts data + // of the validators for the epoch of the last common commitment before the fork). 
+ apiForSlot := c.engine.APIForSlot(commitment.Slot()) + commitmentEpoch := apiForSlot.TimeProvider().EpochFromSlot(commitment.Slot()) + if commitmentEpoch > c.epoch { + c.epoch = commitmentEpoch + + committee, exists := c.engine.SybilProtection.SeatManager().CommitteeInEpoch(commitmentEpoch) + if exists { + validatorAccounts, err := committee.Accounts() + if err == nil { + validatorAccountsData, err := c.engine.Ledger.PastAccounts(validatorAccounts.IDs(), c.lastCommonSlotBeforeFork) + if err == nil { + c.validatorAccountsData = validatorAccountsData + } + } + } + } + + // 3. Verify attestations. blockIDs, seatCount, err := c.verifyAttestations(attestations) if err != nil { return nil, 0, ierrors.Wrapf(err, "error validating attestations for commitment %s", commitment.ID()) } - // 3. Verify that calculated cumulative weight from attestations is lower or equal to cumulative weight of commitment. + // 4. Verify that calculated cumulative weight from attestations is lower or equal to cumulative weight of commitment. // This is necessary due to public key changes of validators in the window of forking point and the current state of // the other chain (as validators could have added/removed public keys that we don't know about yet). // - // 1. The weight should be equal if all public keys are known and unchanged. + // a) The weight should be equal if all public keys are known and unchanged. // - // 2. A public key is added to an account. + // b) A public key is added to an account. // We do not count a seat for the issuer for this slot and the computed CW will be lower than the CW in // the commitment. This is fine, since this is a rare occasion and a heavier chain will become heavier anyway, eventually. // It will simply take a bit longer to accumulate enough CW so that the chain-switch rule kicks in. @@ -78,7 +117,7 @@ func (c *CommitmentVerifier) verifyCommitment(commitment *model.Commitment, atte // This can only be prevented by adding such key changes provably to the commitments so that these changes // can be reconstructed and verified by nodes that do not have the latest ledger state. // - // 3. A public key is removed from an account. + // c) A public key is removed from an account. // We count the seat for the issuer for this slot even though we shouldn't have. According to the protocol, a valid // chain with such a block can never exist because the block itself (here provided as an attestation) would be invalid. // However, we do not know about this yet since we do not have the latest ledger state. @@ -105,11 +144,13 @@ func (c *CommitmentVerifier) verifyAttestations(attestations []*iotago.Attestati // 1. The attestation might be fake. // 2. The issuer might have added a new public key in the meantime, but we don't know about it yet // since we only have the ledger state at the forking point. - accountData, exists := c.validatorAccountsAtFork[att.Header.IssuerID] + accountData, exists := c.validatorAccountsData[att.Header.IssuerID] - // We always need to have the accountData for a validator. + // We don't know the account data of the issuer. Ignore. + // This could be due to committee rotation at epoch boundary where a new validator (unknown at forking point) + // is selected into the committee. 
if !exists { - return nil, 0, ierrors.Errorf("accountData for issuerID %s does not exist", att.Header.IssuerID) + continue } switch signature := att.Signature.(type) { @@ -137,12 +178,14 @@ func (c *CommitmentVerifier) verifyAttestations(attestations []*iotago.Attestati return nil, 0, ierrors.Errorf("issuerID %s contained in multiple attestations", att.Header.IssuerID) } - // TODO: this might differ if we have a Accounts with changing weights depending on the Slot/epoch attestationBlockID, err := att.BlockID() if err != nil { return nil, 0, ierrors.Wrap(err, "error calculating blockID from attestation") } + // We need to make sure that the issuer is actually part of the committee for the slot of the attestation (issuance of the block). + // Note: here we're explicitly not using the slot of the commitment we're verifying, but the slot of the attestation. + // This is because at the time the attestation was created, the committee might have been different from the one at commitment time (due to rotation at epoch boundary). committee, exists := c.engine.SybilProtection.SeatManager().CommitteeInSlot(attestationBlockID.Slot()) if !exists { return nil, 0, ierrors.Errorf("committee for slot %d does not exist", attestationBlockID.Slot()) diff --git a/pkg/protocol/engine/attestation/slotattestation/manager.go b/pkg/protocol/engine/attestation/slotattestation/manager.go index 7cf4d45bf..5c2be5fd5 100644 --- a/pkg/protocol/engine/attestation/slotattestation/manager.go +++ b/pkg/protocol/engine/attestation/slotattestation/manager.go @@ -259,14 +259,20 @@ func (m *Manager) Commit(slot iotago.SlotIndex) (newCW uint64, attestationsRoot return 0, iotago.Identifier{}, ierrors.Wrapf(err, "failed to get attestation storage when committing slot %d", slot) } - // Add all attestations to the tree and calculate the new cumulative weight. - committee, exists := m.committeeFunc(slot) - if !exists { - return 0, iotago.Identifier{}, ierrors.Wrapf(err, "failed to get committee when committing slot %d", slot) - } - for _, a := range attestations { - // TODO: which weight are we using here? The current one? Or the one of the slot of the attestation/commitmentID? + blockID, err := a.BlockID() + if err != nil { + return 0, iotago.Identifier{}, ierrors.Wrapf(err, "failed to get blockID of attestation %s", a.Header.IssuerID) + } + + // We use the committee of the slot of the attestation in contrast to the slot that we're committing right now. + // This is because at the time the attestation was created, the committee might have been different from the current one (due to rotation at epoch boundary). + committee, exists := m.committeeFunc(blockID.Slot()) + if !exists { + return 0, iotago.Identifier{}, ierrors.Wrapf(err, "failed to get committee when committing slot %d", slot) + } + + // Add all attestations to the tree and calculate the new cumulative weight. 
if _, exists := committee.GetSeat(a.Header.IssuerID); exists { if err := tree.Set(a.Header.IssuerID, a); err != nil { return 0, iotago.Identifier{}, ierrors.Wrapf(err, "failed to set attestation %s in tree", a.Header.IssuerID) diff --git a/pkg/protocol/engine/blocks/block.go b/pkg/protocol/engine/blocks/block.go index bba486056..0dc76a6db 100644 --- a/pkg/protocol/engine/blocks/block.go +++ b/pkg/protocol/engine/blocks/block.go @@ -40,6 +40,7 @@ type Block struct { preConfirmed bool confirmationRatifiers ds.Set[account.SeatIndex] confirmed bool + weightPropagated reactive.Variable[bool] // Scheduler block scheduled bool @@ -86,6 +87,7 @@ func NewBlock(data *model.Block) *Block { invalid: reactive.NewVariable[bool](), booked: reactive.NewVariable[bool](), accepted: reactive.NewVariable[bool](), + weightPropagated: reactive.NewVariable[bool](), notarized: reactive.NewVariable[bool](), workScore: data.WorkScore(), } @@ -104,18 +106,20 @@ func NewRootBlock(blockID iotago.BlockID, commitmentID iotago.CommitmentID, issu commitmentID: commitmentID, issuingTime: issuingTime, }, - solid: reactive.NewVariable[bool](), - invalid: reactive.NewVariable[bool](), - booked: reactive.NewVariable[bool](), - preAccepted: true, - accepted: reactive.NewVariable[bool](), - notarized: reactive.NewVariable[bool](), - scheduled: true, + solid: reactive.NewVariable[bool](), + invalid: reactive.NewVariable[bool](), + booked: reactive.NewVariable[bool](), + preAccepted: true, + accepted: reactive.NewVariable[bool](), + weightPropagated: reactive.NewVariable[bool](), + notarized: reactive.NewVariable[bool](), + scheduled: true, } // This should be true since we commit and evict on acceptance. b.solid.Set(true) b.booked.Set(true) + b.weightPropagated.Set(true) b.notarized.Set(true) b.accepted.Set(true) @@ -135,6 +139,7 @@ func NewMissingBlock(blockID iotago.BlockID) *Block { invalid: reactive.NewVariable[bool](), booked: reactive.NewVariable[bool](), accepted: reactive.NewVariable[bool](), + weightPropagated: reactive.NewVariable[bool](), notarized: reactive.NewVariable[bool](), } } @@ -605,6 +610,18 @@ func (b *Block) SetPreConfirmed() (wasUpdated bool) { return wasUpdated } +func (b *Block) WeightPropagated() reactive.Variable[bool] { + return b.weightPropagated +} + +func (b *Block) IsWeightPropagated() bool { + return b.weightPropagated.Get() +} + +func (b *Block) SetWeightPropagated() (wasUpdated bool) { + return !b.weightPropagated.Set(true) +} + func (b *Block) Notarized() reactive.Variable[bool] { return b.notarized } @@ -633,6 +650,7 @@ func (b *Block) String() string { builder.AddField(stringify.NewStructField("PreConfirmed", b.preConfirmed)) builder.AddField(stringify.NewStructField("ConfirmationRatifiers", b.confirmationRatifiers.String())) builder.AddField(stringify.NewStructField("Confirmed", b.confirmed)) + builder.AddField(stringify.NewStructField("WeightPropagated", b.weightPropagated.Get())) builder.AddField(stringify.NewStructField("Scheduled", b.scheduled)) builder.AddField(stringify.NewStructField("Dropped", b.dropped)) builder.AddField(stringify.NewStructField("Skipped", b.skipped)) diff --git a/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/confirmation_ratification.go b/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/confirmation_ratification.go index fc66ca885..7573f5a0f 100644 --- a/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/confirmation_ratification.go +++ 
b/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/confirmation_ratification.go @@ -24,12 +24,12 @@ func (g *Gadget) trackConfirmationRatifierWeight(votingBlock *blocks.Block) { return false } - // Skip propagation if the block is already accepted. + // Skip propagation if the block is already confirmed. if block.IsConfirmed() { return false } - // Skip further propagation if the witness is not new. + // Skip further propagation if the ratifier is not new. propagateFurther := block.AddConfirmationRatifier(seat) if g.shouldConfirm(block) { diff --git a/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/witness_weight.go b/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/witness_weight.go index 3934e82d3..0e1d2076a 100644 --- a/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/witness_weight.go +++ b/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/witness_weight.go @@ -8,6 +8,8 @@ import ( ) func (g *Gadget) TrackWitnessWeight(votingBlock *blocks.Block) { + defer votingBlock.SetWeightPropagated() + // Only track witness weight for issuers that are part of the committee. seat, isValid := g.isCommitteeValidationBlock(votingBlock) if !isValid { diff --git a/pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget/gadget.go b/pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget/gadget.go index d882ed1de..70da1be59 100644 --- a/pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget/gadget.go +++ b/pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget/gadget.go @@ -4,11 +4,9 @@ import ( "github.com/iotaledger/hive.go/ds/shrinkingmap" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/lo" - "github.com/iotaledger/hive.go/runtime/event" "github.com/iotaledger/hive.go/runtime/module" "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/hive.go/runtime/syncutils" - "github.com/iotaledger/hive.go/runtime/workerpool" "github.com/iotaledger/iota-core/pkg/protocol/engine" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" "github.com/iotaledger/iota-core/pkg/protocol/engine/consensus/slotgadget" @@ -19,8 +17,7 @@ import ( ) type Gadget struct { - events *slotgadget.Events - workers *workerpool.Group + events *slotgadget.Events // Keep track of votes on slots (from commitments) per slot of blocks. I.e. a slot can only be finalized if // optsSlotFinalizationThreshold is reached within a slot. 
@@ -49,13 +46,12 @@ func NewProvider(opts ...options.Option[Gadget]) module.Provider[*engine.Engine, g.slotTrackers = shrinkingmap.New[iotago.SlotIndex, *slottracker.SlotTracker]() e.Events.SlotGadget.LinkTo(g.events) - g.workers = e.Workers.CreateGroup("SlotGadget") e.HookConstructed(func() { g.seatManager = e.SybilProtection.SeatManager() g.TriggerConstructed() - e.Events.BlockGadget.BlockConfirmed.Hook(g.trackVotes, event.WithWorkerPool(g.workers.CreatePool("TrackAndRefresh", workerpool.WithWorkerCount(1)))) // Using just 1 worker to avoid contention + e.Events.BlockGadget.BlockConfirmed.Hook(g.trackVotes) }) g.storeLastFinalizedSlotFunc = func(slot iotago.SlotIndex) { @@ -87,7 +83,6 @@ func (g *Gadget) Reset() { func (g *Gadget) Shutdown() { g.TriggerStopped() - g.workers.Shutdown() } func (g *Gadget) setLastFinalizedSlot(i iotago.SlotIndex) { @@ -120,7 +115,6 @@ func (g *Gadget) trackVotes(block *blocks.Block) { } func (g *Gadget) refreshSlotFinalization(tracker *slottracker.SlotTracker, previousLatestSlotIndex iotago.SlotIndex, newLatestSlotIndex iotago.SlotIndex) (finalizedSlots []iotago.SlotIndex) { - for i := lo.Max(g.lastFinalizedSlot, previousLatestSlotIndex) + 1; i <= newLatestSlotIndex; i++ { committeeTotalSeats := g.seatManager.SeatCountInSlot(i) attestorsTotalSeats := len(tracker.Voters(i)) diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go index 9e11f42c7..965c57562 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go @@ -21,7 +21,7 @@ type Tracker struct { rewardsStorePerEpochFunc func(epoch iotago.EpochIndex) (kvstore.KVStore, error) poolStatsStore *epochstore.Store[*model.PoolsStats] committeeStore *epochstore.Store[*account.Accounts] - committeeCandidatesInEpochFunc func(epoch iotago.EpochIndex) (kvstore.KVStore, error) + committeeCandidatesInEpochFunc func(epoch iotago.EpochIndex) (*kvstore.TypedStore[iotago.AccountID, iotago.SlotIndex], error) nextEpochCommitteeCandidates *shrinkingmap.ShrinkingMap[iotago.AccountID, iotago.SlotIndex] validatorPerformancesFunc func(slot iotago.SlotIndex) (*slotstore.Store[iotago.AccountID, *model.ValidatorPerformance], error) latestAppliedEpoch iotago.EpochIndex @@ -38,7 +38,7 @@ func NewTracker( rewardsStorePerEpochFunc func(epoch iotago.EpochIndex) (kvstore.KVStore, error), poolStatsStore *epochstore.Store[*model.PoolsStats], committeeStore *epochstore.Store[*account.Accounts], - committeeCandidatesInEpochFunc func(epoch iotago.EpochIndex) (kvstore.KVStore, error), + committeeCandidatesInEpochFunc func(epoch iotago.EpochIndex) (*kvstore.TypedStore[iotago.AccountID, iotago.SlotIndex], error), validatorPerformancesFunc func(slot iotago.SlotIndex) (*slotstore.Store[iotago.AccountID, *model.ValidatorPerformance], error), latestAppliedEpoch iotago.EpochIndex, apiProvider iotago.APIProvider, @@ -109,7 +109,7 @@ func (t *Tracker) TrackCandidateBlock(block *blocks.Block) { return currentValue } - err = committeeCandidatesStore.Set(block.ProtocolBlock().Header.IssuerID[:], block.ID().Slot().MustBytes()) + err = committeeCandidatesStore.Set(block.ProtocolBlock().Header.IssuerID, block.ID().Slot()) if err != nil { // if there is an error, and we don't register a candidate, then we might eventually create a different commitment t.errHandler(ierrors.Wrapf(err, "error while updating candidate activity for epoch %d", 
blockEpoch)) @@ -158,25 +158,11 @@ func (t *Tracker) getValidatorCandidates(epoch iotago.EpochIndex) (ds.Set[iotago return nil, ierrors.Wrapf(err, "error while retrieving candidates for epoch %d", epoch) } - var innerErr error - err = candidateStore.IterateKeys(kvstore.EmptyPrefix, func(key kvstore.Key) bool { - accountID, _, err := iotago.AccountIDFromBytes(key) - if err != nil { - innerErr = err - - return false - } - + if err = candidateStore.IterateKeys(kvstore.EmptyPrefix, func(accountID iotago.AccountID) bool { candidates.Add(accountID) return true - }) - - if innerErr != nil { - return nil, ierrors.Wrapf(innerErr, "error while iterating through candidates for epoch %d", epoch) - } - - if err != nil { + }); err != nil { return nil, ierrors.Wrapf(err, "error while retrieving candidates for epoch %d", epoch) } diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/testsuite_test.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/testsuite_test.go index 9d4d3211d..a7f2fcddf 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/testsuite_test.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/testsuite_test.go @@ -9,6 +9,7 @@ import ( "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/kvstore/mapdb" + "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/model" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" @@ -76,7 +77,14 @@ func (t *TestSuite) InitPerformanceTracker() { rewardsStore.GetEpoch, poolStatsStore, committeeStore, - committeeCandidatesStore.GetEpoch, + func(epoch iotago.EpochIndex) (*kvstore.TypedStore[iotago.AccountID, iotago.SlotIndex], error) { + return kvstore.NewTypedStore(lo.PanicOnErr(committeeCandidatesStore.GetEpoch(epoch)), + iotago.AccountID.Bytes, + iotago.AccountIDFromBytes, + iotago.SlotIndex.Bytes, + iotago.SlotIndexFromBytes, + ), nil + }, performanceFactorFunc, t.latestCommittedEpoch, iotago.SingleVersionProvider(t.api), diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go b/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go index f95bcb26a..f0d3bcc18 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go @@ -148,64 +148,68 @@ func (o *SybilProtection) CommitSlot(slot iotago.SlotIndex) (committeeRoot iotag timeProvider := apiForSlot.TimeProvider() currentEpoch := timeProvider.EpochFromSlot(slot) nextEpoch := currentEpoch + 1 - + currentEpochEndSlot := timeProvider.EpochEnd(currentEpoch) maxCommittableAge := apiForSlot.ProtocolParameters().MaxCommittableAge() - // If the committed slot is `maxCommittableSlot` - // away from the end of the epoch, then register a committee for the next epoch. - if timeProvider.EpochEnd(currentEpoch) == slot+maxCommittableAge { - if _, committeeExists := o.seatManager.CommitteeInEpoch(nextEpoch); !committeeExists { - // If the committee for the epoch wasn't set before due to finalization of a slot, - // we promote the current committee to also serve in the next epoch. - committeeAccounts, err := o.reuseCommittee(currentEpoch, nextEpoch) - if err != nil { - return iotago.Identifier{}, iotago.Identifier{}, ierrors.Wrapf(err, "failed to reuse committee for epoch %d", nextEpoch) + // Determine the committee root. 
+ { + // If the committed slot is `maxCommittableAge` away from the end of the epoch, then register (reuse) + // a committee for the next epoch if it hasn't been selected yet. + if slot+maxCommittableAge == currentEpochEndSlot { + if _, committeeExists := o.seatManager.CommitteeInEpoch(nextEpoch); !committeeExists { + // If the committee for the epoch wasn't set before due to finalization of a slot, + // we promote the current committee to also serve in the next epoch. + committeeAccounts, err := o.reuseCommittee(currentEpoch, nextEpoch) + if err != nil { + return iotago.Identifier{}, iotago.Identifier{}, ierrors.Wrapf(err, "failed to reuse committee for epoch %d", nextEpoch) + } + + o.events.CommitteeSelected.Trigger(committeeAccounts, nextEpoch) } - - o.events.CommitteeSelected.Trigger(committeeAccounts, nextEpoch) } - } - if timeProvider.EpochEnd(currentEpoch) == slot { - committee, exists := o.performanceTracker.LoadCommitteeForEpoch(currentEpoch) - if !exists { - return iotago.Identifier{}, iotago.Identifier{}, ierrors.Wrapf(err, "committee for a finished epoch %d not found", currentEpoch) + targetCommitteeEpoch := currentEpoch + if slot+maxCommittableAge >= currentEpochEndSlot { + targetCommitteeEpoch = nextEpoch } - err = o.performanceTracker.ApplyEpoch(currentEpoch, committee) + committeeRoot, err = o.committeeRoot(targetCommitteeEpoch) if err != nil { - return iotago.Identifier{}, iotago.Identifier{}, ierrors.Wrapf(err, "failed to apply epoch %d", currentEpoch) + return iotago.Identifier{}, iotago.Identifier{}, ierrors.Wrapf(err, "failed to calculate committee root for epoch %d", targetCommitteeEpoch) } } - var targetCommitteeEpoch iotago.EpochIndex - - if apiForSlot.TimeProvider().EpochEnd(currentEpoch) > slot+maxCommittableAge { - targetCommitteeEpoch = currentEpoch - } else { - targetCommitteeEpoch = nextEpoch - } + // Handle performance tracking for the current epoch. + { + if slot == currentEpochEndSlot { + committee, exists := o.performanceTracker.LoadCommitteeForEpoch(currentEpoch) + if !exists { + return iotago.Identifier{}, iotago.Identifier{}, ierrors.Wrapf(err, "committee for a finished epoch %d not found", currentEpoch) + } - committeeRoot, err = o.committeeRoot(targetCommitteeEpoch) - if err != nil { - return iotago.Identifier{}, iotago.Identifier{}, ierrors.Wrapf(err, "failed to calculate committee root for epoch %d", targetCommitteeEpoch) + err = o.performanceTracker.ApplyEpoch(currentEpoch, committee) + if err != nil { + return iotago.Identifier{}, iotago.Identifier{}, ierrors.Wrapf(err, "failed to apply epoch %d", currentEpoch) + } + } } - var targetRewardsEpoch iotago.EpochIndex - if apiForSlot.TimeProvider().EpochEnd(currentEpoch) == slot { - targetRewardsEpoch = nextEpoch - } else { - targetRewardsEpoch = currentEpoch - } + // Determine the rewards root. 
+ { + targetRewardsEpoch := currentEpoch + if slot == currentEpochEndSlot { + targetRewardsEpoch = nextEpoch + } - rewardsRoot, err = o.performanceTracker.RewardsRoot(targetRewardsEpoch) - if err != nil { - return iotago.Identifier{}, iotago.Identifier{}, ierrors.Wrapf(err, "failed to calculate rewards root for epoch %d", targetRewardsEpoch) + rewardsRoot, err = o.performanceTracker.RewardsRoot(targetRewardsEpoch) + if err != nil { + return iotago.Identifier{}, iotago.Identifier{}, ierrors.Wrapf(err, "failed to calculate rewards root for epoch %d", targetRewardsEpoch) + } } o.lastCommittedSlot = slot - return + return committeeRoot, rewardsRoot, nil } func (o *SybilProtection) committeeRoot(targetCommitteeEpoch iotago.EpochIndex) (committeeRoot iotago.Identifier, err error) { diff --git a/pkg/storage/prunable/bucket_manager.go b/pkg/storage/prunable/bucket_manager.go index 7a0a08b9e..78e7920fa 100644 --- a/pkg/storage/prunable/bucket_manager.go +++ b/pkg/storage/prunable/bucket_manager.go @@ -269,10 +269,10 @@ func (b *BucketManager) DeleteBucket(epoch iotago.EpochIndex) (deleted bool) { } // PruneSlots prunes the data of all slots in the range [from, to] in the given epoch. -func (b *BucketManager) PruneSlots(epoch iotago.EpochIndex, pruningRange [2]iotago.SlotIndex) error { +func (b *BucketManager) PruneSlots(epoch iotago.EpochIndex, startPruneRange iotago.SlotIndex, endPruneRange iotago.SlotIndex) error { epochStore := b.getDBInstance(epoch).KVStore() - for slot := pruningRange[0]; slot <= pruningRange[1]; slot++ { + for slot := startPruneRange; slot <= endPruneRange; slot++ { if err := epochStore.DeletePrefix(slot.MustBytes()); err != nil { return ierrors.Wrapf(err, "error while clearing slot %d in bucket for epoch %d", slot, epoch) } diff --git a/pkg/storage/prunable/prunable.go b/pkg/storage/prunable/prunable.go index 595a1c573..daa911be1 100644 --- a/pkg/storage/prunable/prunable.go +++ b/pkg/storage/prunable/prunable.go @@ -141,16 +141,16 @@ func (p *Prunable) Flush() { } } -func (p *Prunable) Rollback(targetEpoch iotago.EpochIndex, pruningRange [2]iotago.SlotIndex) error { - if err := p.prunableSlotStore.PruneSlots(targetEpoch, pruningRange); err != nil { - return ierrors.Wrapf(err, "failed to prune slots in range [%d, %d] from target epoch %d", pruningRange[0], pruningRange[1], targetEpoch) +func (p *Prunable) Rollback(targetEpoch iotago.EpochIndex, startPruneRange iotago.SlotIndex, endPruneRange iotago.SlotIndex) error { + if err := p.prunableSlotStore.PruneSlots(targetEpoch, startPruneRange, endPruneRange); err != nil { + return ierrors.Wrapf(err, "failed to prune slots in range [%d, %d] from target epoch %d", startPruneRange, endPruneRange, targetEpoch) } - if err := p.rollbackCommitteesCandidates(targetEpoch, pruningRange[0]-1); err != nil { + if err := p.rollbackCommitteesCandidates(targetEpoch, startPruneRange); err != nil { return ierrors.Wrapf(err, "failed to rollback committee candidates to target epoch %d", targetEpoch) } - lastPrunedCommitteeEpoch, err := p.rollbackCommitteeEpochs(targetEpoch+1, pruningRange[0]-1) + lastPrunedCommitteeEpoch, err := p.rollbackCommitteeEpochs(targetEpoch+1, startPruneRange-1) if err != nil { return ierrors.Wrapf(err, "failed to rollback committee epochs to target epoch %d", targetEpoch) } @@ -228,7 +228,7 @@ func (p *Prunable) shouldRollbackCommittee(epoch iotago.EpochIndex, targetSlot i return true, nil } -func (p *Prunable) rollbackCommitteesCandidates(targetSlotEpoch iotago.EpochIndex, targetSlot iotago.SlotIndex) error { +func (p 
*Prunable) rollbackCommitteesCandidates(targetSlotEpoch iotago.EpochIndex, deletionStartSlot iotago.SlotIndex) error { candidatesToRollback := make([]iotago.AccountID, 0) candidates, err := p.CommitteeCandidates(targetSlotEpoch) @@ -236,23 +236,8 @@ func (p *Prunable) rollbackCommitteesCandidates(targetSlotEpoch iotago.EpochInde return ierrors.Wrap(err, "failed to get candidates store") } - var innerErr error - if err = candidates.Iterate(kvstore.EmptyPrefix, func(key kvstore.Key, value kvstore.Value) bool { - accountID, _, err := iotago.AccountIDFromBytes(key) - if err != nil { - innerErr = err - - return false - } - - candidacySlot, _, err := iotago.SlotIndexFromBytes(value) - if err != nil { - innerErr = err - - return false - } - - if candidacySlot < targetSlot { + if err = candidates.Iterate(kvstore.EmptyPrefix, func(accountID iotago.AccountID, candidacySlot iotago.SlotIndex) bool { + if candidacySlot >= deletionStartSlot { candidatesToRollback = append(candidatesToRollback, accountID) } @@ -261,13 +246,9 @@ func (p *Prunable) rollbackCommitteesCandidates(targetSlotEpoch iotago.EpochInde return ierrors.Wrap(err, "failed to collect candidates to rollback") } - if innerErr != nil { - return ierrors.Wrap(innerErr, "failed to iterate through candidates") - } - for _, candidateToRollback := range candidatesToRollback { - if err = candidates.Delete(candidateToRollback[:]); err != nil { - return ierrors.Wrapf(innerErr, "failed to rollback candidate %s", candidateToRollback) + if err = candidates.Delete(candidateToRollback); err != nil { + return ierrors.Wrapf(err, "failed to rollback candidate %s", candidateToRollback) } } diff --git a/pkg/storage/prunable/prunable_slot.go b/pkg/storage/prunable/prunable_slot.go index 9297c5977..e2e752218 100644 --- a/pkg/storage/prunable/prunable_slot.go +++ b/pkg/storage/prunable/prunable_slot.go @@ -53,10 +53,20 @@ func (p *Prunable) RootBlocks(slot iotago.SlotIndex) (*slotstore.Store[iotago.Bl ), nil } -func (p *Prunable) CommitteeCandidates(epoch iotago.EpochIndex) (kvstore.KVStore, error) { +func (p *Prunable) CommitteeCandidates(epoch iotago.EpochIndex) (*kvstore.TypedStore[iotago.AccountID, iotago.SlotIndex], error) { // Use the first slot of an epoch to avoid random clashes with other keys. // Candidates belong to an epoch, but we store them here so that they're pruned more quickly and easily without unnecessary key iteration. 
- return p.prunableSlotStore.Get(epoch, byteutils.ConcatBytes(p.apiProvider.APIForEpoch(epoch).TimeProvider().EpochStart(epoch).MustBytes(), kvstore.Realm{epochPrefixCommitteeCandidates})) + kv, err := p.prunableSlotStore.Get(epoch, byteutils.ConcatBytes(p.apiProvider.APIForEpoch(epoch).TimeProvider().EpochStart(epoch).MustBytes(), kvstore.Realm{epochPrefixCommitteeCandidates})) + if err != nil { + return nil, ierrors.Wrapf(database.ErrEpochPruned, "could not get committee candidates with epoch %d", epoch) + } + + return kvstore.NewTypedStore(kv, + iotago.AccountID.Bytes, + iotago.AccountIDFromBytes, + iotago.SlotIndex.Bytes, + iotago.SlotIndexFromBytes, + ), nil } func (p *Prunable) Mutations(slot iotago.SlotIndex) (kvstore.KVStore, error) { diff --git a/pkg/storage/storage_prunable.go b/pkg/storage/storage_prunable.go index 646fb3a81..08c749c8f 100644 --- a/pkg/storage/storage_prunable.go +++ b/pkg/storage/storage_prunable.go @@ -30,7 +30,7 @@ func (s *Storage) Committee() *epochstore.Store[*account.Accounts] { return s.prunable.Committee() } -func (s *Storage) CommitteeCandidates(epoch iotago.EpochIndex) (kvstore.KVStore, error) { +func (s *Storage) CommitteeCandidates(epoch iotago.EpochIndex) (*kvstore.TypedStore[iotago.AccountID, iotago.SlotIndex], error) { return s.prunable.CommitteeCandidates(epoch) } @@ -134,14 +134,18 @@ func (s *Storage) Rollback(targetSlot iotago.SlotIndex) error { return s.prunable.Rollback(s.pruningRange(targetSlot)) } -func (s *Storage) pruningRange(targetSlot iotago.SlotIndex) (targetEpoch iotago.EpochIndex, pruneRange [2]iotago.SlotIndex) { - epochOfSlot := func(slot iotago.SlotIndex) iotago.EpochIndex { - return s.Settings().APIProvider().APIForSlot(slot).TimeProvider().EpochFromSlot(slot) - } +func (s *Storage) pruningRange(targetSlot iotago.SlotIndex) (targetEpoch iotago.EpochIndex, startSlot iotago.SlotIndex, endSlot iotago.SlotIndex) { + timeProvider := s.Settings().APIProvider().APIForSlot(targetSlot).TimeProvider() + + targetEpoch = timeProvider.EpochFromSlot(targetSlot) + + startSlot = targetSlot + 1 + endSlot = s.Settings().LatestStoredSlot() - if targetEpoch, pruneRange = epochOfSlot(targetSlot), [2]iotago.SlotIndex{targetSlot + 1, s.Settings().LatestStoredSlot()}; epochOfSlot(pruneRange[0]) > targetEpoch { - pruneRange[1] = s.Settings().APIProvider().APIForEpoch(targetEpoch).TimeProvider().EpochEnd(targetEpoch) + // If startSlot is in the next epoch, there's no need to prune a range of slots as the next epoch is going to be pruned on epoch-level anyway. 
+ if timeProvider.EpochFromSlot(startSlot) > targetEpoch { + endSlot = 0 } - return targetEpoch, pruneRange + return targetEpoch, startSlot, endSlot } diff --git a/pkg/tests/booker_test.go b/pkg/tests/booker_test.go index 0d3197dfb..abd3db23b 100644 --- a/pkg/tests/booker_test.go +++ b/pkg/tests/booker_test.go @@ -405,7 +405,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) { // Advance both nodes at the edge of slot 1 committability { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{2, 3, 4}, 1, "block2.4", ts.Nodes("node1", "node2"), false, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{2, 3, 4}, 1, "block2.4", ts.Nodes("node1", "node2"), false, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithProtocolParameters(ts.API.ProtocolParameters()), @@ -424,7 +424,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) { ts.Block("block2.tx1"): {"tx1"}, }, node1, node2) - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "4.0", ts.Nodes("node1"), false, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "4.0", ts.Nodes("node1"), false, false) ts.AssertBlocksExist(ts.BlocksWithPrefix("5.0"), true, ts.Nodes()...) } @@ -440,7 +440,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) { ts.SplitIntoPartitions(partitions) // Only node2 will commit after issuing this one - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, false) ts.AssertNodeState(ts.Nodes("node1"), testsuite.WithProtocolParameters(ts.API.ProtocolParameters()), @@ -531,7 +531,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) { // Sync up the nodes to he same point and check consistency between them. { // Let node1 catch up with commitment 1 - ts.IssueBlocksAtSlots("5.1", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, nil) + ts.IssueBlocksAtSlots("5.1", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, false) ts.AssertNodeState(ts.Nodes("node1", "node2"), testsuite.WithProtocolParameters(ts.API.ProtocolParameters()), @@ -577,7 +577,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) { { ts.AssertTransactionsExist(wallet.Transactions("tx1", "tx2", "tx4"), true, node1, node2) - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{6, 7, 8, 9, 10}, 5, "5.1", ts.Nodes("node1", "node2"), false, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{6, 7, 8, 9, 10}, 5, "5.1", ts.Nodes("node1", "node2"), false, false) ts.AssertNodeState(ts.Nodes("node1", "node2"), testsuite.WithProtocolParameters(ts.API.ProtocolParameters()), @@ -662,7 +662,7 @@ func Test_SpendPendingCommittedRace(t *testing.T) { // Advance both nodes at the edge of slot 1 committability { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{2, 3, 4}, 1, "Genesis", ts.Nodes("node1", "node2"), false, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{2, 3, 4}, 1, "Genesis", ts.Nodes("node1", "node2"), false, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithProtocolParameters(ts.API.ProtocolParameters()), @@ -674,7 +674,7 @@ func Test_SpendPendingCommittedRace(t *testing.T) { ts.SetCurrentSlot(5) ts.IssueValidationBlockWithHeaderOptions("", node1, mock.WithSlotCommitment(genesisCommitment), mock.WithStrongParents(ts.BlockIDsWithPrefix("4.0")...)) - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "4.0", ts.Nodes("node1"), false, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "4.0", ts.Nodes("node1"), false, false) ts.AssertBlocksExist(ts.BlocksWithPrefix("5.0"), true, ts.Nodes()...) 
} @@ -689,7 +689,7 @@ func Test_SpendPendingCommittedRace(t *testing.T) { ts.SplitIntoPartitions(partitions) // Only node2 will commit after issuing this one - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, false) ts.AssertNodeState(ts.Nodes("node1"), testsuite.WithProtocolParameters(ts.API.ProtocolParameters()), @@ -731,7 +731,7 @@ func Test_SpendPendingCommittedRace(t *testing.T) { // Sync up the nodes to he same point and check consistency between them. { // Let node1 catch up with commitment 1 - ts.IssueBlocksAtSlots("5.1", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, nil) + ts.IssueBlocksAtSlots("5.1", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, false) ts.AssertNodeState(ts.Nodes("node1", "node2"), testsuite.WithProtocolParameters(ts.API.ProtocolParameters()), @@ -763,7 +763,7 @@ func Test_SpendPendingCommittedRace(t *testing.T) { ts.AssertTransactionsExist(wallet.Transactions("tx1", "tx2"), true, node1, node2) ts.AssertTransactionsInCachePending(wallet.Transactions("tx1", "tx2"), true, node1, node2) - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{6, 7, 8, 9, 10}, 5, "5.1", ts.Nodes("node1", "node2"), false, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{6, 7, 8, 9, 10}, 5, "5.1", ts.Nodes("node1", "node2"), false, false) ts.AssertNodeState(ts.Nodes("node1", "node2"), testsuite.WithProtocolParameters(ts.API.ProtocolParameters()), diff --git a/pkg/tests/committee_rotation_test.go b/pkg/tests/committee_rotation_test.go index e8bd33b50..a516c89e3 100644 --- a/pkg/tests/committee_rotation_test.go +++ b/pkg/tests/committee_rotation_test.go @@ -78,12 +78,12 @@ func Test_TopStakersRotation(t *testing.T) { // Select committee for epoch 1 and test candidacy announcements at different times. { - ts.IssueBlocksAtSlots("wave-1:", []iotago.SlotIndex{1, 2, 3, 4}, 4, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("wave-1:", []iotago.SlotIndex{1, 2, 3, 4}, 4, "Genesis", ts.Nodes(), true, false) ts.IssueCandidacyAnnouncementInSlot("node1-candidacy:1", 4, "wave-1:4.3", ts.Wallet("node1")) ts.IssueCandidacyAnnouncementInSlot("node4-candidacy:1", 5, "node1-candidacy:1", ts.Wallet("node4")) - ts.IssueBlocksAtSlots("wave-2:", []iotago.SlotIndex{5, 6, 7, 8, 9}, 4, "node4-candidacy:1", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("wave-2:", []iotago.SlotIndex{5, 6, 7, 8, 9}, 4, "node4-candidacy:1", ts.Nodes(), true, false) ts.IssueCandidacyAnnouncementInSlot("node4-candidacy:2", 9, "wave-2:9.3", ts.Wallet("node4")) ts.IssueCandidacyAnnouncementInSlot("node5-candidacy:1", 9, "node4-candidacy:2", ts.Wallet("node5")) @@ -91,7 +91,7 @@ func Test_TopStakersRotation(t *testing.T) { // This candidacy should be considered as it's announced at the last possible slot. ts.IssueCandidacyAnnouncementInSlot("node6-candidacy:1", 10, "node5-candidacy:1", ts.Wallet("node6")) - ts.IssueBlocksAtSlots("wave-3:", []iotago.SlotIndex{10}, 4, "node6-candidacy:1", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("wave-3:", []iotago.SlotIndex{10}, 4, "node6-candidacy:1", ts.Nodes(), true, false) // Those candidacies should not be considered as they're issued after EpochNearingThreshold (slot 10). ts.IssueCandidacyAnnouncementInSlot("node2-candidacy:1", 11, "wave-3:10.3", ts.Wallet("node2")) @@ -107,7 +107,7 @@ func Test_TopStakersRotation(t *testing.T) { ts.Node("node6").Validator.AccountID, }, ts.Nodes()...) 
- ts.IssueBlocksAtSlots("wave-4:", []iotago.SlotIndex{11, 12, 13, 14, 15, 16, 17}, 4, "node5-candidacy:2", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("wave-4:", []iotago.SlotIndex{11, 12, 13, 14, 15, 16, 17}, 4, "node5-candidacy:2", ts.Nodes(), true, false) ts.AssertLatestFinalizedSlot(14, ts.Nodes()...) ts.AssertSybilProtectionCommittee(1, []iotago.AccountID{ @@ -119,7 +119,7 @@ func Test_TopStakersRotation(t *testing.T) { // Do not announce new candidacies for epoch 2 but finalize slots. The committee should be the reused. { - ts.IssueBlocksAtSlots("wave-5:", []iotago.SlotIndex{18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}, 4, "wave-4:17.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("wave-5:", []iotago.SlotIndex{18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}, 4, "wave-4:17.3", ts.Nodes(), true, false) ts.AssertSybilProtectionCandidates(1, []iotago.AccountID{}, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(28, ts.Nodes()...) @@ -134,13 +134,13 @@ func Test_TopStakersRotation(t *testing.T) { // Do not finalize slots in time for epoch 3. The committee should be the reused. Even though there are candidates. { // Issue blocks to remove the inactive committee members. - ts.IssueBlocksAtSlots("wave-6:", []iotago.SlotIndex{31, 32}, 4, "wave-5:30.3", ts.Nodes("node5", "node7"), false, nil) + ts.IssueBlocksAtSlots("wave-6:", []iotago.SlotIndex{31, 32}, 4, "wave-5:30.3", ts.Nodes("node5", "node7"), false, false) ts.AssertLatestCommitmentSlotIndex(30, ts.Nodes()...) ts.IssueCandidacyAnnouncementInSlot("node6-candidacy:2", 33, "wave-6:32.3", ts.Wallet("node6")) // Issue the rest of the epoch just before we reach epoch end - maxCommittableAge. - ts.IssueBlocksAtSlots("wave-7:", []iotago.SlotIndex{33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45}, 4, "node6-candidacy:2", ts.Nodes("node5"), true, nil) + ts.IssueBlocksAtSlots("wave-7:", []iotago.SlotIndex{33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45}, 4, "node6-candidacy:2", ts.Nodes("node5"), true, false) ts.AssertLatestCommitmentSlotIndex(43, ts.Nodes()...) // Even though we have a candidate, the committee should be reused as we did not finalize at epochNearingThreshold before epoch end - maxCommittableAge was committed @@ -157,11 +157,11 @@ func Test_TopStakersRotation(t *testing.T) { // Rotate committee to smaller committee due to too few candidates available. { - ts.IssueBlocksAtSlots("wave-8:", []iotago.SlotIndex{46, 47, 48, 49, 50, 51, 52, 53, 54, 55}, 4, "wave-7:45.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("wave-8:", []iotago.SlotIndex{46, 47, 48, 49, 50, 51, 52, 53, 54, 55}, 4, "wave-7:45.3", ts.Nodes(), true, false) ts.IssueCandidacyAnnouncementInSlot("node3-candidacy:2", 56, "wave-8:55.3", ts.Wallet("node3")) - ts.IssueBlocksAtSlots("wave-8:", []iotago.SlotIndex{56, 57, 58, 59, 60, 61}, 4, "node3-candidacy:2", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("wave-8:", []iotago.SlotIndex{56, 57, 58, 59, 60, 61}, 4, "node3-candidacy:2", ts.Nodes(), true, false) ts.AssertLatestCommitmentSlotIndex(59, ts.Nodes()...) ts.AssertLatestFinalizedSlot(58, ts.Nodes()...) diff --git a/pkg/tests/loss_of_acceptance_test.go b/pkg/tests/loss_of_acceptance_test.go index 884b8d823..8cbd6cb42 100644 --- a/pkg/tests/loss_of_acceptance_test.go +++ b/pkg/tests/loss_of_acceptance_test.go @@ -59,7 +59,7 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { // Need to issue to slot 52 so that all other nodes can warp sync up to slot 49 and then commit slot 50 themselves. 
{ - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{51, 52}, 2, "block0", ts.Nodes("node0"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{51, 52}, 2, "block0", ts.Nodes("node0"), true, false) ts.AssertLatestCommitmentSlotIndex(50, ts.Nodes()...) ts.AssertEqualStoredCommitmentAtIndex(50, ts.Nodes()...) @@ -68,7 +68,7 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { // Continue issuing on all nodes for a few slots. { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{53, 54, 55, 56, 57}, 3, "52.1", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{53, 54, 55, 56, 57}, 3, "52.1", ts.Nodes(), true, false) ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("57.0"), true, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(55, ts.Nodes()...) @@ -87,7 +87,7 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { // Continue issuing on all nodes for a few slots. { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{58, 59}, 3, "57.2", ts.Nodes("node0", "node1", "node2"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{58, 59}, 3, "57.2", ts.Nodes("node0", "node1", "node2"), true, false) ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("59.0"), true, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(57, ts.Nodes()...) @@ -129,7 +129,7 @@ func TestLossOfAcceptanceFromSnapshot(t *testing.T) { // Issue up to slot 10, committing slot 8. { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 3, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 3, "Genesis", ts.Nodes(), true, false) ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("10.0"), true, ts.Nodes()...) ts.AssertEqualStoredCommitmentAtIndex(8, ts.Nodes()...) @@ -169,7 +169,7 @@ func TestLossOfAcceptanceFromSnapshot(t *testing.T) { // Need to issue to slot 22 so that all other nodes can warp sync up to slot 19 and then commit slot 20 themselves. { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{21, 22}, 2, "block0", ts.Nodes("node0-restarted"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{21, 22}, 2, "block0", ts.Nodes("node0-restarted"), true, false) ts.AssertEqualStoredCommitmentAtIndex(20, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(20, ts.Nodes()...) @@ -181,7 +181,7 @@ func TestLossOfAcceptanceFromSnapshot(t *testing.T) { // are not used again. ts.SetAutomaticTransactionIssuingCounters(node2.Partition, 24) - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{23, 24, 25}, 3, "22.1", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{23, 24, 25}, 3, "22.1", ts.Nodes(), true, false) ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("25.0"), true, ts.Nodes()...) ts.AssertEqualStoredCommitmentAtIndex(23, ts.Nodes()...) @@ -223,7 +223,7 @@ func TestLossOfAcceptanceWithRestartFromDisk(t *testing.T) { // Issue up to slot 10, committing slot 8. { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 3, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 3, "Genesis", ts.Nodes(), true, false) ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("10.0"), true, ts.Nodes()...) ts.AssertEqualStoredCommitmentAtIndex(8, ts.Nodes()...) @@ -259,7 +259,7 @@ func TestLossOfAcceptanceWithRestartFromDisk(t *testing.T) { // Need to issue to slot 22 so that all other nodes can warp sync up to slot 19 and then commit slot 20 themselves. 
{ - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{21, 22}, 2, "block0", ts.Nodes("node0-restarted"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{21, 22}, 2, "block0", ts.Nodes("node0-restarted"), true, false) ts.AssertEqualStoredCommitmentAtIndex(20, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(20, ts.Nodes()...) @@ -271,7 +271,7 @@ func TestLossOfAcceptanceWithRestartFromDisk(t *testing.T) { // are not used again. ts.SetAutomaticTransactionIssuingCounters(node2.Partition, 24) - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{23, 24, 25}, 3, "22.1", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{23, 24, 25}, 3, "22.1", ts.Nodes(), true, false) ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("25.0"), true, ts.Nodes()...) ts.AssertEqualStoredCommitmentAtIndex(23, ts.Nodes()...) diff --git a/pkg/tests/protocol_engine_rollback_test.go b/pkg/tests/protocol_engine_rollback_test.go index 0de8dc8f6..60a491111 100644 --- a/pkg/tests/protocol_engine_rollback_test.go +++ b/pkg/tests/protocol_engine_rollback_test.go @@ -137,7 +137,7 @@ func TestProtocol_EngineRollbackFinalization(t *testing.T) { // Issue up to slot 11 - just before committee selection for the next epoch. // Committee will be reused at slot 10 is finalized or slot 12 is committed, whichever happens first. { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(8), @@ -163,7 +163,7 @@ func TestProtocol_EngineRollbackFinalization(t *testing.T) { } { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16}, 4, "P0:11.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16}, 4, "P0:11.3", ts.Nodes(), true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(13), @@ -330,7 +330,7 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) { // Issue up to slot 11 - just before committee selection for the next epoch. // Committee will be reused when slot 10 is finalized or slot 12 is committed, whichever happens first. { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(8), @@ -363,7 +363,7 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) { } { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(8), @@ -530,7 +530,7 @@ func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) { // Issue up to slot 11 - just before committee selection for the next epoch. // Committee will be reused at slot 10 is finalized or slot 12 is committed, whichever happens first. 
{ - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(8), @@ -563,7 +563,7 @@ func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) { } { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16, 17, 18, 19}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16, 17, 18, 19}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(8), @@ -730,7 +730,7 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T // Issue up to slot 11 - just before committee selection for the next epoch. // Committee will be reused at slot 10 is finalized or slot 12 is committed, whichever happens first. { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(8), @@ -763,7 +763,7 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T } { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(8), diff --git a/pkg/tests/protocol_engine_switching_test.go b/pkg/tests/protocol_engine_switching_test.go index 45cd86079..81fcd675a 100644 --- a/pkg/tests/protocol_engine_switching_test.go +++ b/pkg/tests/protocol_engine_switching_test.go @@ -16,9 +16,11 @@ import ( "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" "github.com/iotaledger/iota-core/pkg/protocol/engine" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" + "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization/slotnotarization" "github.com/iotaledger/iota-core/pkg/protocol/engine/syncmanager/trivialsyncmanager" "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager" mock2 "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager/mock" + "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager/topstakers" "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/sybilprotectionv1" "github.com/iotaledger/iota-core/pkg/storage" "github.com/iotaledger/iota-core/pkg/testsuite" @@ -142,7 +144,6 @@ func TestProtocol_EngineSwitching(t *testing.T) { } // Verify that nodes have the expected states. - { genesisCommitment := iotago.NewEmptyCommitment(ts.API) genesisCommitment.ReferenceManaCost = ts.API.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost @@ -164,7 +165,7 @@ func TestProtocol_EngineSwitching(t *testing.T) { // Issue up to slot 13 in P0 (main partition with all nodes) and verify that the nodes have the expected states. 
{ - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 4, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 4, "Genesis", ts.Nodes(), true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(10), @@ -219,7 +220,7 @@ func TestProtocol_EngineSwitching(t *testing.T) { // Issue blocks in partition 1. { - ts.IssueBlocksAtSlots("P1:", []iotago.SlotIndex{14, 15, 16, 17, 18, 19, 20}, 4, "P0:13.3", nodesP1[:len(nodesP1)-1], true, nil) + ts.IssueBlocksAtSlots("P1:", []iotago.SlotIndex{14, 15, 16, 17, 18, 19, 20}, 4, "P0:13.3", nodesP1[:len(nodesP1)-1], true, false) ts.AssertNodeState(nodesP1, testsuite.WithLatestFinalizedSlot(17), @@ -273,7 +274,7 @@ func TestProtocol_EngineSwitching(t *testing.T) { // Issue blocks in partition 2. { - ts.IssueBlocksAtSlots("P2:", []iotago.SlotIndex{14, 15, 16, 17, 18, 19, 20}, 4, "P0:13.3", nodesP2[:len(nodesP2)-1], true, nil) + ts.IssueBlocksAtSlots("P2:", []iotago.SlotIndex{14, 15, 16, 17, 18, 19, 20}, 4, "P0:13.3", nodesP2[:len(nodesP2)-1], true, false) ts.AssertNodeState(nodesP2, testsuite.WithLatestFinalizedSlot(10), @@ -380,3 +381,269 @@ func TestProtocol_EngineSwitching(t *testing.T) { ts.AssertEqualStoredCommitmentAtIndex(expectedCommittedSlotAfterPartitionMerge, ts.Nodes()...) } + +func TestProtocol_EngineSwitching_CommitteeRotation(t *testing.T) { + ts := testsuite.NewTestSuite(t, + testsuite.WithWaitFor(30*time.Second), + + testsuite.WithProtocolParametersOptions( + iotago.WithTimeProviderOptions( + 0, + testsuite.GenesisTimeWithOffsetBySlots(1000, testsuite.DefaultSlotDurationInSeconds), + testsuite.DefaultSlotDurationInSeconds, + 4, // 16 slots per epoch + ), + iotago.WithLivenessOptions( + 10, + 10, + 3, + 5, + 10, + ), + ), + ) + defer ts.Shutdown() + + node0 := ts.AddValidatorNode("node0") + node1 := ts.AddValidatorNode("node1") + node2 := ts.AddValidatorNode("node2") + node3 := ts.AddValidatorNode("node3") + + const expectedCommittedSlotAfterPartitionMerge = 19 + nodesP1 := []*mock.Node{node0, node1, node2} + nodesP2 := []*mock.Node{node3} + + nodeOpts := []options.Option[protocol.Protocol]{ + protocol.WithNotarizationProvider( + slotnotarization.NewProvider(), + ), + protocol.WithSybilProtectionProvider( + sybilprotectionv1.NewProvider( + sybilprotectionv1.WithSeatManagerProvider( + topstakers.NewProvider( + // We need to make sure that inactive nodes are evicted from the committee to continue acceptance. + topstakers.WithActivityWindow(10 * time.Second), + ), + ), + ), + ), + protocol.WithSyncManagerProvider( + trivialsyncmanager.NewProvider( + trivialsyncmanager.WithBootstrappedFunc(func(e *engine.Engine) bool { + return e.Storage.Settings().LatestCommitment().Slot() >= expectedCommittedSlotAfterPartitionMerge && e.Notarization.IsBootstrapped() + }), + ), + ), + protocol.WithStorageOptions( + storage.WithPruningDelay(20), // make sure nodes don't prune + ), + } + + ts.Run(false, map[string][]options.Option[protocol.Protocol]{ + "node0": nodeOpts, + "node1": nodeOpts, + "node2": nodeOpts, + "node3": nodeOpts, + }) + + // Verify that nodes have the expected states after startup. 
+ { + genesisCommitment := iotago.NewEmptyCommitment(ts.API) + genesisCommitment.ReferenceManaCost = ts.API.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost + ts.AssertNodeState(ts.Nodes(), + testsuite.WithSnapshotImported(true), + testsuite.WithProtocolParameters(ts.API.ProtocolParameters()), + testsuite.WithLatestCommitment(genesisCommitment), + testsuite.WithLatestFinalizedSlot(0), + testsuite.WithChainID(genesisCommitment.MustID()), + testsuite.WithStorageCommitments([]*iotago.Commitment{genesisCommitment}), + + testsuite.WithSybilProtectionCommittee(0, ts.AccountsOfNodes("node0", "node1", "node2", "node3")), + testsuite.WithSybilProtectionOnlineCommittee(ts.SeatOfNodes(0, "node0", "node1", "node2", "node3")...), + testsuite.WithEvictedSlot(0), + testsuite.WithActiveRootBlocks(ts.Blocks("Genesis")), + testsuite.WithStorageRootBlocks(ts.Blocks("Genesis")), + ) + } + + // Issue up to slot 8 in P0 (main partition with all nodes) and verify that the nodes have the expected states. + { + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2}, 4, "Genesis", ts.Nodes(), true, true) + + // Register candidates (node1, node2) in P0 for epoch 1. + { + ts.IssueCandidacyAnnouncementInSlot("P0:node1-candidacy:1", 3, "P0:2.3", ts.Wallet("node1")) + ts.IssueCandidacyAnnouncementInSlot("P0:node2-candidacy:1", 3, "P0:node1-candidacy:1", ts.Wallet("node2")) + } + + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{3, 4, 5, 6, 7, 8}, 4, "P0:node2-candidacy:1", ts.Nodes(), true, true) + + ts.AssertNodeState(ts.Nodes(), + testsuite.WithLatestFinalizedSlot(4), + testsuite.WithLatestCommitmentSlotIndex(5), + testsuite.WithEqualStoredCommitmentAtIndex(5), + testsuite.WithLatestCommitmentCumulativeWeight(4), // 4 * slot 5 + testsuite.WithSybilProtectionOnlineCommittee(ts.SeatOfNodes(5, "node0", "node1", "node2", "node3")...), + testsuite.WithSybilProtectionCandidates(0, ts.AccountsOfNodes("node1", "node2")), + testsuite.WithEvictedSlot(5), + ) + + ts.AssertAttestationsForSlot(5, ts.Blocks("P0:5.3-node0", "P0:5.3-node1", "P0:5.3-node2", "P0:5.3-node3"), ts.Nodes()...) + + ts.AssertStrongTips(ts.Blocks("P0:8.3-node0", "P0:8.3-node1", "P0:8.3-node2", "P0:8.3-node3"), ts.Nodes()...) + + ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) + } + + // Split into partitions P1 and P2. + ts.SplitIntoPartitions(map[string][]*mock.Node{ + "P1": nodesP1, + "P2": nodesP2, + }) + + // Issue blocks in partition 1. + { + ts.IssueBlocksAtSlots("P1:", []iotago.SlotIndex{9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, 4, "P0:8.3", nodesP1, true, true) + + ts.AssertNodeState(nodesP1, + testsuite.WithLatestFinalizedSlot(16), + testsuite.WithLatestCommitmentSlotIndex(17), + testsuite.WithEqualStoredCommitmentAtIndex(17), + testsuite.WithLatestCommitmentCumulativeWeight(43), // 4 + see attestation assertions below for how to compute + testsuite.WithSybilProtectionOnlineCommittee(ts.SeatOfNodes(17, "node1", "node2")...), + testsuite.WithEvictedSlot(17), + ) + + // Assert committee in epoch 1. + ts.AssertSybilProtectionCandidates(0, ts.AccountsOfNodes("node1", "node2"), nodesP1...) + ts.AssertSybilProtectionCommittee(1, ts.AccountsOfNodes("node1", "node2"), nodesP1...) // we selected a new committee for epoch 1 + + // Assert committee in epoch 2. + ts.AssertSybilProtectionCandidates(1, iotago.AccountIDs{}, nodesP1...) + + ts.AssertAttestationsForSlot(6, ts.Blocks("P0:6.3-node0", "P0:6.3-node1", "P0:6.3-node2", "P0:6.3-node3"), nodesP1...) 
// Committee in epoch 1 is all nodes + ts.AssertAttestationsForSlot(7, ts.Blocks("P0:7.3-node0", "P0:7.3-node1", "P0:7.3-node2", "P0:7.3-node3"), nodesP1...) // Committee in epoch 1 is all nodes + ts.AssertAttestationsForSlot(8, ts.Blocks("P0:8.3-node0", "P0:8.3-node1", "P0:8.3-node2", "P0:8.3-node3"), nodesP1...) // Committee in epoch 1 is all nodes + ts.AssertAttestationsForSlot(9, ts.Blocks("P1:9.3-node0", "P1:9.3-node1", "P1:9.3-node2", "P0:8.3-node3"), nodesP1...) // Committee in epoch 1 is all nodes; and we carry attestations of others because of window + ts.AssertAttestationsForSlot(10, ts.Blocks("P1:10.3-node0", "P1:10.3-node1", "P1:10.3-node2"), nodesP1...) // Committee in epoch 1 is all nodes; node3 is in P2 + ts.AssertAttestationsForSlot(11, ts.Blocks("P1:11.3-node0", "P1:11.3-node1", "P1:11.3-node2"), nodesP1...) // Committee in epoch 1 is all nodes; node3 is in P2 + ts.AssertAttestationsForSlot(12, ts.Blocks("P1:12.3-node0", "P1:12.3-node1", "P1:12.3-node2"), nodesP1...) // Committee in epoch 1 is all nodes; node3 is in P2 + ts.AssertAttestationsForSlot(13, ts.Blocks("P1:13.3-node0", "P1:13.3-node1", "P1:13.3-node2"), nodesP1...) // Committee in epoch 1 is all nodes; node3 is in P2 + ts.AssertAttestationsForSlot(14, ts.Blocks("P1:14.3-node0", "P1:14.3-node1", "P1:14.3-node2"), nodesP1...) // Committee in epoch 1 is all nodes; node3 is in P2 + ts.AssertAttestationsForSlot(15, ts.Blocks("P1:15.3-node0", "P1:15.3-node1", "P1:15.3-node2"), nodesP1...) // Committee in epoch 1 is all nodes; node3 is in P2 + ts.AssertAttestationsForSlot(16, ts.Blocks("P1:15.3-node0", "P1:16.3-node1", "P1:16.3-node2"), nodesP1...) // We're in Epoch 2 (only node1, node2) but we carry attestations of others because of window + ts.AssertAttestationsForSlot(17, ts.Blocks("P1:17.3-node1", "P1:17.3-node2"), nodesP1...) // Committee in epoch 2 is only node1, node2 + + ts.AssertStrongTips(ts.Blocks("P1:20.3-node0", "P1:20.3-node1", "P1:20.3-node2"), nodesP1...) + + ts.AssertBlocksExist(ts.BlocksWithPrefix("P1"), true, nodesP1...) + ts.AssertBlocksExist(ts.BlocksWithPrefix("P1"), false, nodesP2...) + } + + // Issue blocks in partition 2. + { + ts.IssueBlocksAtSlots("P2:", []iotago.SlotIndex{9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, 4, "P0:8.3", nodesP2, true, true) + + ts.AssertNodeState(nodesP2, + testsuite.WithLatestFinalizedSlot(4), + testsuite.WithLatestCommitmentSlotIndex(17), + testsuite.WithEqualStoredCommitmentAtIndex(17), + // testsuite.WithLatestCommitmentCumulativeWeight(43), // 4 + see attestation assertions below for how to compute + testsuite.WithSybilProtectionOnlineCommittee(ts.SeatOfNodes(17, "node3")...), + testsuite.WithEvictedSlot(17), + ) + + // Assert committee in epoch 1. + ts.AssertSybilProtectionCandidates(0, ts.AccountsOfNodes("node1", "node2"), nodesP2...) + ts.AssertSybilProtectionCommittee(1, ts.AccountsOfNodes("node0", "node1", "node2", "node3"), nodesP2...) // committee was reused due to no finalization at epochNearingThreshold + + // Assert committee in epoch 2. + ts.AssertSybilProtectionCandidates(1, iotago.AccountIDs{}, nodesP2...) + + ts.AssertAttestationsForSlot(6, ts.Blocks("P0:6.3-node0", "P0:6.3-node1", "P0:6.3-node2", "P0:6.3-node3"), nodesP2...) // Committee in epoch 1 is all nodes + ts.AssertAttestationsForSlot(7, ts.Blocks("P0:7.3-node0", "P0:7.3-node1", "P0:7.3-node2", "P0:7.3-node3"), nodesP2...) // Committee in epoch 1 is all nodes + ts.AssertAttestationsForSlot(8, ts.Blocks("P0:8.3-node0", "P0:8.3-node1", "P0:8.3-node2", "P0:8.3-node3"), nodesP2...) 
// Committee in epoch 1 is all nodes + ts.AssertAttestationsForSlot(9, ts.Blocks("P0:8.3-node0", "P0:8.3-node1", "P0:8.3-node2", "P2:9.3-node3"), nodesP2...) // Committee in epoch 1 is all nodes; and we carry attestations of others because of window + ts.AssertAttestationsForSlot(10, ts.Blocks("P2:10.3-node3"), nodesP2...) // Committee in epoch 1 is all nodes; only node3 is in P2 + ts.AssertAttestationsForSlot(11, ts.Blocks("P2:11.3-node3"), nodesP2...) // Committee in epoch 1 is all nodes; only node3 is in P2 + ts.AssertAttestationsForSlot(12, ts.Blocks("P2:12.3-node3"), nodesP2...) // Committee in epoch 1 is all nodes; only node3 is in P2 + ts.AssertAttestationsForSlot(13, ts.Blocks("P2:13.3-node3"), nodesP2...) // Committee in epoch 1 is all nodes; only node3 is in P2 + ts.AssertAttestationsForSlot(14, ts.Blocks("P2:14.3-node3"), nodesP2...) // Committee in epoch 1 is all nodes; only node3 is in P2 + ts.AssertAttestationsForSlot(15, ts.Blocks("P2:15.3-node3"), nodesP2...) // Committee in epoch 1 is all nodes; only node3 is in P2 + ts.AssertAttestationsForSlot(16, ts.Blocks("P2:16.3-node3"), nodesP2...) // Committee in epoch 2 (reused) is all nodes; only node3 is in P2 + ts.AssertAttestationsForSlot(17, ts.Blocks("P2:17.3-node3"), nodesP2...) // Committee in epoch 2 (reused) is all nodes; only node3 is in P2 + + ts.AssertStrongTips(ts.Blocks("P2:20.3-node3"), nodesP2...) + + ts.AssertBlocksExist(ts.BlocksWithPrefix("P2"), true, nodesP2...) + ts.AssertBlocksExist(ts.BlocksWithPrefix("P2"), false, nodesP1...) + } + + // Merge the partitions + { + ts.MergePartitionsToMain() + fmt.Println("\n=========================\nMerged network partitions\n=========================") + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + ctxP1, ctxP1Cancel := context.WithCancel(ctx) + ctxP2, ctxP2Cancel := context.WithCancel(ctx) + + wg := &sync.WaitGroup{} + + // Issue blocks on both partitions after merging the networks. + node0.Validator.IssueActivity(ctxP1, wg, 21, node0) + node1.Validator.IssueActivity(ctxP1, wg, 21, node1) + node2.Validator.IssueActivity(ctxP1, wg, 21, node2) + + node3.Validator.IssueActivity(ctxP2, wg, 21, node3) + + // P1 finalized until slot 16. We do not expect any forks here because our CW is higher than the other partition's. + ts.AssertForkDetectedCount(0, nodesP1...) + // P1's chain is heavier, they should not consider switching the chain. + ts.AssertCandidateEngineActivatedCount(0, nodesP1...) + ctxP2Cancel() // we can stop issuing on P2. + + // Nodes from P2 should switch the chain. + ts.AssertForkDetectedCount(1, nodesP2...) + ts.AssertCandidateEngineActivatedCount(1, nodesP2...) + + // Here we need to let enough time pass for the nodes to sync up the candidate engines and switch them + ts.AssertMainEngineSwitchedCount(1, nodesP2...) + + ctxP1Cancel() + wg.Wait() + } + + // Make sure that nodes that switched their engine still have blocks with prefix P0 from before the fork. + // Those nodes should also have all the blocks from the target fork P1 and should not have blocks from P2. + // This is to make sure that the storage was copied correctly during engine switching. + ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) + ts.AssertBlocksExist(ts.BlocksWithPrefix("P1"), true, ts.Nodes()...) // not all blocks of slot 19 are available on node3 (buffer issue?) + ts.AssertBlocksExist(ts.BlocksWithPrefix("P2"), false, ts.Nodes()...) 
+ + ts.AssertNodeState(ts.Nodes(), + testsuite.WithEqualStoredCommitmentAtIndex(expectedCommittedSlotAfterPartitionMerge), + ) + + // Assert committee in epoch 1. + ts.AssertSybilProtectionCandidates(0, ts.AccountsOfNodes("node1", "node2"), ts.Nodes()...) + ts.AssertSybilProtectionCommittee(1, ts.AccountsOfNodes("node1", "node2"), ts.Nodes()...) // we selected a new committee for epoch 1 + + // Assert committee in epoch 2. + ts.AssertSybilProtectionCandidates(1, iotago.AccountIDs{}, ts.Nodes()...) + + ts.AssertAttestationsForSlot(6, ts.Blocks("P0:6.3-node0", "P0:6.3-node1", "P0:6.3-node2", "P0:6.3-node3"), ts.Nodes()...) // Committee in epoch 1 is all nodes + ts.AssertAttestationsForSlot(7, ts.Blocks("P0:7.3-node0", "P0:7.3-node1", "P0:7.3-node2", "P0:7.3-node3"), ts.Nodes()...) // Committee in epoch 1 is all nodes + ts.AssertAttestationsForSlot(8, ts.Blocks("P0:8.3-node0", "P0:8.3-node1", "P0:8.3-node2", "P0:8.3-node3"), ts.Nodes()...) // Committee in epoch 1 is all nodes + ts.AssertAttestationsForSlot(9, ts.Blocks("P1:9.3-node0", "P1:9.3-node1", "P1:9.3-node2", "P0:8.3-node3"), ts.Nodes()...) // Committee in epoch 1 is all nodes; and we carry attestations of others because of window + ts.AssertAttestationsForSlot(10, ts.Blocks("P1:10.3-node0", "P1:10.3-node1", "P1:10.3-node2"), ts.Nodes()...) // Committee in epoch 1 is all nodes; node3 is in P2 + ts.AssertAttestationsForSlot(11, ts.Blocks("P1:11.3-node0", "P1:11.3-node1", "P1:11.3-node2"), ts.Nodes()...) // Committee in epoch 1 is all nodes; node3 is in P2 + ts.AssertAttestationsForSlot(12, ts.Blocks("P1:12.3-node0", "P1:12.3-node1", "P1:12.3-node2"), ts.Nodes()...) // Committee in epoch 1 is all nodes; node3 is in P2 + ts.AssertAttestationsForSlot(13, ts.Blocks("P1:13.3-node0", "P1:13.3-node1", "P1:13.3-node2"), ts.Nodes()...) // Committee in epoch 1 is all nodes; node3 is in P2 + ts.AssertAttestationsForSlot(14, ts.Blocks("P1:14.3-node0", "P1:14.3-node1", "P1:14.3-node2"), ts.Nodes()...) // Committee in epoch 1 is all nodes; node3 is in P2 + ts.AssertAttestationsForSlot(15, ts.Blocks("P1:15.3-node0", "P1:15.3-node1", "P1:15.3-node2"), ts.Nodes()...) // Committee in epoch 1 is all nodes; node3 is in P2 + ts.AssertAttestationsForSlot(16, ts.Blocks("P1:15.3-node0", "P1:16.3-node1", "P1:16.3-node2"), ts.Nodes()...) // We're in Epoch 2 (only node1, node2) but we carry attestations of others because of window + ts.AssertAttestationsForSlot(17, ts.Blocks("P1:17.3-node1", "P1:17.3-node2"), ts.Nodes()...) // Committee in epoch 2 is only node1, node2 +} diff --git a/pkg/tests/protocol_startup_test.go b/pkg/tests/protocol_startup_test.go index b05178718..c2596c178 100644 --- a/pkg/tests/protocol_startup_test.go +++ b/pkg/tests/protocol_startup_test.go @@ -84,7 +84,7 @@ func Test_BookInCommittedSlot(t *testing.T) { // Epoch 0: issue 4 rows per slot. { - ts.IssueBlocksAtEpoch("", 0, 4, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtEpoch("", 0, 4, "Genesis", ts.Nodes(), true, false) ts.AssertBlocksExist(ts.BlocksWithPrefixes("1", "2", "3", "4", "5", "6", "7"), true, ts.Nodes()...) @@ -198,7 +198,7 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) { // Epoch 0: issue 4 rows per slot. { - ts.IssueBlocksAtEpoch("", 0, 4, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtEpoch("", 0, 4, "Genesis", ts.Nodes(), true, false) ts.AssertBlocksExist(ts.BlocksWithPrefixes("1", "2", "3", "4", "5", "6", "7"), true, ts.Nodes()...) 
@@ -244,7 +244,7 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) { // Epoch 1: skip slot 10 and issue 6 rows per slot { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{8, 9, 11, 12, 13}, 6, "7.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{8, 9, 11, 12, 13}, 6, "7.3", ts.Nodes(), true, false) ts.AssertBlocksExist(ts.BlocksWithPrefixes("8", "9", "11", "12", "13"), true, ts.Nodes()...) @@ -377,7 +377,7 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) { } // Only issue on nodes that have the latest state in memory. - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{14, 15}, 6, "13.5", ts.Nodes("nodeA", "nodeB"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{14, 15}, 6, "13.5", ts.Nodes("nodeA", "nodeB"), true, false) for _, slot := range []iotago.SlotIndex{12, 13} { aliases := lo.Map([]string{"nodeA", "nodeB"}, func(s string) string { @@ -397,11 +397,11 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) { // Epoch 2-4 { // Issue on all nodes except nodeD as its account is not yet known. - ts.IssueBlocksAtEpoch("", 2, 4, "15.5", ts.Nodes(), true, nil) + ts.IssueBlocksAtEpoch("", 2, 4, "15.5", ts.Nodes(), true, false) // Issue on all nodes. - ts.IssueBlocksAtEpoch("", 3, 4, "23.3", ts.Nodes(), true, nil) - ts.IssueBlocksAtEpoch("", 4, 4, "31.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtEpoch("", 3, 4, "23.3", ts.Nodes(), true, false) + ts.IssueBlocksAtEpoch("", 4, 4, "31.3", ts.Nodes(), true, false) var expectedActiveRootBlocks []*blocks.Block for _, slot := range []iotago.SlotIndex{35, 36, 37} { diff --git a/pkg/tests/upgrade_signaling_test.go b/pkg/tests/upgrade_signaling_test.go index 8a94ef17a..0a627d5e1 100644 --- a/pkg/tests/upgrade_signaling_test.go +++ b/pkg/tests/upgrade_signaling_test.go @@ -173,7 +173,7 @@ func Test_Upgrade_Signaling(t *testing.T) { ts.Node("nodeA").SetProtocolParametersHash(hash2) ts.Node("nodeD").SetHighestSupportedVersion(3) ts.Node("nodeD").SetProtocolParametersHash(hash2) - ts.IssueBlocksAtEpoch("", 0, 4, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtEpoch("", 0, 4, "Genesis", ts.Nodes(), true, false) // check account data before all nodes set the current version ts.AssertAccountData(&accounts.AccountData{ @@ -208,7 +208,7 @@ func Test_Upgrade_Signaling(t *testing.T) { ts.Node("nodeD").SetHighestSupportedVersion(5) ts.Node("nodeD").SetProtocolParametersHash(hash2) - ts.IssueBlocksAtEpoch("", 1, 4, "7.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtEpoch("", 1, 4, "7.3", ts.Nodes(), true, false) ts.AssertAccountData(&accounts.AccountData{ ID: ts.Node("nodeA").Validator.AccountID, @@ -229,20 +229,20 @@ func Test_Upgrade_Signaling(t *testing.T) { require.Contains(t, pastAccounts, ts.Node("nodeA").Validator.AccountID) require.Equal(t, model.VersionAndHash{Version: 4, Hash: hash2}, pastAccounts[ts.Node("nodeA").Validator.AccountID].LatestSupportedProtocolVersionAndHash) - ts.IssueBlocksAtEpoch("", 2, 4, "15.3", ts.Nodes(), true, nil) - ts.IssueBlocksAtEpoch("", 3, 4, "23.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtEpoch("", 2, 4, "15.3", ts.Nodes(), true, false) + ts.IssueBlocksAtEpoch("", 3, 4, "23.3", ts.Nodes(), true, false) // Epoch 5: revoke vote of nodeA in last slot of epoch. 
- ts.IssueBlocksAtSlots("", ts.SlotsForEpoch(4)[:ts.API.TimeProvider().EpochDurationSlots()-1], 4, "31.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", ts.SlotsForEpoch(4)[:ts.API.TimeProvider().EpochDurationSlots()-1], 4, "31.3", ts.Nodes(), true, false) ts.Node("nodeA").SetProtocolParametersHash(iotago.Identifier{}) - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{39}, 4, "38.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{39}, 4, "38.3", ts.Nodes(), true, false) ts.Node("nodeA").SetProtocolParametersHash(hash1) // Epoch 6: issue half before restarting and half after restarting. - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{40, 41, 42, 43}, 4, "39.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{40, 41, 42, 43}, 4, "39.3", ts.Nodes(), true, false) { var expectedRootBlocks []*blocks.Block @@ -292,14 +292,14 @@ func Test_Upgrade_Signaling(t *testing.T) { } // Can only continue to issue on nodeA, nodeB, nodeC, nodeD, nodeF. nodeE and nodeG were just restarted and don't have the latest unaccepted state. - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{44}, 4, "43.3", ts.Nodes("nodeA", "nodeB", "nodeC", "nodeD", "nodeF"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{44}, 4, "43.3", ts.Nodes("nodeA", "nodeB", "nodeC", "nodeD", "nodeF"), true, false) // TODO: would be great to dynamically add accounts for later nodes. // Can't issue on nodeG as its account is not known. - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{45, 46, 47}, 4, "44.3", ts.Nodes("nodeA", "nodeB", "nodeC", "nodeD", "nodeF", "nodeE1"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{45, 46, 47}, 4, "44.3", ts.Nodes("nodeA", "nodeB", "nodeC", "nodeD", "nodeF", "nodeE1"), true, false) - ts.IssueBlocksAtEpoch("", 6, 4, "47.3", ts.Nodes("nodeA", "nodeB", "nodeC", "nodeD", "nodeF", "nodeE1"), true, nil) - ts.IssueBlocksAtEpoch("", 7, 4, "55.3", ts.Nodes("nodeA", "nodeB", "nodeC", "nodeD", "nodeF", "nodeE1"), true, nil) + ts.IssueBlocksAtEpoch("", 6, 4, "47.3", ts.Nodes("nodeA", "nodeB", "nodeC", "nodeD", "nodeF", "nodeE1"), true, false) + ts.IssueBlocksAtEpoch("", 7, 4, "55.3", ts.Nodes("nodeA", "nodeB", "nodeC", "nodeD", "nodeF", "nodeE1"), true, false) // Restart node (and add protocol parameters) and add another node from snapshot (also with protocol parameters already set). { @@ -410,8 +410,8 @@ func Test_Upgrade_Signaling(t *testing.T) { // Check that issuing still produces the same commitments on the nodes that upgraded. The nodes that did not upgrade // should not be able to issue and process blocks with the new version. - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{64, 65}, 4, "63.3", ts.Nodes("nodeB", "nodeC"), false, nil) - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{66, 67, 68, 69, 70, 71}, 4, "65.3", ts.Nodes("nodeB", "nodeC"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{64, 65}, 4, "63.3", ts.Nodes("nodeB", "nodeC"), false, false) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{66, 67, 68, 69, 70, 71}, 4, "65.3", ts.Nodes("nodeB", "nodeC"), true, false) // Nodes that did not set up the new protocol parameters are not able to process blocks with the new version. 
ts.AssertNodeState(ts.Nodes("nodeA", "nodeD", "nodeF", "nodeG"), diff --git a/pkg/testsuite/attestations.go b/pkg/testsuite/attestations.go index a9d258a48..4b4670252 100644 --- a/pkg/testsuite/attestations.go +++ b/pkg/testsuite/attestations.go @@ -40,14 +40,14 @@ func (t *TestSuite) AssertAttestationsForSlot(slot iotago.SlotIndex, blocks []*b return ierrors.Wrapf(err, "AssertAttestationsForSlot: %s: error iterating over attestation tree", node.Name) } - if len(expectedAttestations) != len(storedAttestations) { - return ierrors.Errorf("AssertAttestationsForSlot: %s: expected %d attestation(s), got %d", node.Name, len(expectedAttestations), len(storedAttestations)) - } - if !assert.ElementsMatch(t.fakeTesting, expectedAttestations, storedAttestations) { return ierrors.Errorf("AssertAttestationsForSlot: %s: expected attestation(s) %s, got %s", node.Name, expectedAttestations, storedAttestations) } + if len(expectedAttestations) != len(storedAttestations) { + return ierrors.Errorf("AssertAttestationsForSlot: %s: expected %d attestation(s), got %d", node.Name, len(expectedAttestations), len(storedAttestations)) + } + return nil }) } diff --git a/pkg/testsuite/node_state.go b/pkg/testsuite/node_state.go index 65b60c728..aef18d144 100644 --- a/pkg/testsuite/node_state.go +++ b/pkg/testsuite/node_state.go @@ -38,6 +38,9 @@ func (t *TestSuite) AssertNodeState(nodes []*mock.Node, opts ...options.Option[N if state.sybilProtectionOnlineCommittee != nil { t.AssertSybilProtectionOnlineCommittee(*state.sybilProtectionOnlineCommittee, nodes...) } + if state.sybilProtectionCandidatesEpoch != nil && state.sybilProtectionCandidates != nil { + t.AssertSybilProtectionCandidates(*state.sybilProtectionCandidatesEpoch, *state.sybilProtectionCandidates, nodes...) + } if state.storageCommitmentAtSlot != nil { t.AssertEqualStoredCommitmentAtIndex(*state.storageCommitmentAtSlot, nodes...) } @@ -70,6 +73,8 @@ type NodeState struct { sybilProtectionCommitteeEpoch *iotago.EpochIndex sybilProtectionCommittee *[]iotago.AccountID sybilProtectionOnlineCommittee *[]account.SeatIndex + sybilProtectionCandidatesEpoch *iotago.EpochIndex + sybilProtectionCandidates *[]iotago.AccountID storageCommitments *[]*iotago.Commitment storageCommitmentAtSlot *iotago.SlotIndex @@ -143,6 +148,13 @@ func WithSybilProtectionOnlineCommittee(committee ...account.SeatIndex) options. } } +func WithSybilProtectionCandidates(epoch iotago.EpochIndex, candidates []iotago.AccountID) options.Option[NodeState] { + return func(state *NodeState) { + state.sybilProtectionCandidatesEpoch = &epoch + state.sybilProtectionCandidates = &candidates + } +} + func WithStorageCommitments(commitments []*iotago.Commitment) options.Option[NodeState] { return func(state *NodeState) { state.storageCommitments = &commitments diff --git a/pkg/testsuite/testsuite.go b/pkg/testsuite/testsuite.go index f2cc14c89..77cc15942 100644 --- a/pkg/testsuite/testsuite.go +++ b/pkg/testsuite/testsuite.go @@ -16,6 +16,7 @@ import ( "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/hive.go/runtime/syncutils" + "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/protocol" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" "github.com/iotaledger/iota-core/pkg/protocol/engine/utxoledger" @@ -309,6 +310,28 @@ func (t *TestSuite) Nodes(names ...string) []*mock.Node { return nodes } +func (t *TestSuite) AccountsOfNodes(names ...string) []iotago.AccountID { + nodes := t.Nodes(names...) 
+ + return lo.Map(nodes, func(node *mock.Node) iotago.AccountID { + return node.Validator.AccountID + }) +} + +func (t *TestSuite) SeatOfNodes(slot iotago.SlotIndex, names ...string) []account.SeatIndex { + nodes := t.Nodes(names...) + + return lo.Map(nodes, func(node *mock.Node) account.SeatIndex { + seatedAccounts, exists := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(slot) + require.True(t.Testing, exists, "node %s: committee at slot %d does not exist", node.Name, slot) + + seat, exists := seatedAccounts.GetSeat(node.Validator.AccountID) + require.True(t.Testing, exists, "node %s: seat for account %s does not exist", node.Name, node.Validator.AccountID) + + return seat + }) +} + func (t *TestSuite) Wait(nodes ...*mock.Node) { for _, node := range nodes { node.Wait() diff --git a/pkg/testsuite/testsuite_issue_blocks.go b/pkg/testsuite/testsuite_issue_blocks.go index 7dbaad3dd..920baa482 100644 --- a/pkg/testsuite/testsuite_issue_blocks.go +++ b/pkg/testsuite/testsuite_issue_blocks.go @@ -195,10 +195,12 @@ func (t *TestSuite) issueBlockRows(prefix string, rows int, initialParentsPrefix return blocksIssued, lastBlockRowIssued } -func (t *TestSuite) IssueBlocksAtSlots(prefix string, slots []iotago.SlotIndex, rowsPerSlot int, initialParentsPrefix string, nodes []*mock.Node, waitForSlotsCommitted bool, issuingOptions map[string][]options.Option[mock.BlockHeaderParams]) (allBlocksIssued []*blocks.Block, lastBlockRow []*blocks.Block) { +func (t *TestSuite) IssueBlocksAtSlots(prefix string, slots []iotago.SlotIndex, rowsPerSlot int, initialParentsPrefix string, nodes []*mock.Node, waitForSlotsCommitted bool, useCommitmentAtMinCommittableAge bool) (allBlocksIssued []*blocks.Block, lastBlockRow []*blocks.Block) { var blocksIssued, lastBlockRowIssued []*blocks.Block parentsPrefix := initialParentsPrefix + issuingOptions := make(map[string][]options.Option[mock.BlockHeaderParams]) + for i, slot := range slots { // advance time of the test suite t.SetCurrentSlot(slot) @@ -212,7 +214,20 @@ func (t *TestSuite) IssueBlocksAtSlots(prefix string, slots []iotago.SlotIndex, if waitForSlotsCommitted { if slot > t.API.ProtocolParameters().MinCommittableAge() { - t.AssertCommitmentSlotIndexExists(slot-(t.API.ProtocolParameters().MinCommittableAge()), nodes...) + commitmentSlot := slot - t.API.ProtocolParameters().MinCommittableAge() + t.AssertCommitmentSlotIndexExists(commitmentSlot, nodes...) + + if useCommitmentAtMinCommittableAge { + // Make sure that all nodes create blocks throughout the slot that commit to the same commitment at slot-minCommittableAge-1. + for _, node := range nodes { + commitment, err := node.Protocol.MainEngineInstance().Storage.Commitments().Load(commitmentSlot) + require.NoError(t.Testing, err) + + issuingOptions[node.Name] = []options.Option[mock.BlockHeaderParams]{ + mock.WithSlotCommitment(commitment.Commitment()), + } + } + } } else { t.AssertBlocksExist(blocksInSlot, true, nodes...) 
} @@ -222,8 +237,8 @@ func (t *TestSuite) IssueBlocksAtSlots(prefix string, slots []iotago.SlotIndex, return blocksIssued, lastBlockRowIssued } -func (t *TestSuite) IssueBlocksAtEpoch(prefix string, epoch iotago.EpochIndex, rowsPerSlot int, initialParentsPrefix string, nodes []*mock.Node, waitForSlotsCommitted bool, issuingOptions map[string][]options.Option[mock.BlockHeaderParams]) (allBlocksIssued []*blocks.Block, lastBlockRow []*blocks.Block) { - return t.IssueBlocksAtSlots(prefix, t.SlotsForEpoch(epoch), rowsPerSlot, initialParentsPrefix, nodes, waitForSlotsCommitted, issuingOptions) +func (t *TestSuite) IssueBlocksAtEpoch(prefix string, epoch iotago.EpochIndex, rowsPerSlot int, initialParentsPrefix string, nodes []*mock.Node, waitForSlotsCommitted bool, useCommitmentAtMinCommittableAge bool) (allBlocksIssued []*blocks.Block, lastBlockRow []*blocks.Block) { + return t.IssueBlocksAtSlots(prefix, t.SlotsForEpoch(epoch), rowsPerSlot, initialParentsPrefix, nodes, waitForSlotsCommitted, useCommitmentAtMinCommittableAge) } func (t *TestSuite) SlotsForEpoch(epoch iotago.EpochIndex) []iotago.SlotIndex {
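For orientation (not part of the diff above): a minimal sketch of how a call site changes under the new IssueBlocksAtSlots/IssueBlocksAtEpoch signature, where the per-node issuingOptions map is replaced by the useCommitmentAtMinCommittableAge flag. The slot indices and node set below are illustrative placeholders, not taken from the tests.

// Sketch only, not part of this diff.
// Old call sites passed a nil issuingOptions map:
//   ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3}, 4, "Genesis", ts.Nodes(), true, nil)
// The last parameter is now a bool. When true (and the slot already has a committable
// commitment), the test suite loads the commitment at slot-MinCommittableAge on every node and
// issues all blocks of that slot with mock.WithSlotCommitment pointing at it, so all issuers
// commit to the same commitment.
ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3}, 4, "Genesis", ts.Nodes(), true, true)

// The epoch-level wrapper simply forwards the same flag:
ts.IssueBlocksAtEpoch("", 0, 4, "Genesis", ts.Nodes(), true, false)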
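A second hedged sketch, showing the new test-suite helpers introduced in this diff (AccountsOfNodes, SeatOfNodes, AssertSybilProtectionCandidates and the WithSybilProtectionCandidates NodeState option); the epoch index 0 and slot 5 are placeholders:

// Sketch only, not part of this diff.
// Candidate and committee assertions can now be expressed via node names instead of
// hand-built account-ID or seat-index slices:
ts.AssertSybilProtectionCandidates(0, ts.AccountsOfNodes("node1", "node2"), ts.Nodes()...)

ts.AssertNodeState(ts.Nodes(),
	// New NodeState option wired into AssertNodeState by this diff:
	testsuite.WithSybilProtectionCandidates(0, ts.AccountsOfNodes("node1", "node2")),
	// SeatOfNodes resolves each named validator's seat in the committee at the given slot:
	testsuite.WithSybilProtectionOnlineCommittee(ts.SeatOfNodes(5, "node0", "node1", "node2", "node3")...),
)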