From e05850d42b576fb4f0a251255693e1fc754f9d4a Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 16 Apr 2024 16:39:58 +0800 Subject: [PATCH 01/16] Implement test case to reproduce commitment mismatch for account root --- pkg/tests/loss_of_acceptance_test.go | 64 +++++++++++++++++++++++----- 1 file changed, 53 insertions(+), 11 deletions(-) diff --git a/pkg/tests/loss_of_acceptance_test.go b/pkg/tests/loss_of_acceptance_test.go index aa91469f0..475465f13 100644 --- a/pkg/tests/loss_of_acceptance_test.go +++ b/pkg/tests/loss_of_acceptance_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/log" "github.com/iotaledger/iota-core/pkg/protocol" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" "github.com/iotaledger/iota-core/pkg/testsuite" @@ -17,7 +18,6 @@ import ( func TestLossOfAcceptanceFromGenesis(t *testing.T) { ts := testsuite.NewTestSuite(t, - testsuite.WithWaitFor(15*time.Second), testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( 0, @@ -39,11 +39,17 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { node0 := ts.AddValidatorNode("node0") ts.AddDefaultWallet(node0) - ts.AddValidatorNode("node1") - ts.AddNode("node2") + node1 := ts.AddValidatorNode("node1") + node2 := ts.AddNode("node2") + + nodesP1 := []*mock.Node{node0, node2} + nodesP2 := []*mock.Node{node1} ts.Run(true, nil) + node0.Protocol.SetLogLevel(log.LevelTrace) + node1.Protocol.SetLogLevel(log.LevelTrace) + // Create snapshot to use later. snapshotPath := ts.Directory.Path(fmt.Sprintf("%d_snapshot", time.Now().Unix())) require.NoError(t, node0.Protocol.Engines.Main.Get().WriteSnapshot(snapshotPath)) @@ -68,13 +74,33 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { ts.AssertBlocksExist(ts.Blocks("block0"), true, ts.ClientsForNodes()...) } - // Continue issuing on all nodes for a few slots. + ts.SplitIntoPartitions(map[string][]*mock.Node{ + "P1": nodesP1, + "P2": nodesP2, + }) + + // Issue in P1 { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{53, 54, 55, 56, 57}, 3, "52.1", ts.Nodes(), true, false) + ts.IssueBlocksAtSlots("P1:", []iotago.SlotIndex{53, 54, 55, 56, 57}, 3, "52.1", nodesP1, true, false) - ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("57.0"), true, ts.Nodes()...) - ts.AssertLatestCommitmentSlotIndex(55, ts.Nodes()...) - ts.AssertEqualStoredCommitmentAtIndex(55, ts.Nodes()...) + ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("57.0"), true, nodesP1...) + ts.AssertLatestCommitmentSlotIndex(55, nodesP1...) + ts.AssertEqualStoredCommitmentAtIndex(55, nodesP1...) + + ts.AssertBlocksExist(ts.BlocksWithPrefix("P1"), true, ts.ClientsForNodes(nodesP1...)...) + ts.AssertBlocksExist(ts.BlocksWithPrefix("P1"), false, ts.ClientsForNodes(nodesP2...)...) + } + + // Issue in P2 + { + ts.IssueBlocksAtSlots("P2:", []iotago.SlotIndex{53, 54, 55, 56, 57}, 3, "52.1", nodesP2, true, false) + + ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("57.0"), true, nodesP2...) + ts.AssertLatestCommitmentSlotIndex(55, nodesP2...) + ts.AssertEqualStoredCommitmentAtIndex(55, nodesP2...) + + ts.AssertBlocksExist(ts.BlocksWithPrefix("P2"), false, ts.ClientsForNodes(nodesP1...)...) + ts.AssertBlocksExist(ts.BlocksWithPrefix("P2"), true, ts.ClientsForNodes(nodesP2...)...) } // Start node3 from genesis snapshot. 
@@ -84,18 +110,34 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { protocol.WithSnapshotPath(snapshotPath), protocol.WithBaseDirectory(ts.Directory.PathWithCreate(node3.Name)), ) + // node3.Protocol.SetLogLevel(log.LevelTrace) ts.Wait() } - // Continue issuing on all nodes for a few slots. + ts.MergePartitionsToMain() + fmt.Println("\n=========================\nMerged network partitions\n=========================") + + // Continue issuing on all nodes on top of their chain, respectively. { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{58, 59}, 3, "57.2", ts.Nodes("node0", "node1", "node2"), true, false) + ts.IssueBlocksAtSlots("P1:", []iotago.SlotIndex{58, 59}, 3, "P1:57.2", nodesP1, true, false) + ts.IssueBlocksAtSlots("P2:", []iotago.SlotIndex{58, 59}, 3, "P2:57.2", nodesP2, true, false) - ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("59.0"), true, ts.Nodes()...) + // ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("59.0"), true, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(57, ts.Nodes()...) ts.AssertEqualStoredCommitmentAtIndex(57, ts.Nodes()...) } + return + + // Continue issuing on all nodes for a few slots. + { + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{53, 54, 55, 56, 57}, 3, "52.1", ts.Nodes(), true, false) + + ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("57.0"), true, ts.Nodes()...) + ts.AssertLatestCommitmentSlotIndex(55, ts.Nodes()...) + ts.AssertEqualStoredCommitmentAtIndex(55, ts.Nodes()...) + } + // Check that commitments from 1-49 are empty. for slot := iotago.SlotIndex(1); slot <= 49; slot++ { ts.AssertStorageCommitmentBlocks(slot, nil, ts.Nodes()...) From 58de79556b3c25ef5dfb3f7c4b8b0b57ed0c1282 Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Tue, 16 Apr 2024 16:09:45 +0200 Subject: [PATCH 02/16] Fix logger --- pkg/protocol/engines.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/protocol/engines.go b/pkg/protocol/engines.go index 153652611..c3be37c89 100644 --- a/pkg/protocol/engines.go +++ b/pkg/protocol/engines.go @@ -105,7 +105,7 @@ func (e *Engines) ForkAtSlot(slot iotago.SlotIndex) (*engine.Engine, error) { evictionState.Initialize(latestCommitment.Slot()) blockCache := blocks.New(evictionState, newStorage.Settings().APIProvider()) - accountsManager := accountsledger.New(module.New(log.NewLogger(log.WithName("ForkedAccountsLedger"))), newStorage.Settings().APIProvider(), blockCache.Block, newStorage.AccountDiffs, newStorage.Accounts()) + accountsManager := accountsledger.New(e.protocol.NewSubModule("ForkedAccountsLedger"), newStorage.Settings().APIProvider(), blockCache.Block, newStorage.AccountDiffs, newStorage.Accounts()) accountsManager.SetLatestCommittedSlot(latestCommitment.Slot()) if err = accountsManager.Rollback(slot); err != nil { From eda06e9bceebac0da530257312ad5ab0b6557a72 Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Tue, 16 Apr 2024 16:10:20 +0200 Subject: [PATCH 03/16] Commit accounts ledger after rollback to persist the changes --- pkg/protocol/engine/accounts/accountsledger/manager.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pkg/protocol/engine/accounts/accountsledger/manager.go b/pkg/protocol/engine/accounts/accountsledger/manager.go index 7416434d5..2ee8245e1 100644 --- a/pkg/protocol/engine/accounts/accountsledger/manager.go +++ b/pkg/protocol/engine/accounts/accountsledger/manager.go @@ -127,6 +127,11 @@ func (m *Manager) AccountsTreeRoot() iotago.Identifier { 
m.mutex.RLock() defer m.mutex.RUnlock() + _ = m.accountsTree.Stream(func(accountID iotago.AccountID, accountData *accounts.AccountData) error { + m.LogDebug(">> committing account account", "accountID", accountID, "BIC.VALUE", accountData.Credits.Value, "BIC.UpdateSlot", accountData.Credits.UpdateSlot) + return nil + }) + return m.accountsTree.Root() } @@ -314,7 +319,7 @@ func (m *Manager) Rollback(targetSlot iotago.SlotIndex) error { } } - return nil + return m.accountsTree.Commit() } // AddAccount adds a new account to the Account tree, allotting to it the balance on the given output. From b52b8c64e9d1095d77d0783f8aed2aecf60f21a2 Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Tue, 16 Apr 2024 16:10:37 +0200 Subject: [PATCH 04/16] Reproduce engine nil pointer and start fixing --- pkg/protocol/attestations.go | 8 +++- pkg/protocol/chain.go | 1 + pkg/protocol/commitment.go | 24 +++++++---- pkg/tests/loss_of_acceptance_test.go | 62 ++++++++++++++-------------- 4 files changed, 55 insertions(+), 40 deletions(-) diff --git a/pkg/protocol/attestations.go b/pkg/protocol/attestations.go index 235f7974b..4f4b7a5bd 100644 --- a/pkg/protocol/attestations.go +++ b/pkg/protocol/attestations.go @@ -1,6 +1,8 @@ package protocol import ( + "fmt" + "github.com/libp2p/go-libp2p/core/peer" "github.com/iotaledger/hive.go/core/eventticker" @@ -129,7 +131,11 @@ func (a *Attestations) setupCommitmentVerifier(chain *Chain) (shutdown func()) { } a.commitmentVerifiers.GetOrCreate(forkingPoint.ID(), func() (commitmentVerifier *CommitmentVerifier) { - commitmentVerifier, err := newCommitmentVerifier(forkingPoint.Chain.Get().LatestEngine(), parentOfForkingPoint.Commitment) + engine := forkingPoint.Chain.Get().LatestEngine() + if engine == nil { + fmt.Println("engine not available ", a.ParentLogger().LogName(), a.LogName()) + } + commitmentVerifier, err := newCommitmentVerifier(engine, parentOfForkingPoint.Commitment) if err != nil { a.LogError("failed to create commitment verifier", "chain", chain.LogName(), "error", err) } diff --git a/pkg/protocol/chain.go b/pkg/protocol/chain.go index 161f89dda..3216866eb 100644 --- a/pkg/protocol/chain.go +++ b/pkg/protocol/chain.go @@ -201,6 +201,7 @@ func (c *Chain) initLogger() (shutdown func()) { c.LatestSyncedSlot.LogUpdates(c, log.LevelTrace, "LatestSyncedSlot"), c.OutOfSyncThreshold.LogUpdates(c, log.LevelTrace, "OutOfSyncThreshold"), c.ForkingPoint.LogUpdates(c, log.LevelTrace, "ForkingPoint", (*Commitment).LogName), + c.ParentChain.LogUpdates(c, log.LevelTrace, "ParentChain", (*Chain).LogName), c.LatestCommitment.LogUpdates(c, log.LevelTrace, "LatestCommitment", (*Commitment).LogName), c.LatestAttestedCommitment.LogUpdates(c, log.LevelTrace, "LatestAttestedCommitment", (*Commitment).LogName), c.LatestProducedCommitment.LogUpdates(c, log.LevelDebug, "LatestProducedCommitment", (*Commitment).LogName), diff --git a/pkg/protocol/commitment.go b/pkg/protocol/commitment.go index de75800aa..93246adcd 100644 --- a/pkg/protocol/commitment.go +++ b/pkg/protocol/commitment.go @@ -60,6 +60,10 @@ type Commitment struct { // IsRoot contains a flag indicating if this Commitment is the root of the Chain. IsRoot reactive.Event + // IsSolid contains a flag indicating if this Commitment is solid (has all the commitments in its past cone until + // the RootCommitment). + IsSolid reactive.Event + // IsAttested contains a flag indicating if we have received attestations for this Commitment. 
IsAttested reactive.Event @@ -108,6 +112,7 @@ func newCommitment(commitments *Commitments, model *model.Commitment) *Commitmen CumulativeAttestedWeight: reactive.NewVariable[uint64](), CumulativeVerifiedWeight: reactive.NewVariable[uint64](), IsRoot: reactive.NewEvent(), + IsSolid: reactive.NewEvent(), IsAttested: reactive.NewEvent(), IsSynced: reactive.NewEvent(), IsCommittable: reactive.NewEvent(), @@ -219,6 +224,7 @@ func (c *Commitment) initLogger() (shutdown func()) { c.CumulativeAttestedWeight.LogUpdates(c, log.LevelTrace, "CumulativeAttestedWeight"), c.CumulativeVerifiedWeight.LogUpdates(c, log.LevelTrace, "CumulativeVerifiedWeight"), c.IsRoot.LogUpdates(c, log.LevelTrace, "IsRoot"), + c.IsSolid.LogUpdates(c, log.LevelTrace, "IsSolid"), c.IsAttested.LogUpdates(c, log.LevelTrace, "IsAttested"), c.IsSynced.LogUpdates(c, log.LevelTrace, "IsSynced"), c.IsCommittable.LogUpdates(c, log.LevelTrace, "IsCommittable"), @@ -235,6 +241,7 @@ func (c *Commitment) initDerivedProperties() (shutdown func()) { return lo.BatchReverse( // mark commitments that are marked as root as verified c.IsVerified.InheritFrom(c.IsRoot), + c.IsSolid.InheritFrom(c.IsRoot), // mark commitments that are marked as verified as attested and synced c.IsAttested.InheritFrom(c.IsVerified), @@ -252,7 +259,7 @@ func (c *Commitment) initDerivedProperties() (shutdown func()) { return lo.BatchReverse( c.deriveChain(parent), - + c.IsSolid.InheritFrom(parent.IsSolid), c.deriveCumulativeAttestedWeight(parent), c.deriveIsAboveLatestVerifiedCommitment(parent), @@ -294,9 +301,9 @@ func (c *Commitment) registerChild(child *Commitment) { // deriveChain derives the Chain of this Commitment which is either inherited from the parent if we are the main child // or a newly created chain. func (c *Commitment) deriveChain(parent *Commitment) func() { - return c.Chain.DeriveValueFrom(reactive.NewDerivedVariable3(func(currentChain *Chain, isRoot bool, mainChild *Commitment, parentChain *Chain) *Chain { + return c.Chain.DeriveValueFrom(reactive.NewDerivedVariable4(func(currentChain *Chain, isRoot bool, isSolid bool, mainChild *Commitment, parentChain *Chain) *Chain { // do not adjust the chain of the root commitment (it is set from the outside) - if isRoot { + if isRoot || !isSolid { return currentChain } @@ -315,13 +322,14 @@ func (c *Commitment) deriveChain(parent *Commitment) func() { // then we inherit the parent chain and evict the current one. // We will spawn a new one if we ever change back to not being the main child. // Here we basically move commitments to the parent chain. 
- if currentChain != nil && currentChain != parentChain { - // TODO: refactor it to use a dedicated WorkerPool - go currentChain.IsEvicted.Trigger() - } + + //if currentChain != nil && currentChain != parentChain { + // // TODO: refactor it to use a dedicated WorkerPool + // go currentChain.IsEvicted.Trigger() + //} return parentChain - }, c.IsRoot, parent.MainChild, parent.Chain, c.Chain.Get())) + }, c.IsRoot, c.IsSolid, parent.MainChild, parent.Chain, c.Chain.Get())) } // deriveCumulativeAttestedWeight derives the CumulativeAttestedWeight of this Commitment which is the sum of the diff --git a/pkg/tests/loss_of_acceptance_test.go b/pkg/tests/loss_of_acceptance_test.go index 475465f13..7ad739286 100644 --- a/pkg/tests/loss_of_acceptance_test.go +++ b/pkg/tests/loss_of_acceptance_test.go @@ -9,6 +9,7 @@ import ( "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/log" + "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/protocol" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" "github.com/iotaledger/iota-core/pkg/testsuite" @@ -47,13 +48,19 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { ts.Run(true, nil) - node0.Protocol.SetLogLevel(log.LevelTrace) - node1.Protocol.SetLogLevel(log.LevelTrace) + node0.Protocol.SetLogLevel(log.LevelFatal) + node1.Protocol.SetLogLevel(log.LevelFatal) + node2.Protocol.SetLogLevel(log.LevelFatal) // Create snapshot to use later. snapshotPath := ts.Directory.Path(fmt.Sprintf("%d_snapshot", time.Now().Unix())) require.NoError(t, node0.Protocol.Engines.Main.Get().WriteSnapshot(snapshotPath)) + seatIndexes := []account.SeatIndex{ + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountData.ID)), + lo.Return1(lo.Return1(node1.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountData.ID)), + } + // Revive chain on node0. { ts.SetCurrentSlot(50) @@ -73,44 +80,52 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { ts.AssertEqualStoredCommitmentAtIndex(50, ts.Nodes()...) ts.AssertBlocksExist(ts.Blocks("block0"), true, ts.ClientsForNodes()...) } + ts.AssertSybilProtectionOnlineCommittee(seatIndexes[0:1], ts.Nodes()...) ts.SplitIntoPartitions(map[string][]*mock.Node{ "P1": nodesP1, "P2": nodesP2, }) + ts.AssertSybilProtectionOnlineCommittee(seatIndexes[0:1], node0) + ts.AssertSybilProtectionOnlineCommittee(seatIndexes[0:1], node1) + // Issue in P1 { - ts.IssueBlocksAtSlots("P1:", []iotago.SlotIndex{53, 54, 55, 56, 57}, 3, "52.1", nodesP1, true, false) + ts.IssueBlocksAtSlots("P1:", []iotago.SlotIndex{53, 54, 55, 56, 57, 58, 59, 60, 61}, 3, "52.1", nodesP1, true, true) - ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("57.0"), true, nodesP1...) - ts.AssertLatestCommitmentSlotIndex(55, nodesP1...) - ts.AssertEqualStoredCommitmentAtIndex(55, nodesP1...) + ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("61.0"), true, nodesP1...) + ts.AssertLatestCommitmentSlotIndex(59, nodesP1...) + ts.AssertEqualStoredCommitmentAtIndex(59, nodesP1...) ts.AssertBlocksExist(ts.BlocksWithPrefix("P1"), true, ts.ClientsForNodes(nodesP1...)...) ts.AssertBlocksExist(ts.BlocksWithPrefix("P1"), false, ts.ClientsForNodes(nodesP2...)...) 
} + ts.AssertSybilProtectionOnlineCommittee(seatIndexes[0:1], node0, node2) + ts.AssertSybilProtectionOnlineCommittee(seatIndexes[0:1], node1) // Issue in P2 { - ts.IssueBlocksAtSlots("P2:", []iotago.SlotIndex{53, 54, 55, 56, 57}, 3, "52.1", nodesP2, true, false) + ts.IssueBlocksAtSlots("P2:", []iotago.SlotIndex{53, 54, 55, 56, 57, 58, 59, 60, 61}, 3, "52.1", nodesP2, false, false) - ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("57.0"), true, nodesP2...) - ts.AssertLatestCommitmentSlotIndex(55, nodesP2...) - ts.AssertEqualStoredCommitmentAtIndex(55, nodesP2...) + ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("61.0"), true, nodesP2...) + ts.AssertLatestCommitmentSlotIndex(59, nodesP2...) + ts.AssertEqualStoredCommitmentAtIndex(59, nodesP2...) ts.AssertBlocksExist(ts.BlocksWithPrefix("P2"), false, ts.ClientsForNodes(nodesP1...)...) ts.AssertBlocksExist(ts.BlocksWithPrefix("P2"), true, ts.ClientsForNodes(nodesP2...)...) } + ts.AssertSybilProtectionOnlineCommittee(seatIndexes[0:1], node0, node2) + ts.AssertSybilProtectionOnlineCommittee(seatIndexes[1:2], node1) // Start node3 from genesis snapshot. + node3 := ts.AddNode("node3") { - node3 := ts.AddNode("node3") node3.Initialize(true, protocol.WithSnapshotPath(snapshotPath), protocol.WithBaseDirectory(ts.Directory.PathWithCreate(node3.Name)), ) - // node3.Protocol.SetLogLevel(log.LevelTrace) + node3.Protocol.SetLogLevel(log.LevelTrace) ts.Wait() } @@ -119,28 +134,13 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { // Continue issuing on all nodes on top of their chain, respectively. { - ts.IssueBlocksAtSlots("P1:", []iotago.SlotIndex{58, 59}, 3, "P1:57.2", nodesP1, true, false) - ts.IssueBlocksAtSlots("P2:", []iotago.SlotIndex{58, 59}, 3, "P2:57.2", nodesP2, true, false) + ts.IssueBlocksAtSlots("P2:", []iotago.SlotIndex{62}, 1, "P2:61.2", nodesP2, false, false) + ts.IssueBlocksAtSlots("P1:", []iotago.SlotIndex{62}, 1, "P1:61.2", nodesP1, false, false) // ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("59.0"), true, ts.Nodes()...) - ts.AssertLatestCommitmentSlotIndex(57, ts.Nodes()...) - ts.AssertEqualStoredCommitmentAtIndex(57, ts.Nodes()...) - } - - return + ts.AssertLatestCommitmentSlotIndex(59, ts.Nodes()...) - // Continue issuing on all nodes for a few slots. - { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{53, 54, 55, 56, 57}, 3, "52.1", ts.Nodes(), true, false) - - ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("57.0"), true, ts.Nodes()...) - ts.AssertLatestCommitmentSlotIndex(55, ts.Nodes()...) - ts.AssertEqualStoredCommitmentAtIndex(55, ts.Nodes()...) - } - - // Check that commitments from 1-49 are empty. - for slot := iotago.SlotIndex(1); slot <= 49; slot++ { - ts.AssertStorageCommitmentBlocks(slot, nil, ts.Nodes()...) + ts.AssertEqualStoredCommitmentAtIndex(59, ts.Nodes()...) } } From 2e553ff9af5eff606af6ad44962b5ee2d85a88d2 Mon Sep 17 00:00:00 2001 From: Hans Moog <3293976+hmoog@users.noreply.github.com> Date: Tue, 16 Apr 2024 22:15:20 +0200 Subject: [PATCH 05/16] Fix: fix nil pointer exception --- pkg/protocol/chain.go | 7 +++++++ pkg/protocol/chains.go | 4 +++- pkg/protocol/commitment.go | 10 ++++++---- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/pkg/protocol/chain.go b/pkg/protocol/chain.go index 3216866eb..24192c724 100644 --- a/pkg/protocol/chain.go +++ b/pkg/protocol/chain.go @@ -57,6 +57,9 @@ type Chain struct { // IsEvicted contains a flag that indicates whether this chain was evicted. 
IsEvicted reactive.Event + // IsSolid contains a flag that indicates whether this chain is solid (has a continuous connection to the root). + IsSolid reactive.Event + // shouldEvict contains a flag that indicates whether this chain should be evicted. shouldEvict reactive.Event @@ -86,6 +89,7 @@ func newChain(chains *Chains) *Chain { StartEngine: reactive.NewVariable[bool](), Engine: reactive.NewVariable[*engine.Engine](), IsEvicted: reactive.NewEvent(), + IsSolid: reactive.NewEvent(), shouldEvict: reactive.NewEvent(), chains: chains, @@ -209,6 +213,7 @@ func (c *Chain) initLogger() (shutdown func()) { c.StartEngine.LogUpdates(c, log.LevelDebug, "StartEngine"), c.Engine.LogUpdates(c, log.LevelTrace, "Engine", (*engine.Engine).LogName), c.IsEvicted.LogUpdates(c, log.LevelTrace, "IsEvicted"), + c.IsSolid.LogUpdates(c, log.LevelTrace, "IsSolid"), c.shouldEvict.LogUpdates(c, log.LevelTrace, "shouldEvict"), c.Logger.Shutdown, @@ -234,6 +239,8 @@ func (c *Chain) initDerivedProperties() (shutdown func()) { c.deriveShouldEvict(forkingPoint, parentChain), ) }), + + c.IsSolid.InheritFrom(forkingPoint.IsSolid), ) }), ), diff --git a/pkg/protocol/chains.go b/pkg/protocol/chains.go index d50974d46..7bb09169f 100644 --- a/pkg/protocol/chains.go +++ b/pkg/protocol/chains.go @@ -323,7 +323,9 @@ func (c *Chains) initChainSwitching() (shutdown func()) { return lo.BatchReverse( c.HeaviestClaimedCandidate.WithNonEmptyValue(func(heaviestClaimedCandidate *Chain) (shutdown func()) { - return heaviestClaimedCandidate.RequestAttestations.ToggleValue(true) + return heaviestClaimedCandidate.IsSolid.WithNonEmptyValue(func(_ bool) (teardown func()) { + return heaviestClaimedCandidate.RequestAttestations.ToggleValue(true) + }) }), c.HeaviestAttestedCandidate.OnUpdate(func(_ *Chain, heaviestAttestedCandidate *Chain) { diff --git a/pkg/protocol/commitment.go b/pkg/protocol/commitment.go index 93246adcd..446c52493 100644 --- a/pkg/protocol/commitment.go +++ b/pkg/protocol/commitment.go @@ -259,7 +259,7 @@ func (c *Commitment) initDerivedProperties() (shutdown func()) { return lo.BatchReverse( c.deriveChain(parent), - c.IsSolid.InheritFrom(parent.IsSolid), + c.deriveCumulativeAttestedWeight(parent), c.deriveIsAboveLatestVerifiedCommitment(parent), @@ -273,6 +273,8 @@ func (c *Commitment) initDerivedProperties() (shutdown func()) { }), ) }), + + c.IsSolid.InheritFrom(parent.IsSolid), ) }), @@ -301,9 +303,9 @@ func (c *Commitment) registerChild(child *Commitment) { // deriveChain derives the Chain of this Commitment which is either inherited from the parent if we are the main child // or a newly created chain. 
func (c *Commitment) deriveChain(parent *Commitment) func() { - return c.Chain.DeriveValueFrom(reactive.NewDerivedVariable4(func(currentChain *Chain, isRoot bool, isSolid bool, mainChild *Commitment, parentChain *Chain) *Chain { + return c.Chain.DeriveValueFrom(reactive.NewDerivedVariable3(func(currentChain *Chain, isRoot bool, mainChild *Commitment, parentChain *Chain) *Chain { // do not adjust the chain of the root commitment (it is set from the outside) - if isRoot || !isSolid { + if isRoot { return currentChain } @@ -329,7 +331,7 @@ func (c *Commitment) deriveChain(parent *Commitment) func() { //} return parentChain - }, c.IsRoot, c.IsSolid, parent.MainChild, parent.Chain, c.Chain.Get())) + }, c.IsRoot, parent.MainChild, parent.Chain, c.Chain.Get())) } // deriveCumulativeAttestedWeight derives the CumulativeAttestedWeight of this Commitment which is the sum of the From 9db69b68837b8d89f48f04593fdfa9f191349318 Mon Sep 17 00:00:00 2001 From: Hans Moog <3293976+hmoog@users.noreply.github.com> Date: Tue, 16 Apr 2024 22:22:43 +0200 Subject: [PATCH 06/16] Refactor: reverted unnecessary changes --- pkg/protocol/attestations.go | 8 +------- pkg/protocol/commitment.go | 9 ++++----- pkg/protocol/engine/accounts/accountsledger/manager.go | 5 ----- 3 files changed, 5 insertions(+), 17 deletions(-) diff --git a/pkg/protocol/attestations.go b/pkg/protocol/attestations.go index 4f4b7a5bd..235f7974b 100644 --- a/pkg/protocol/attestations.go +++ b/pkg/protocol/attestations.go @@ -1,8 +1,6 @@ package protocol import ( - "fmt" - "github.com/libp2p/go-libp2p/core/peer" "github.com/iotaledger/hive.go/core/eventticker" @@ -131,11 +129,7 @@ func (a *Attestations) setupCommitmentVerifier(chain *Chain) (shutdown func()) { } a.commitmentVerifiers.GetOrCreate(forkingPoint.ID(), func() (commitmentVerifier *CommitmentVerifier) { - engine := forkingPoint.Chain.Get().LatestEngine() - if engine == nil { - fmt.Println("engine not available ", a.ParentLogger().LogName(), a.LogName()) - } - commitmentVerifier, err := newCommitmentVerifier(engine, parentOfForkingPoint.Commitment) + commitmentVerifier, err := newCommitmentVerifier(forkingPoint.Chain.Get().LatestEngine(), parentOfForkingPoint.Commitment) if err != nil { a.LogError("failed to create commitment verifier", "chain", chain.LogName(), "error", err) } diff --git a/pkg/protocol/commitment.go b/pkg/protocol/commitment.go index 446c52493..78a24bf29 100644 --- a/pkg/protocol/commitment.go +++ b/pkg/protocol/commitment.go @@ -324,11 +324,10 @@ func (c *Commitment) deriveChain(parent *Commitment) func() { // then we inherit the parent chain and evict the current one. // We will spawn a new one if we ever change back to not being the main child. // Here we basically move commitments to the parent chain. 
- - //if currentChain != nil && currentChain != parentChain { - // // TODO: refactor it to use a dedicated WorkerPool - // go currentChain.IsEvicted.Trigger() - //} + if currentChain != nil && currentChain != parentChain { + // TODO: refactor it to use a dedicated WorkerPool + go currentChain.IsEvicted.Trigger() + } return parentChain }, c.IsRoot, parent.MainChild, parent.Chain, c.Chain.Get())) diff --git a/pkg/protocol/engine/accounts/accountsledger/manager.go b/pkg/protocol/engine/accounts/accountsledger/manager.go index 2ee8245e1..59f5e8c7e 100644 --- a/pkg/protocol/engine/accounts/accountsledger/manager.go +++ b/pkg/protocol/engine/accounts/accountsledger/manager.go @@ -127,11 +127,6 @@ func (m *Manager) AccountsTreeRoot() iotago.Identifier { m.mutex.RLock() defer m.mutex.RUnlock() - _ = m.accountsTree.Stream(func(accountID iotago.AccountID, accountData *accounts.AccountData) error { - m.LogDebug(">> committing account account", "accountID", accountID, "BIC.VALUE", accountData.Credits.Value, "BIC.UpdateSlot", accountData.Credits.UpdateSlot) - return nil - }) - return m.accountsTree.Root() } From 8206baa52a924e0b9dc84ddf79406b1a1316f6cc Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Wed, 17 Apr 2024 12:22:37 +0800 Subject: [PATCH 07/16] moar debug logs to finally find lastAccessedEpoch bug and node being stuck in "deleting" practically endless (ghost) epochs from disk --- pkg/protocol/engines.go | 22 ++++++++ pkg/storage/prunable/epochstore/epoch_kv.go | 4 ++ pkg/storage/prunable/prunable.go | 16 ++++++ pkg/tests/loss_of_acceptance_test.go | 59 +++++++++++++-------- 4 files changed, 78 insertions(+), 23 deletions(-) diff --git a/pkg/protocol/engines.go b/pkg/protocol/engines.go index c3be37c89..7ec714ec9 100644 --- a/pkg/protocol/engines.go +++ b/pkg/protocol/engines.go @@ -81,29 +81,39 @@ func (e *Engines) initLogging() (shutdown func()) { // ForkAtSlot creates a new engine instance that forks from the main engine at the given slot. func (e *Engines) ForkAtSlot(slot iotago.SlotIndex) (*engine.Engine, error) { + e.LogInfo("forking engine at slot", "slot", slot) + newEngineAlias := lo.PanicOnErr(uuid.NewUUID()).String() errorHandler := func(err error) { e.protocol.LogError("engine error", "err", err, "name", newEngineAlias[0:8]) } + e.LogInfo("forking engine at slot 2", "slot", slot) + // copy raw data on disk. newStorage, err := storage.Clone(e, e.Main.Get().Storage, e.directory.Path(newEngineAlias), DatabaseVersion, errorHandler, e.protocol.Options.StorageOptions...) if err != nil { return nil, ierrors.Wrapf(err, "failed to copy storage from active engine instance (%s) to new engine instance (%s)", e.Main.Get().Storage.Directory(), e.directory.Path(newEngineAlias)) } + e.LogInfo("forking engine at slot 3", "slot", slot) + // remove commitments that after forking point. latestCommitment := newStorage.Settings().LatestCommitment() if err = newStorage.Commitments().Rollback(slot, latestCommitment.Slot()); err != nil { return nil, ierrors.Wrap(err, "failed to rollback commitments") } + e.LogInfo("forking engine at slot 4", "slot", slot) + // some components are automatically rolled back by deleting their data on disk (e.g. slot based storage). // some other components need to be rolled back manually, like the UTXO ledger for example. // we need to create temporary components to rollback their permanent state, which will be reflected on disk. 
evictionState := eviction.NewState(newStorage.Settings(), newStorage.RootBlocks) evictionState.Initialize(latestCommitment.Slot()) + e.LogInfo("forking engine at slot 5", "slot", slot) + blockCache := blocks.New(evictionState, newStorage.Settings().APIProvider()) accountsManager := accountsledger.New(e.protocol.NewSubModule("ForkedAccountsLedger"), newStorage.Settings().APIProvider(), blockCache.Block, newStorage.AccountDiffs, newStorage.Accounts()) @@ -119,19 +129,27 @@ func (e *Engines) ForkAtSlot(slot iotago.SlotIndex) (*engine.Engine, error) { return nil, err } + e.LogInfo("forking engine at slot 6", "slot", slot) + targetCommitment, err := newStorage.Commitments().Load(slot) if err != nil { return nil, ierrors.Wrapf(err, "error while retrieving commitment for target index %d", slot) } + e.LogInfo("forking engine at slot 6.1", "slot", slot) + if err = newStorage.Settings().Rollback(targetCommitment); err != nil { return nil, err } + e.LogInfo("forking engine at slot 6.2", "slot", slot) if err = newStorage.Rollback(slot); err != nil { + e.LogError("failed to rollback storage", "err", err) return nil, err } + e.LogInfo("forking engine at slot 7", "slot", slot) + candidateEngine := e.loadEngineInstanceWithStorage(newEngineAlias, newStorage) // rollback attestations already on created engine instance, because this action modifies the in-memory storage. @@ -139,6 +157,8 @@ func (e *Engines) ForkAtSlot(slot iotago.SlotIndex) (*engine.Engine, error) { return nil, ierrors.Wrap(err, "error while rolling back attestations storage on candidate engine") } + e.LogInfo("forking engine at slot 8", "slot", slot) + return candidateEngine, nil } @@ -274,7 +294,9 @@ func (e *Engines) injectEngineInstances() (shutdown func()) { } else { e.protocol.Network.OnShutdown(func() { newEngine.ShutdownEvent().Trigger() }) + e.LogInfo("injecting engine instance before", "chain", chain.LogName()) chain.Engine.Set(newEngine) + e.LogInfo("injecting engine instance after", "chain", chain.LogName()) } }) }) diff --git a/pkg/storage/prunable/epochstore/epoch_kv.go b/pkg/storage/prunable/epochstore/epoch_kv.go index 1c106b28a..0275afd26 100644 --- a/pkg/storage/prunable/epochstore/epoch_kv.go +++ b/pkg/storage/prunable/epochstore/epoch_kv.go @@ -1,6 +1,8 @@ package epochstore import ( + "fmt" + "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" @@ -108,10 +110,12 @@ func (e *EpochKVStore) Prune(epoch iotago.EpochIndex, defaultPruningDelay iotago func (e *EpochKVStore) RollbackEpochs(epoch iotago.EpochIndex) (lastPrunedEpoch iotago.EpochIndex, err error) { lastAccessedEpoch, err := e.LastAccessedEpoch() + fmt.Println("lastAccessedEpoch: ", lastAccessedEpoch) if err != nil { return lastAccessedEpoch, ierrors.Wrap(err, "failed to get last accessed epoch") } + fmt.Println("epoch: ", epoch) for epochToPrune := epoch; epochToPrune <= lastAccessedEpoch; epochToPrune++ { if err = e.DeleteEpoch(epochToPrune); err != nil { return epochToPrune, ierrors.Wrapf(err, "error while deleting epoch %d", epochToPrune) diff --git a/pkg/storage/prunable/prunable.go b/pkg/storage/prunable/prunable.go index e90f2b0b8..e186eedb6 100644 --- a/pkg/storage/prunable/prunable.go +++ b/pkg/storage/prunable/prunable.go @@ -1,6 +1,8 @@ package prunable import ( + "fmt" + copydir "github.com/otiai10/copy" "github.com/iotaledger/hive.go/ierrors" @@ -171,34 +173,47 @@ func (p *Prunable) Flush() { } func (p *Prunable) Rollback(targetEpoch iotago.EpochIndex, startPruneRange iotago.SlotIndex, 
endPruneRange iotago.SlotIndex) error { + fmt.Println("Rollback", targetEpoch, startPruneRange, endPruneRange) + if err := p.prunableSlotStore.PruneSlots(targetEpoch, startPruneRange, endPruneRange); err != nil { return ierrors.Wrapf(err, "failed to prune slots in range [%d, %d] from target epoch %d", startPruneRange, endPruneRange, targetEpoch) } + fmt.Println("Rollback 2", targetEpoch, startPruneRange, endPruneRange) + if err := p.rollbackCommitteesCandidates(targetEpoch, startPruneRange); err != nil { return ierrors.Wrapf(err, "failed to rollback committee candidates to target epoch %d", targetEpoch) } + fmt.Println("Rollback 3", targetEpoch, startPruneRange, endPruneRange) lastPrunedCommitteeEpoch, err := p.rollbackCommitteeEpochs(targetEpoch+1, startPruneRange-1) if err != nil { return ierrors.Wrapf(err, "failed to rollback committee epochs to target epoch %d", targetEpoch) } + fmt.Println("Rollback 4", targetEpoch, startPruneRange, endPruneRange) + lastPrunedPoolStatsEpoch, _, err := p.poolStats.RollbackEpochs(targetEpoch) if err != nil { return ierrors.Wrapf(err, "failed to rollback pool stats epochs to target epoch %d", targetEpoch) } + fmt.Println("Rollback 5", targetEpoch, startPruneRange, endPruneRange) + lastPrunedDecidedUpgradeSignalsEpoch, _, err := p.decidedUpgradeSignals.RollbackEpochs(targetEpoch) if err != nil { return ierrors.Wrapf(err, "failed to rollback decided upgrade signals epochs to target epoch %d", targetEpoch) } + fmt.Println("Rollback 6", targetEpoch, startPruneRange, endPruneRange) + lastPrunedPoolRewardsEpoch, err := p.poolRewards.RollbackEpochs(targetEpoch) if err != nil { return ierrors.Wrapf(err, "failed to rollback pool rewards epochs to target epoch %d", targetEpoch) } + fmt.Println("Rollback 7 ", targetEpoch, startPruneRange, endPruneRange) + for epochToPrune := targetEpoch + 1; epochToPrune <= max( lastPrunedCommitteeEpoch, lastPrunedPoolStatsEpoch, @@ -207,6 +222,7 @@ func (p *Prunable) Rollback(targetEpoch iotago.EpochIndex, startPruneRange iotag ); epochToPrune++ { p.prunableSlotStore.DeleteBucket(epochToPrune) } + fmt.Println("Rollback 8", targetEpoch, startPruneRange, endPruneRange) return nil } diff --git a/pkg/tests/loss_of_acceptance_test.go b/pkg/tests/loss_of_acceptance_test.go index 7ad739286..f3eb950a8 100644 --- a/pkg/tests/loss_of_acceptance_test.go +++ b/pkg/tests/loss_of_acceptance_test.go @@ -2,6 +2,9 @@ package tests import ( "fmt" + "net/http" + _ "net/http" + _ "net/http/pprof" "testing" "time" @@ -18,6 +21,12 @@ import ( ) func TestLossOfAcceptanceFromGenesis(t *testing.T) { + // debug.SetEnabled(true) + + go func() { + fmt.Println(http.ListenAndServe("localhost:6061", nil)) + }() + ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( @@ -36,21 +45,21 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { ), testsuite.WithWaitFor(15*time.Second), ) - defer ts.Shutdown() + // defer ts.Shutdown() node0 := ts.AddValidatorNode("node0") ts.AddDefaultWallet(node0) node1 := ts.AddValidatorNode("node1") - node2 := ts.AddNode("node2") + // node2 := ts.AddNode("node2") - nodesP1 := []*mock.Node{node0, node2} + nodesP1 := []*mock.Node{node0} nodesP2 := []*mock.Node{node1} ts.Run(true, nil) - node0.Protocol.SetLogLevel(log.LevelFatal) - node1.Protocol.SetLogLevel(log.LevelFatal) - node2.Protocol.SetLogLevel(log.LevelFatal) + node0.Protocol.SetLogLevel(log.LevelTrace) + // node1.Protocol.SetLogLevel(log.LevelTrace) + // node2.Protocol.SetLogLevel(log.LevelFatal) // Create snapshot to use 
later. snapshotPath := ts.Directory.Path(fmt.Sprintf("%d_snapshot", time.Now().Unix())) @@ -101,8 +110,8 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { ts.AssertBlocksExist(ts.BlocksWithPrefix("P1"), true, ts.ClientsForNodes(nodesP1...)...) ts.AssertBlocksExist(ts.BlocksWithPrefix("P1"), false, ts.ClientsForNodes(nodesP2...)...) } - ts.AssertSybilProtectionOnlineCommittee(seatIndexes[0:1], node0, node2) - ts.AssertSybilProtectionOnlineCommittee(seatIndexes[0:1], node1) + ts.AssertSybilProtectionOnlineCommittee(seatIndexes[0:1], nodesP1...) + ts.AssertSybilProtectionOnlineCommittee(seatIndexes[0:1], nodesP2...) // Issue in P2 { @@ -115,32 +124,36 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { ts.AssertBlocksExist(ts.BlocksWithPrefix("P2"), false, ts.ClientsForNodes(nodesP1...)...) ts.AssertBlocksExist(ts.BlocksWithPrefix("P2"), true, ts.ClientsForNodes(nodesP2...)...) } - ts.AssertSybilProtectionOnlineCommittee(seatIndexes[0:1], node0, node2) - ts.AssertSybilProtectionOnlineCommittee(seatIndexes[1:2], node1) + ts.AssertSybilProtectionOnlineCommittee(seatIndexes[0:1], nodesP1...) + ts.AssertSybilProtectionOnlineCommittee(seatIndexes[1:2], nodesP2...) // Start node3 from genesis snapshot. - node3 := ts.AddNode("node3") - { - node3.Initialize(true, - protocol.WithSnapshotPath(snapshotPath), - protocol.WithBaseDirectory(ts.Directory.PathWithCreate(node3.Name)), - ) - node3.Protocol.SetLogLevel(log.LevelTrace) - ts.Wait() - } - + // node3 := ts.AddNode("node3") + // { + // node3.Initialize(true, + // protocol.WithSnapshotPath(snapshotPath), + // protocol.WithBaseDirectory(ts.Directory.PathWithCreate(node3.Name)), + // ) + // node3.Protocol.SetLogLevel(log.LevelTrace) + // ts.Wait() + // } ts.MergePartitionsToMain() fmt.Println("\n=========================\nMerged network partitions\n=========================") // Continue issuing on all nodes on top of their chain, respectively. { - ts.IssueBlocksAtSlots("P2:", []iotago.SlotIndex{62}, 1, "P2:61.2", nodesP2, false, false) - ts.IssueBlocksAtSlots("P1:", []iotago.SlotIndex{62}, 1, "P1:61.2", nodesP1, false, false) + ts.IssueBlocksAtSlots("P2:", []iotago.SlotIndex{61}, 1, "P2:61.2", nodesP2, false, false) + ts.IssueBlocksAtSlots("P1:", []iotago.SlotIndex{61}, 1, "P1:61.2", nodesP1, false, false) + ts.Wait() + + fmt.Println(">>>>>>> Checking stuff...") // ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("59.0"), true, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(59, ts.Nodes()...) - + fmt.Println(">>>>>>> Latest commitment slot index:", 59) + // pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) ts.AssertEqualStoredCommitmentAtIndex(59, ts.Nodes()...) + fmt.Println(">>>>>>> Stored commitment at index 59 is equal for all nodes.") } } From d43a2f1c3507f85658d2641bd8d77f2d1c37bbd7 Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Wed, 17 Apr 2024 09:59:17 +0200 Subject: [PATCH 08/16] Fix underflow when exporting a snapshot. 
--- .../sybilprotectionv1/performance/snapshot.go | 17 +++++++++++------ pkg/storage/prunable/epochstore/epoch_kv.go | 5 +---- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot.go index d4b19e2a5..6e0ed9bf2 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot.go @@ -3,6 +3,7 @@ package performance import ( "io" + "github.com/iotaledger/hive.go/core/safemath" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/serializer/v2" @@ -47,12 +48,15 @@ func (t *Tracker) Export(writer io.WriteSeeker, targetSlotIndex iotago.SlotIndex timeProvider := t.apiProvider.APIForSlot(targetSlotIndex).TimeProvider() targetEpoch := timeProvider.EpochFromSlot(targetSlotIndex) - // if the target index is the last slot of the epoch, the epoch was committed - if timeProvider.EpochEnd(targetEpoch) != targetSlotIndex { - targetEpoch-- + // if the target index is the last slot of the epoch, the epoch was committed - unless it's epoch 0 to avoid underflow. + if timeProvider.EpochEnd(targetEpoch) != targetSlotIndex && targetEpoch > 0 { + targetEpoch = lo.PanicOnErr(safemath.SafeSub(targetEpoch, 1)) } - if err := t.exportPerformanceFactor(writer, timeProvider.EpochStart(targetEpoch+1), targetSlotIndex); err != nil { + // If targetEpoch==0 then export performance factors from slot 0 to the targetSlotIndex. + // PoolRewards and PoolStats are empty if epoch 0 was not committed yet, so it's not a problem. + // But PerformanceFactors are exported for the ongoing epoch, so for epoch 0 we must make an exception and not add 1 to the targetEpoch. + if err := t.exportPerformanceFactor(writer, timeProvider.EpochStart(targetEpoch+lo.Cond(targetEpoch == 0, iotago.EpochIndex(0), iotago.EpochIndex(1))), targetSlotIndex); err != nil { return ierrors.Wrap(err, "unable to export performance factor") } @@ -277,8 +281,9 @@ func (t *Tracker) exportPoolRewards(writer io.WriteSeeker, targetEpoch iotago.Ep if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) { var epochCount int - - for epoch := targetEpoch; epoch > iotago.EpochIndex(lo.Max(0, int(targetEpoch)-daysInYear)); epoch-- { + // Here underflow will not happen because we will stop iterating for epoch 0, because 0 is not greater than zero. + // Use safemath here anyway to avoid hard to trace problems stemming from an accidental underflow. 
+ for epoch := targetEpoch; epoch > iotago.EpochIndex(lo.Max(0, int(targetEpoch)-daysInYear)); epoch = lo.PanicOnErr(safemath.SafeSub(epoch, 1)) { rewardsMap, err := t.rewardsMap(epoch) if err != nil { return 0, ierrors.Wrapf(err, "unable to get rewards tree for epoch %d", epoch) diff --git a/pkg/storage/prunable/epochstore/epoch_kv.go b/pkg/storage/prunable/epochstore/epoch_kv.go index 0275afd26..5b94cbde9 100644 --- a/pkg/storage/prunable/epochstore/epoch_kv.go +++ b/pkg/storage/prunable/epochstore/epoch_kv.go @@ -1,8 +1,6 @@ package epochstore import ( - "fmt" - "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" @@ -110,12 +108,11 @@ func (e *EpochKVStore) Prune(epoch iotago.EpochIndex, defaultPruningDelay iotago func (e *EpochKVStore) RollbackEpochs(epoch iotago.EpochIndex) (lastPrunedEpoch iotago.EpochIndex, err error) { lastAccessedEpoch, err := e.LastAccessedEpoch() - fmt.Println("lastAccessedEpoch: ", lastAccessedEpoch) + if err != nil { return lastAccessedEpoch, ierrors.Wrap(err, "failed to get last accessed epoch") } - fmt.Println("epoch: ", epoch) for epochToPrune := epoch; epochToPrune <= lastAccessedEpoch; epochToPrune++ { if err = e.DeleteEpoch(epochToPrune); err != nil { return epochToPrune, ierrors.Wrapf(err, "error while deleting epoch %d", epochToPrune) From 8bb7715547b883e215b1b02ab9c504a63efa4d90 Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Wed, 17 Apr 2024 10:09:10 +0200 Subject: [PATCH 09/16] Disable trace logging --- pkg/tests/loss_of_acceptance_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/tests/loss_of_acceptance_test.go b/pkg/tests/loss_of_acceptance_test.go index f3eb950a8..35c3c2653 100644 --- a/pkg/tests/loss_of_acceptance_test.go +++ b/pkg/tests/loss_of_acceptance_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/require" "github.com/iotaledger/hive.go/lo" - "github.com/iotaledger/hive.go/log" "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/protocol" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" @@ -57,7 +56,7 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { ts.Run(true, nil) - node0.Protocol.SetLogLevel(log.LevelTrace) + //node0.Protocol.SetLogLevel(log.LevelTrace) // node1.Protocol.SetLogLevel(log.LevelTrace) // node2.Protocol.SetLogLevel(log.LevelFatal) From 1ff5de2875655a3756aa76b0c0e8ef6903118a30 Mon Sep 17 00:00:00 2001 From: Hans Moog <3293976+hmoog@users.noreply.github.com> Date: Wed, 17 Apr 2024 11:46:20 +0200 Subject: [PATCH 10/16] Refactor: cleaned up changes --- pkg/protocol/engines.go | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/pkg/protocol/engines.go b/pkg/protocol/engines.go index 7ec714ec9..87407d8b2 100644 --- a/pkg/protocol/engines.go +++ b/pkg/protocol/engines.go @@ -81,39 +81,29 @@ func (e *Engines) initLogging() (shutdown func()) { // ForkAtSlot creates a new engine instance that forks from the main engine at the given slot. func (e *Engines) ForkAtSlot(slot iotago.SlotIndex) (*engine.Engine, error) { - e.LogInfo("forking engine at slot", "slot", slot) - newEngineAlias := lo.PanicOnErr(uuid.NewUUID()).String() errorHandler := func(err error) { e.protocol.LogError("engine error", "err", err, "name", newEngineAlias[0:8]) } - e.LogInfo("forking engine at slot 2", "slot", slot) - // copy raw data on disk. 
newStorage, err := storage.Clone(e, e.Main.Get().Storage, e.directory.Path(newEngineAlias), DatabaseVersion, errorHandler, e.protocol.Options.StorageOptions...) if err != nil { return nil, ierrors.Wrapf(err, "failed to copy storage from active engine instance (%s) to new engine instance (%s)", e.Main.Get().Storage.Directory(), e.directory.Path(newEngineAlias)) } - e.LogInfo("forking engine at slot 3", "slot", slot) - // remove commitments that after forking point. latestCommitment := newStorage.Settings().LatestCommitment() if err = newStorage.Commitments().Rollback(slot, latestCommitment.Slot()); err != nil { return nil, ierrors.Wrap(err, "failed to rollback commitments") } - e.LogInfo("forking engine at slot 4", "slot", slot) - // some components are automatically rolled back by deleting their data on disk (e.g. slot based storage). // some other components need to be rolled back manually, like the UTXO ledger for example. // we need to create temporary components to rollback their permanent state, which will be reflected on disk. evictionState := eviction.NewState(newStorage.Settings(), newStorage.RootBlocks) evictionState.Initialize(latestCommitment.Slot()) - e.LogInfo("forking engine at slot 5", "slot", slot) - blockCache := blocks.New(evictionState, newStorage.Settings().APIProvider()) accountsManager := accountsledger.New(e.protocol.NewSubModule("ForkedAccountsLedger"), newStorage.Settings().APIProvider(), blockCache.Block, newStorage.AccountDiffs, newStorage.Accounts()) @@ -129,27 +119,19 @@ func (e *Engines) ForkAtSlot(slot iotago.SlotIndex) (*engine.Engine, error) { return nil, err } - e.LogInfo("forking engine at slot 6", "slot", slot) - targetCommitment, err := newStorage.Commitments().Load(slot) if err != nil { return nil, ierrors.Wrapf(err, "error while retrieving commitment for target index %d", slot) } - e.LogInfo("forking engine at slot 6.1", "slot", slot) - if err = newStorage.Settings().Rollback(targetCommitment); err != nil { return nil, err } - e.LogInfo("forking engine at slot 6.2", "slot", slot) if err = newStorage.Rollback(slot); err != nil { - e.LogError("failed to rollback storage", "err", err) return nil, err } - e.LogInfo("forking engine at slot 7", "slot", slot) - candidateEngine := e.loadEngineInstanceWithStorage(newEngineAlias, newStorage) // rollback attestations already on created engine instance, because this action modifies the in-memory storage. 
@@ -157,8 +139,6 @@ func (e *Engines) ForkAtSlot(slot iotago.SlotIndex) (*engine.Engine, error) { return nil, ierrors.Wrap(err, "error while rolling back attestations storage on candidate engine") } - e.LogInfo("forking engine at slot 8", "slot", slot) - return candidateEngine, nil } From 66370670ecdde16477541e11858e324fb06abcaa Mon Sep 17 00:00:00 2001 From: Hans Moog <3293976+hmoog@users.noreply.github.com> Date: Wed, 17 Apr 2024 11:48:14 +0200 Subject: [PATCH 11/16] Refactor: cleaned up more code --- pkg/protocol/engines.go | 2 -- pkg/storage/prunable/epochstore/epoch_kv.go | 1 - pkg/storage/prunable/prunable.go | 16 ---------------- 3 files changed, 19 deletions(-) diff --git a/pkg/protocol/engines.go b/pkg/protocol/engines.go index 87407d8b2..c3be37c89 100644 --- a/pkg/protocol/engines.go +++ b/pkg/protocol/engines.go @@ -274,9 +274,7 @@ func (e *Engines) injectEngineInstances() (shutdown func()) { } else { e.protocol.Network.OnShutdown(func() { newEngine.ShutdownEvent().Trigger() }) - e.LogInfo("injecting engine instance before", "chain", chain.LogName()) chain.Engine.Set(newEngine) - e.LogInfo("injecting engine instance after", "chain", chain.LogName()) } }) }) diff --git a/pkg/storage/prunable/epochstore/epoch_kv.go b/pkg/storage/prunable/epochstore/epoch_kv.go index 5b94cbde9..1c106b28a 100644 --- a/pkg/storage/prunable/epochstore/epoch_kv.go +++ b/pkg/storage/prunable/epochstore/epoch_kv.go @@ -108,7 +108,6 @@ func (e *EpochKVStore) Prune(epoch iotago.EpochIndex, defaultPruningDelay iotago func (e *EpochKVStore) RollbackEpochs(epoch iotago.EpochIndex) (lastPrunedEpoch iotago.EpochIndex, err error) { lastAccessedEpoch, err := e.LastAccessedEpoch() - if err != nil { return lastAccessedEpoch, ierrors.Wrap(err, "failed to get last accessed epoch") } diff --git a/pkg/storage/prunable/prunable.go b/pkg/storage/prunable/prunable.go index e186eedb6..e90f2b0b8 100644 --- a/pkg/storage/prunable/prunable.go +++ b/pkg/storage/prunable/prunable.go @@ -1,8 +1,6 @@ package prunable import ( - "fmt" - copydir "github.com/otiai10/copy" "github.com/iotaledger/hive.go/ierrors" @@ -173,47 +171,34 @@ func (p *Prunable) Flush() { } func (p *Prunable) Rollback(targetEpoch iotago.EpochIndex, startPruneRange iotago.SlotIndex, endPruneRange iotago.SlotIndex) error { - fmt.Println("Rollback", targetEpoch, startPruneRange, endPruneRange) - if err := p.prunableSlotStore.PruneSlots(targetEpoch, startPruneRange, endPruneRange); err != nil { return ierrors.Wrapf(err, "failed to prune slots in range [%d, %d] from target epoch %d", startPruneRange, endPruneRange, targetEpoch) } - fmt.Println("Rollback 2", targetEpoch, startPruneRange, endPruneRange) - if err := p.rollbackCommitteesCandidates(targetEpoch, startPruneRange); err != nil { return ierrors.Wrapf(err, "failed to rollback committee candidates to target epoch %d", targetEpoch) } - fmt.Println("Rollback 3", targetEpoch, startPruneRange, endPruneRange) lastPrunedCommitteeEpoch, err := p.rollbackCommitteeEpochs(targetEpoch+1, startPruneRange-1) if err != nil { return ierrors.Wrapf(err, "failed to rollback committee epochs to target epoch %d", targetEpoch) } - fmt.Println("Rollback 4", targetEpoch, startPruneRange, endPruneRange) - lastPrunedPoolStatsEpoch, _, err := p.poolStats.RollbackEpochs(targetEpoch) if err != nil { return ierrors.Wrapf(err, "failed to rollback pool stats epochs to target epoch %d", targetEpoch) } - fmt.Println("Rollback 5", targetEpoch, startPruneRange, endPruneRange) - lastPrunedDecidedUpgradeSignalsEpoch, _, err := 
p.decidedUpgradeSignals.RollbackEpochs(targetEpoch) if err != nil { return ierrors.Wrapf(err, "failed to rollback decided upgrade signals epochs to target epoch %d", targetEpoch) } - fmt.Println("Rollback 6", targetEpoch, startPruneRange, endPruneRange) - lastPrunedPoolRewardsEpoch, err := p.poolRewards.RollbackEpochs(targetEpoch) if err != nil { return ierrors.Wrapf(err, "failed to rollback pool rewards epochs to target epoch %d", targetEpoch) } - fmt.Println("Rollback 7 ", targetEpoch, startPruneRange, endPruneRange) - for epochToPrune := targetEpoch + 1; epochToPrune <= max( lastPrunedCommitteeEpoch, lastPrunedPoolStatsEpoch, @@ -222,7 +207,6 @@ func (p *Prunable) Rollback(targetEpoch iotago.EpochIndex, startPruneRange iotag ); epochToPrune++ { p.prunableSlotStore.DeleteBucket(epochToPrune) } - fmt.Println("Rollback 8", targetEpoch, startPruneRange, endPruneRange) return nil } From 7a8d3b13a3ea6d097f610bd177a4d5bdf5ebdefe Mon Sep 17 00:00:00 2001 From: Hans Moog <3293976+hmoog@users.noreply.github.com> Date: Wed, 17 Apr 2024 11:50:38 +0200 Subject: [PATCH 12/16] Refactor: reverted more changes --- pkg/tests/loss_of_acceptance_test.go | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/pkg/tests/loss_of_acceptance_test.go b/pkg/tests/loss_of_acceptance_test.go index 35c3c2653..bf18e0d0e 100644 --- a/pkg/tests/loss_of_acceptance_test.go +++ b/pkg/tests/loss_of_acceptance_test.go @@ -2,7 +2,6 @@ package tests import ( "fmt" - "net/http" _ "net/http" _ "net/http/pprof" "testing" @@ -20,12 +19,6 @@ import ( ) func TestLossOfAcceptanceFromGenesis(t *testing.T) { - // debug.SetEnabled(true) - - go func() { - fmt.Println(http.ListenAndServe("localhost:6061", nil)) - }() - ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( @@ -44,22 +37,17 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { ), testsuite.WithWaitFor(15*time.Second), ) - // defer ts.Shutdown() + defer ts.Shutdown() node0 := ts.AddValidatorNode("node0") ts.AddDefaultWallet(node0) node1 := ts.AddValidatorNode("node1") - // node2 := ts.AddNode("node2") nodesP1 := []*mock.Node{node0} nodesP2 := []*mock.Node{node1} ts.Run(true, nil) - //node0.Protocol.SetLogLevel(log.LevelTrace) - // node1.Protocol.SetLogLevel(log.LevelTrace) - // node2.Protocol.SetLogLevel(log.LevelFatal) - // Create snapshot to use later. snapshotPath := ts.Directory.Path(fmt.Sprintf("%d_snapshot", time.Now().Unix())) require.NoError(t, node0.Protocol.Engines.Main.Get().WriteSnapshot(snapshotPath)) From 0f2803143f6d8edcdf386846bd9cae5e6c19df89 Mon Sep 17 00:00:00 2001 From: Hans Moog <3293976+hmoog@users.noreply.github.com> Date: Wed, 17 Apr 2024 11:52:07 +0200 Subject: [PATCH 13/16] Refactor: more cleanup --- pkg/tests/loss_of_acceptance_test.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/pkg/tests/loss_of_acceptance_test.go b/pkg/tests/loss_of_acceptance_test.go index bf18e0d0e..d8098de31 100644 --- a/pkg/tests/loss_of_acceptance_test.go +++ b/pkg/tests/loss_of_acceptance_test.go @@ -134,13 +134,8 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { ts.Wait() - fmt.Println(">>>>>>> Checking stuff...") - // ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("59.0"), true, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(59, ts.Nodes()...) - fmt.Println(">>>>>>> Latest commitment slot index:", 59) - // pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) ts.AssertEqualStoredCommitmentAtIndex(59, ts.Nodes()...) 
- fmt.Println(">>>>>>> Stored commitment at index 59 is equal for all nodes.") } } From 971c6cd561fe3a2670525a6b7f64eaaccc4d4809 Mon Sep 17 00:00:00 2001 From: Hans Moog <3293976+hmoog@users.noreply.github.com> Date: Wed, 17 Apr 2024 11:53:24 +0200 Subject: [PATCH 14/16] Refactor: removed unused imports --- pkg/tests/loss_of_acceptance_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/tests/loss_of_acceptance_test.go b/pkg/tests/loss_of_acceptance_test.go index d8098de31..5244ba03e 100644 --- a/pkg/tests/loss_of_acceptance_test.go +++ b/pkg/tests/loss_of_acceptance_test.go @@ -2,8 +2,6 @@ package tests import ( "fmt" - _ "net/http" - _ "net/http/pprof" "testing" "time" From 872232b01f144b833aa9790670101702031a0adf Mon Sep 17 00:00:00 2001 From: Hans Moog <3293976+hmoog@users.noreply.github.com> Date: Wed, 17 Apr 2024 12:38:44 +0200 Subject: [PATCH 15/16] Fix: fixed deadlock in bucketmanager --- pkg/storage/prunable/bucket_manager.go | 2 +- pkg/storage/prunable/bucketed_kvstore.go | 131 +++++++++++++++++++++++ pkg/tests/loss_of_acceptance_test.go | 18 ++-- 3 files changed, 141 insertions(+), 10 deletions(-) create mode 100644 pkg/storage/prunable/bucketed_kvstore.go diff --git a/pkg/storage/prunable/bucket_manager.go b/pkg/storage/prunable/bucket_manager.go index c557e2903..22218c73d 100644 --- a/pkg/storage/prunable/bucket_manager.go +++ b/pkg/storage/prunable/bucket_manager.go @@ -67,7 +67,7 @@ func (b *BucketManager) Get(epoch iotago.EpochIndex, realm kvstore.Realm) (kvsto return nil, ierrors.WithMessagef(database.ErrEpochPruned, "epoch %d", epoch) } - kv := b.getDBInstance(epoch).KVStore() + kv := newBucketedKVStore(b, b.getDBInstance(epoch).KVStore()) return lo.PanicOnErr(kv.WithExtendedRealm(realm)), nil } diff --git a/pkg/storage/prunable/bucketed_kvstore.go b/pkg/storage/prunable/bucketed_kvstore.go new file mode 100644 index 000000000..3d5d7d1f1 --- /dev/null +++ b/pkg/storage/prunable/bucketed_kvstore.go @@ -0,0 +1,131 @@ +package prunable + +import "github.com/iotaledger/hive.go/kvstore" + +type bucketedKVStore struct { + bucketManager *BucketManager + store kvstore.KVStore +} + +func newBucketedKVStore(bucketManager *BucketManager, store kvstore.KVStore) *bucketedKVStore { + return &bucketedKVStore{ + bucketManager: bucketManager, + store: store, + } +} + +func (b *bucketedKVStore) WithRealm(realm kvstore.Realm) (kvstore.KVStore, error) { + b.rLockBucketManager() + defer b.rUnlockBucketManager() + + s, err := b.store.WithRealm(realm) + if err != nil { + return nil, err + } + + return newBucketedKVStore(b.bucketManager, s), nil +} + +func (b *bucketedKVStore) WithExtendedRealm(realm kvstore.Realm) (kvstore.KVStore, error) { + b.rLockBucketManager() + defer b.rUnlockBucketManager() + + s, err := b.store.WithExtendedRealm(realm) + if err != nil { + return nil, err + } + + return newBucketedKVStore(b.bucketManager, s), nil +} + +func (b *bucketedKVStore) Realm() kvstore.Realm { + b.rLockBucketManager() + defer b.rUnlockBucketManager() + + return b.store.Realm() +} + +func (b *bucketedKVStore) Iterate(prefix kvstore.KeyPrefix, kvConsumerFunc kvstore.IteratorKeyValueConsumerFunc, direction ...kvstore.IterDirection) error { + b.rLockBucketManager() + defer b.rUnlockBucketManager() + + return b.store.Iterate(prefix, kvConsumerFunc, direction...) 
+}
+
+func (b *bucketedKVStore) IterateKeys(prefix kvstore.KeyPrefix, consumerFunc kvstore.IteratorKeyConsumerFunc, direction ...kvstore.IterDirection) error {
+	b.rLockBucketManager()
+	defer b.rUnlockBucketManager()
+
+	return b.store.IterateKeys(prefix, consumerFunc, direction...)
+}
+
+func (b *bucketedKVStore) Clear() error {
+	b.rLockBucketManager()
+	defer b.rUnlockBucketManager()
+
+	return b.store.Clear()
+}
+
+func (b *bucketedKVStore) Get(key kvstore.Key) (value kvstore.Value, err error) {
+	b.rLockBucketManager()
+	defer b.rUnlockBucketManager()
+
+	return b.store.Get(key)
+}
+
+func (b *bucketedKVStore) Set(key kvstore.Key, value kvstore.Value) error {
+	b.rLockBucketManager()
+	defer b.rUnlockBucketManager()
+
+	return b.store.Set(key, value)
+}
+
+func (b *bucketedKVStore) Has(key kvstore.Key) (bool, error) {
+	b.rLockBucketManager()
+	defer b.rUnlockBucketManager()
+
+	return b.store.Has(key)
+}
+
+func (b *bucketedKVStore) Delete(key kvstore.Key) error {
+	b.rLockBucketManager()
+	defer b.rUnlockBucketManager()
+
+	return b.store.Delete(key)
+}
+
+func (b *bucketedKVStore) DeletePrefix(prefix kvstore.KeyPrefix) error {
+	b.rLockBucketManager()
+	defer b.rUnlockBucketManager()
+
+	return b.store.DeletePrefix(prefix)
+}
+
+func (b *bucketedKVStore) Flush() error {
+	b.rLockBucketManager()
+	defer b.rUnlockBucketManager()
+
+	return b.store.Flush()
+}
+
+func (b *bucketedKVStore) Close() error {
+	b.rLockBucketManager()
+	defer b.rUnlockBucketManager()
+
+	return b.store.Close()
+}
+
+func (b *bucketedKVStore) Batched() (kvstore.BatchedMutations, error) {
+	b.rLockBucketManager()
+	defer b.rUnlockBucketManager()
+
+	return b.store.Batched()
+}
+
+func (b *bucketedKVStore) rLockBucketManager() {
+	b.bucketManager.mutex.RLock()
+}
+
+func (b *bucketedKVStore) rUnlockBucketManager() {
+	b.bucketManager.mutex.RUnlock()
+}
diff --git a/pkg/tests/loss_of_acceptance_test.go b/pkg/tests/loss_of_acceptance_test.go
index 5244ba03e..6bbadf2e7 100644
--- a/pkg/tests/loss_of_acceptance_test.go
+++ b/pkg/tests/loss_of_acceptance_test.go
@@ -113,15 +113,15 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) {
 	ts.AssertSybilProtectionOnlineCommittee(seatIndexes[1:2], nodesP2...)
 
 	// Start node3 from genesis snapshot.
-	// node3 := ts.AddNode("node3")
-	// {
-	// 	node3.Initialize(true,
-	// 		protocol.WithSnapshotPath(snapshotPath),
-	// 		protocol.WithBaseDirectory(ts.Directory.PathWithCreate(node3.Name)),
-	// 	)
-	// 	node3.Protocol.SetLogLevel(log.LevelTrace)
-	// 	ts.Wait()
-	// }
+	node3 := ts.AddNode("node3")
+	{
+		node3.Initialize(true,
+			protocol.WithSnapshotPath(snapshotPath),
+			protocol.WithBaseDirectory(ts.Directory.PathWithCreate(node3.Name)),
+		)
+
+		ts.Wait()
+	}
 
 	ts.MergePartitionsToMain()
 	fmt.Println("\n=========================\nMerged network partitions\n=========================")

From 575416a2e1fbcaf881ed65c3e3775dff6af75066 Mon Sep 17 00:00:00 2001
From: Hans Moog <3293976+hmoog@users.noreply.github.com>
Date: Wed, 17 Apr 2024 12:42:02 +0200
Subject: [PATCH 16/16] Feat: re-add previous test

---
 pkg/tests/loss_of_acceptance_test.go | 87 ++++++++++++++++++++++++++++
 1 file changed, 87 insertions(+)

diff --git a/pkg/tests/loss_of_acceptance_test.go b/pkg/tests/loss_of_acceptance_test.go
index 6bbadf2e7..5d9ba530c 100644
--- a/pkg/tests/loss_of_acceptance_test.go
+++ b/pkg/tests/loss_of_acceptance_test.go
@@ -17,6 +17,93 @@ import (
 )
 
 func TestLossOfAcceptanceFromGenesis(t *testing.T) {
+	ts := testsuite.NewTestSuite(t,
+		testsuite.WithWaitFor(15*time.Second),
+		testsuite.WithProtocolParametersOptions(
+			iotago.WithTimeProviderOptions(
+				0,
+				testsuite.GenesisTimeWithOffsetBySlots(100, testsuite.DefaultSlotDurationInSeconds),
+				testsuite.DefaultSlotDurationInSeconds,
+				3,
+			),
+			iotago.WithLivenessOptions(
+				10,
+				10,
+				2,
+				4,
+				5,
+			),
+		),
+		testsuite.WithWaitFor(15*time.Second),
+	)
+	defer ts.Shutdown()
+
+	node0 := ts.AddValidatorNode("node0")
+	ts.AddDefaultWallet(node0)
+	ts.AddValidatorNode("node1")
+	ts.AddNode("node2")
+
+	ts.Run(true, nil)
+
+	// Create snapshot to use later.
+	snapshotPath := ts.Directory.Path(fmt.Sprintf("%d_snapshot", time.Now().Unix()))
+	require.NoError(t, node0.Protocol.Engines.Main.Get().WriteSnapshot(snapshotPath))
+
+	// Revive chain on node0.
+	{
+		ts.SetCurrentSlot(50)
+		block0 := lo.PanicOnErr(ts.IssueValidationBlockWithHeaderOptions("block0", node0))
+		require.EqualValues(t, 48, ts.Block("block0").SlotCommitmentID().Slot())
+		// Reviving the chain should select one parent from the last committed slot.
+		require.Len(t, block0.Parents(), 1)
+		require.Equal(t, block0.Parents()[0].Alias(), "Genesis")
+		ts.AssertBlocksExist(ts.Blocks("block0"), true, ts.ClientsForNodes(node0)...)
+	}
+
+	// Need to issue to slot 52 so that all other nodes can warp sync up to slot 49 and then commit slot 50 themselves.
+	{
+		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{51, 52}, 2, "block0", mock.Nodes(node0), true, false)
+
+		ts.AssertLatestCommitmentSlotIndex(50, ts.Nodes()...)
+		ts.AssertEqualStoredCommitmentAtIndex(50, ts.Nodes()...)
+		ts.AssertBlocksExist(ts.Blocks("block0"), true, ts.ClientsForNodes()...)
+	}
+
+	// Continue issuing on all nodes for a few slots.
+	{
+		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{53, 54, 55, 56, 57}, 3, "52.1", ts.Nodes(), true, false)
+
+		ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("57.0"), true, ts.Nodes()...)
+		ts.AssertLatestCommitmentSlotIndex(55, ts.Nodes()...)
+		ts.AssertEqualStoredCommitmentAtIndex(55, ts.Nodes()...)
+	}
+
+	// Start node3 from genesis snapshot.
+	{
+		node3 := ts.AddNode("node3")
+		node3.Initialize(true,
+			protocol.WithSnapshotPath(snapshotPath),
+			protocol.WithBaseDirectory(ts.Directory.PathWithCreate(node3.Name)),
+		)
+		ts.Wait()
+	}
+
+	// Continue issuing on all nodes for a few slots.
+	{
+		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{58, 59}, 3, "57.2", ts.Nodes("node0", "node1", "node2"), true, false)
+
+		ts.AssertBlocksInCacheAccepted(ts.BlocksWithPrefix("59.0"), true, ts.Nodes()...)
+		ts.AssertLatestCommitmentSlotIndex(57, ts.Nodes()...)
+		ts.AssertEqualStoredCommitmentAtIndex(57, ts.Nodes()...)
+	}
+
+	// Check that commitments from 1-49 are empty.
+	for slot := iotago.SlotIndex(1); slot <= 49; slot++ {
+		ts.AssertStorageCommitmentBlocks(slot, nil, ts.Nodes()...)
+	}
+}
+
+func TestEngineSwitchingUponStartupWithLossOfAcceptance(t *testing.T) {
 	ts := testsuite.NewTestSuite(t,
 		testsuite.WithProtocolParametersOptions(
 			iotago.WithTimeProviderOptions(
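A note on [PATCH 15/16] "Fix: fixed deadlock in bucketmanager": the new bucketedKVStore does one thing only — every call that reaches the bucket's underlying KVStore first takes the BucketManager's read lock and releases it when the call returns, so a store operation is mutually exclusive with any manager operation that holds the write lock (for example, deleting a bucket). The sketch below shows the same guard-every-call pattern in isolation; the Store interface and all names here are simplified stand-ins for illustration, not the actual hive.go kvstore API.

package example

import "sync"

// Store is a minimal stand-in for the KVStore interface that the patch wraps.
type Store interface {
	Set(key, value []byte) error
	Get(key []byte) ([]byte, error)
}

// lockedStore mirrors the bucketedKVStore idea: each delegated call runs under
// the owner's read lock (in the patch, the BucketManager's mutex), so it cannot
// overlap with an owner operation that takes the write lock.
type lockedStore struct {
	ownerMutex *sync.RWMutex
	inner      Store
}

func (s *lockedStore) Set(key, value []byte) error {
	s.ownerMutex.RLock()
	defer s.ownerMutex.RUnlock()

	return s.inner.Set(key, value)
}

func (s *lockedStore) Get(key []byte) ([]byte, error) {
	s.ownerMutex.RLock()
	defer s.ownerMutex.RUnlock()

	return s.inner.Get(key)
}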