diff --git a/pkg/tests/booker_test.go b/pkg/tests/booker_test.go
index b1b35d572..8d3b95579 100644
--- a/pkg/tests/booker_test.go
+++ b/pkg/tests/booker_test.go
@@ -402,7 +402,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {

 	// Advance both nodes at the edge of slot 1 committability
 	{
-		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{2, 3, 4}, 1, "block2.4", ts.Nodes("node1", "node2"), false, nil)
+		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{2, 3, 4}, 1, "block2.4", ts.Nodes("node1", "node2"), false, false)

 		ts.AssertNodeState(ts.Nodes(),
 			testsuite.WithProtocolParameters(ts.API.ProtocolParameters()),
@@ -420,7 +420,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {
 			ts.Block("block2.tx1"): {"tx1"},
 		}, node1, node2)

-		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "4.0", ts.Nodes("node1"), false, nil)
+		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "4.0", ts.Nodes("node1"), false, false)

 		ts.AssertBlocksExist(ts.BlocksWithPrefix("5.0"), true, ts.Nodes()...)
 	}
@@ -436,7 +436,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {
 		ts.SplitIntoPartitions(partitions)

 		// Only node2 will commit after issuing this one
-		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, nil)
+		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, false)

 		ts.AssertNodeState(ts.Nodes("node1"),
 			testsuite.WithProtocolParameters(ts.API.ProtocolParameters()),
@@ -527,7 +527,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {
 	// Sync up the nodes to he same point and check consistency between them.
 	{
 		// Let node1 catch up with commitment 1
-		ts.IssueBlocksAtSlots("5.1", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, nil)
+		ts.IssueBlocksAtSlots("5.1", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, false)

 		ts.AssertNodeState(ts.Nodes("node1", "node2"),
 			testsuite.WithProtocolParameters(ts.API.ProtocolParameters()),
@@ -573,7 +573,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {
 	{
 		ts.AssertTransactionsExist(wallet.Transactions("tx1", "tx2", "tx4"), true, node1, node2)

-		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{6, 7, 8, 9, 10}, 5, "5.1", ts.Nodes("node1", "node2"), false, nil)
+		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{6, 7, 8, 9, 10}, 5, "5.1", ts.Nodes("node1", "node2"), false, false)

 		ts.AssertNodeState(ts.Nodes("node1", "node2"),
 			testsuite.WithProtocolParameters(ts.API.ProtocolParameters()),
@@ -656,7 +656,7 @@ func Test_SpendPendingCommittedRace(t *testing.T) {

 	// Advance both nodes at the edge of slot 1 committability
 	{
-		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{2, 3, 4}, 1, "Genesis", ts.Nodes("node1", "node2"), false, nil)
+		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{2, 3, 4}, 1, "Genesis", ts.Nodes("node1", "node2"), false, false)

 		ts.AssertNodeState(ts.Nodes(),
 			testsuite.WithProtocolParameters(ts.API.ProtocolParameters()),
@@ -667,7 +667,7 @@ func Test_SpendPendingCommittedRace(t *testing.T) {

 		ts.IssueValidationBlockAtSlot("", 5, genesisCommitment, node1, ts.BlockIDsWithPrefix("4.0")...)

-		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "4.0", ts.Nodes("node1"), false, nil)
+		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "4.0", ts.Nodes("node1"), false, false)

 		ts.AssertBlocksExist(ts.BlocksWithPrefix("5.0"), true, ts.Nodes()...)
 	}
@@ -682,7 +682,7 @@ func Test_SpendPendingCommittedRace(t *testing.T) {
 		ts.SplitIntoPartitions(partitions)

 		// Only node2 will commit after issuing this one
-		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, nil)
+		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, false)

 		ts.AssertNodeState(ts.Nodes("node1"),
 			testsuite.WithProtocolParameters(ts.API.ProtocolParameters()),
@@ -724,7 +724,7 @@ func Test_SpendPendingCommittedRace(t *testing.T) {
 	// Sync up the nodes to he same point and check consistency between them.
 	{
 		// Let node1 catch up with commitment 1
-		ts.IssueBlocksAtSlots("5.1", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, nil)
+		ts.IssueBlocksAtSlots("5.1", []iotago.SlotIndex{5}, 1, "5.0", ts.Nodes("node2"), false, false)

 		ts.AssertNodeState(ts.Nodes("node1", "node2"),
 			testsuite.WithProtocolParameters(ts.API.ProtocolParameters()),
@@ -756,7 +756,7 @@ func Test_SpendPendingCommittedRace(t *testing.T) {
 		ts.AssertTransactionsExist(wallet.Transactions("tx1", "tx2"), true, node1, node2)
 		ts.AssertTransactionsInCachePending(wallet.Transactions("tx1", "tx2"), true, node1, node2)

-		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{6, 7, 8, 9, 10}, 5, "5.1", ts.Nodes("node1", "node2"), false, nil)
+		ts.IssueBlocksAtSlots("", []iotago.SlotIndex{6, 7, 8, 9, 10}, 5, "5.1", ts.Nodes("node1", "node2"), false, false)

 		ts.AssertNodeState(ts.Nodes("node1", "node2"),
 			testsuite.WithProtocolParameters(ts.API.ProtocolParameters()),
diff --git a/pkg/tests/committee_rotation_test.go b/pkg/tests/committee_rotation_test.go
index a7415bc24..cfc06133d 100644
--- a/pkg/tests/committee_rotation_test.go
+++ b/pkg/tests/committee_rotation_test.go
@@ -78,12 +78,12 @@ func Test_TopStakersRotation(t *testing.T) {

 	// Select committee for epoch 1 and test candidacy announcements at different times.
 	{
-		ts.IssueBlocksAtSlots("wave-1:", []iotago.SlotIndex{1, 2, 3, 4}, 4, "Genesis", ts.Nodes(), true, nil)
+		ts.IssueBlocksAtSlots("wave-1:", []iotago.SlotIndex{1, 2, 3, 4}, 4, "Genesis", ts.Nodes(), true, false)

 		ts.IssueCandidacyAnnouncementInSlot("node1-candidacy:1", 4, "wave-1:4.3", ts.Wallet("node1"))
 		ts.IssueCandidacyAnnouncementInSlot("node4-candidacy:1", 5, "node1-candidacy:1", ts.Wallet("node4"))

-		ts.IssueBlocksAtSlots("wave-2:", []iotago.SlotIndex{5, 6, 7, 8, 9}, 4, "node4-candidacy:1", ts.Nodes(), true, nil)
+		ts.IssueBlocksAtSlots("wave-2:", []iotago.SlotIndex{5, 6, 7, 8, 9}, 4, "node4-candidacy:1", ts.Nodes(), true, false)

 		ts.IssueCandidacyAnnouncementInSlot("node4-candidacy:2", 9, "wave-2:9.3", ts.Wallet("node4"))
 		ts.IssueCandidacyAnnouncementInSlot("node5-candidacy:1", 9, "node4-candidacy:2", ts.Wallet("node5"))
@@ -91,7 +91,7 @@ func Test_TopStakersRotation(t *testing.T) {
 		// This candidacy should be considered as it's announced at the last possible slot.
 		ts.IssueCandidacyAnnouncementInSlot("node6-candidacy:1", 10, "node5-candidacy:1", ts.Wallet("node6"))

-		ts.IssueBlocksAtSlots("wave-3:", []iotago.SlotIndex{10}, 4, "node6-candidacy:1", ts.Nodes(), true, nil)
+		ts.IssueBlocksAtSlots("wave-3:", []iotago.SlotIndex{10}, 4, "node6-candidacy:1", ts.Nodes(), true, false)

 		// Those candidacies should not be considered as they're issued after EpochNearingThreshold (slot 10).
 		ts.IssueCandidacyAnnouncementInSlot("node2-candidacy:1", 11, "wave-3:10.3", ts.Wallet("node2"))
@@ -107,7 +107,7 @@ func Test_TopStakersRotation(t *testing.T) {
 			ts.Node("node6").Validator.AccountID,
 		}, ts.Nodes()...)

- ts.IssueBlocksAtSlots("wave-4:", []iotago.SlotIndex{11, 12, 13, 14, 15, 16, 17}, 4, "node5-candidacy:2", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("wave-4:", []iotago.SlotIndex{11, 12, 13, 14, 15, 16, 17}, 4, "node5-candidacy:2", ts.Nodes(), true, false) ts.AssertLatestFinalizedSlot(14, ts.Nodes()...) ts.AssertSybilProtectionCommittee(1, []iotago.AccountID{ @@ -119,7 +119,7 @@ func Test_TopStakersRotation(t *testing.T) { // Do not announce new candidacies for epoch 2 but finalize slots. The committee should be the reused. { - ts.IssueBlocksAtSlots("wave-5:", []iotago.SlotIndex{18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}, 4, "wave-4:17.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("wave-5:", []iotago.SlotIndex{18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}, 4, "wave-4:17.3", ts.Nodes(), true, false) ts.AssertSybilProtectionCandidates(1, []iotago.AccountID{}, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(28, ts.Nodes()...) @@ -134,13 +134,13 @@ func Test_TopStakersRotation(t *testing.T) { // Do not finalize slots in time for epoch 3. The committee should be the reused. Even though there are candidates. { // Issue blocks to remove the inactive committee members. - ts.IssueBlocksAtSlots("wave-6:", []iotago.SlotIndex{31, 32}, 4, "wave-5:30.3", ts.Nodes("node5", "node7"), false, nil) + ts.IssueBlocksAtSlots("wave-6:", []iotago.SlotIndex{31, 32}, 4, "wave-5:30.3", ts.Nodes("node5", "node7"), false, false) ts.AssertLatestCommitmentSlotIndex(30, ts.Nodes()...) ts.IssueCandidacyAnnouncementInSlot("node6-candidacy:2", 33, "wave-6:32.3", ts.Wallet("node6")) // Issue the rest of the epoch just before we reach epoch end - maxCommittableAge. - ts.IssueBlocksAtSlots("wave-7:", []iotago.SlotIndex{33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45}, 4, "node6-candidacy:2", ts.Nodes("node5"), true, nil) + ts.IssueBlocksAtSlots("wave-7:", []iotago.SlotIndex{33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45}, 4, "node6-candidacy:2", ts.Nodes("node5"), true, false) ts.AssertLatestCommitmentSlotIndex(43, ts.Nodes()...) // Even though we have a candidate, the committee should be reused as we did not finalize at epochNearingThreshold before epoch end - maxCommittableAge was committed @@ -157,11 +157,11 @@ func Test_TopStakersRotation(t *testing.T) { // Rotate committee to smaller committee due to too few candidates available. { - ts.IssueBlocksAtSlots("wave-8:", []iotago.SlotIndex{46, 47, 48, 49, 50, 51, 52, 53, 54, 55}, 4, "wave-7:45.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("wave-8:", []iotago.SlotIndex{46, 47, 48, 49, 50, 51, 52, 53, 54, 55}, 4, "wave-7:45.3", ts.Nodes(), true, false) ts.IssueCandidacyAnnouncementInSlot("node3-candidacy:2", 56, "wave-8:55.3", ts.Wallet("node3")) - ts.IssueBlocksAtSlots("wave-8:", []iotago.SlotIndex{56, 57, 58, 59, 60, 61}, 4, "node3-candidacy:2", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("wave-8:", []iotago.SlotIndex{56, 57, 58, 59, 60, 61}, 4, "node3-candidacy:2", ts.Nodes(), true, false) ts.AssertLatestCommitmentSlotIndex(59, ts.Nodes()...) ts.AssertLatestFinalizedSlot(58, ts.Nodes()...) diff --git a/pkg/tests/loss_of_acceptance_test.go b/pkg/tests/loss_of_acceptance_test.go index 64c4a58c6..2f2c1ea7a 100644 --- a/pkg/tests/loss_of_acceptance_test.go +++ b/pkg/tests/loss_of_acceptance_test.go @@ -59,7 +59,7 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { // Need to issue to slot 52 so that all other nodes can warp sync up to slot 49 and then commit slot 50 themselves. 
{ - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{51, 52}, 2, "block0", ts.Nodes("node0"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{51, 52}, 2, "block0", ts.Nodes("node0"), true, false) ts.AssertEqualStoredCommitmentAtIndex(50, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(50, ts.Nodes()...) @@ -68,7 +68,7 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { // Continue issuing on all nodes for a few slots. { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{53, 54, 55, 56, 57}, 3, "52.1", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{53, 54, 55, 56, 57}, 3, "52.1", ts.Nodes(), true, false) ts.AssertEqualStoredCommitmentAtIndex(55, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(55, ts.Nodes()...) @@ -87,7 +87,7 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { // Continue issuing on all nodes for a few slots. { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{58, 59}, 3, "57.2", ts.Nodes("node0", "node1", "node2"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{58, 59}, 3, "57.2", ts.Nodes("node0", "node1", "node2"), true, false) ts.AssertEqualStoredCommitmentAtIndex(57, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(57, ts.Nodes()...) @@ -129,7 +129,7 @@ func TestLossOfAcceptanceFromSnapshot(t *testing.T) { // Issue up to slot 10, committing slot 8. { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 3, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 3, "Genesis", ts.Nodes(), true, false) ts.AssertEqualStoredCommitmentAtIndex(8, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(8, ts.Nodes()...) @@ -170,7 +170,7 @@ func TestLossOfAcceptanceFromSnapshot(t *testing.T) { // Need to issue to slot 22 so that all other nodes can warp sync up to slot 19 and then commit slot 20 themselves. { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{21, 22}, 2, "block0", ts.Nodes("node0-restarted"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{21, 22}, 2, "block0", ts.Nodes("node0-restarted"), true, false) ts.AssertEqualStoredCommitmentAtIndex(20, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(20, ts.Nodes()...) @@ -182,7 +182,7 @@ func TestLossOfAcceptanceFromSnapshot(t *testing.T) { // are not used again. ts.SetAutomaticTransactionIssuingCounters(node2.Partition, 24) - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{23, 24, 25}, 3, "22.1", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{23, 24, 25}, 3, "22.1", ts.Nodes(), true, false) ts.AssertEqualStoredCommitmentAtIndex(23, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(23, ts.Nodes()...) @@ -224,7 +224,7 @@ func TestLossOfAcceptanceWithRestartFromDisk(t *testing.T) { // Issue up to slot 10, committing slot 8. { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 3, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 3, "Genesis", ts.Nodes(), true, false) ts.AssertEqualStoredCommitmentAtIndex(8, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(8, ts.Nodes()...) @@ -261,7 +261,7 @@ func TestLossOfAcceptanceWithRestartFromDisk(t *testing.T) { // Need to issue to slot 22 so that all other nodes can warp sync up to slot 19 and then commit slot 20 themselves. 
{ - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{21, 22}, 2, "block0", ts.Nodes("node0-restarted"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{21, 22}, 2, "block0", ts.Nodes("node0-restarted"), true, false) ts.AssertEqualStoredCommitmentAtIndex(20, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(20, ts.Nodes()...) @@ -273,7 +273,7 @@ func TestLossOfAcceptanceWithRestartFromDisk(t *testing.T) { // are not used again. ts.SetAutomaticTransactionIssuingCounters(node2.Partition, 24) - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{23, 24, 25}, 3, "22.1", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{23, 24, 25}, 3, "22.1", ts.Nodes(), true, false) ts.AssertEqualStoredCommitmentAtIndex(23, ts.Nodes()...) ts.AssertLatestCommitmentSlotIndex(23, ts.Nodes()...) diff --git a/pkg/tests/protocol_engine_rollback_test.go b/pkg/tests/protocol_engine_rollback_test.go index 0de8dc8f6..60a491111 100644 --- a/pkg/tests/protocol_engine_rollback_test.go +++ b/pkg/tests/protocol_engine_rollback_test.go @@ -137,7 +137,7 @@ func TestProtocol_EngineRollbackFinalization(t *testing.T) { // Issue up to slot 11 - just before committee selection for the next epoch. // Committee will be reused at slot 10 is finalized or slot 12 is committed, whichever happens first. { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(8), @@ -163,7 +163,7 @@ func TestProtocol_EngineRollbackFinalization(t *testing.T) { } { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16}, 4, "P0:11.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16}, 4, "P0:11.3", ts.Nodes(), true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(13), @@ -330,7 +330,7 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) { // Issue up to slot 11 - just before committee selection for the next epoch. // Committee will be reused when slot 10 is finalized or slot 12 is committed, whichever happens first. { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(8), @@ -363,7 +363,7 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) { } { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(8), @@ -530,7 +530,7 @@ func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) { // Issue up to slot 11 - just before committee selection for the next epoch. // Committee will be reused at slot 10 is finalized or slot 12 is committed, whichever happens first. 
{ - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(8), @@ -563,7 +563,7 @@ func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) { } { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16, 17, 18, 19}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16, 17, 18, 19}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(8), @@ -730,7 +730,7 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T // Issue up to slot 11 - just before committee selection for the next epoch. // Committee will be reused at slot 10 is finalized or slot 12 is committed, whichever happens first. { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(8), @@ -763,7 +763,7 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T } { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(8), diff --git a/pkg/tests/protocol_engine_switching_test.go b/pkg/tests/protocol_engine_switching_test.go index 8000d4bca..abc292d0d 100644 --- a/pkg/tests/protocol_engine_switching_test.go +++ b/pkg/tests/protocol_engine_switching_test.go @@ -165,7 +165,7 @@ func TestProtocol_EngineSwitching(t *testing.T) { // Issue up to slot 13 in P0 (main partition with all nodes) and verify that the nodes have the expected states. { - ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 4, "Genesis", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 4, "Genesis", ts.Nodes(), true, false) ts.AssertNodeState(ts.Nodes(), testsuite.WithLatestFinalizedSlot(10), @@ -220,7 +220,7 @@ func TestProtocol_EngineSwitching(t *testing.T) { // Issue blocks in partition 1. { - ts.IssueBlocksAtSlots("P1:", []iotago.SlotIndex{14, 15, 16, 17, 18, 19, 20}, 4, "P0:13.3", nodesP1[:len(nodesP1)-1], true, nil) + ts.IssueBlocksAtSlots("P1:", []iotago.SlotIndex{14, 15, 16, 17, 18, 19, 20}, 4, "P0:13.3", nodesP1[:len(nodesP1)-1], true, false) ts.AssertNodeState(nodesP1, testsuite.WithLatestFinalizedSlot(17), @@ -274,7 +274,7 @@ func TestProtocol_EngineSwitching(t *testing.T) { // Issue blocks in partition 2. 
{ - ts.IssueBlocksAtSlots("P2:", []iotago.SlotIndex{14, 15, 16, 17, 18, 19, 20}, 4, "P0:13.3", nodesP2[:len(nodesP2)-1], true, nil) + ts.IssueBlocksAtSlots("P2:", []iotago.SlotIndex{14, 15, 16, 17, 18, 19, 20}, 4, "P0:13.3", nodesP2[:len(nodesP2)-1], true, false) ts.AssertNodeState(nodesP2, testsuite.WithLatestFinalizedSlot(10), diff --git a/pkg/tests/protocol_startup_test.go b/pkg/tests/protocol_startup_test.go index e4124af5b..0fb69f7af 100644 --- a/pkg/tests/protocol_startup_test.go +++ b/pkg/tests/protocol_startup_test.go @@ -241,7 +241,7 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) { // Epoch 1: skip slot 10 and issue 6 rows per slot { - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{8, 9, 11, 12, 13}, 6, "7.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{8, 9, 11, 12, 13}, 6, "7.3", ts.Nodes(), true, false) ts.AssertBlocksExist(ts.BlocksWithPrefixes("8", "9", "11", "12", "13"), true, ts.Nodes()...) @@ -374,7 +374,7 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) { } // Only issue on nodes that have the latest state in memory. - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{14, 15}, 6, "13.5", ts.Nodes("nodeA", "nodeB"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{14, 15}, 6, "13.5", ts.Nodes("nodeA", "nodeB"), true, false) for _, slot := range []iotago.SlotIndex{12, 13} { aliases := lo.Map([]string{"nodeA", "nodeB"}, func(s string) string { diff --git a/pkg/tests/upgrade_signaling_test.go b/pkg/tests/upgrade_signaling_test.go index 76b63980f..ac381d87b 100644 --- a/pkg/tests/upgrade_signaling_test.go +++ b/pkg/tests/upgrade_signaling_test.go @@ -234,16 +234,16 @@ func Test_Upgrade_Signaling(t *testing.T) { ts.IssueBlocksAtEpoch("", 3, 4, "23.3", ts.Nodes(), true, nil) // Epoch 5: revoke vote of nodeA in last slot of epoch. - ts.IssueBlocksAtSlots("", ts.SlotsForEpoch(4)[:ts.API.TimeProvider().EpochDurationSlots()-1], 4, "31.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", ts.SlotsForEpoch(4)[:ts.API.TimeProvider().EpochDurationSlots()-1], 4, "31.3", ts.Nodes(), true, false) ts.Node("nodeA").SetProtocolParametersHash(iotago.Identifier{}) - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{39}, 4, "38.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{39}, 4, "38.3", ts.Nodes(), true, false) ts.Node("nodeA").SetProtocolParametersHash(hash1) // Epoch 6: issue half before restarting and half after restarting. - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{40, 41, 42, 43}, 4, "39.3", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{40, 41, 42, 43}, 4, "39.3", ts.Nodes(), true, false) { var expectedRootBlocks []*blocks.Block @@ -293,11 +293,11 @@ func Test_Upgrade_Signaling(t *testing.T) { } // Can only continue to issue on nodeA, nodeB, nodeC, nodeD, nodeF. nodeE and nodeG were just restarted and don't have the latest unaccepted state. - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{44}, 4, "43.3", ts.Nodes("nodeA", "nodeB", "nodeC", "nodeD", "nodeF"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{44}, 4, "43.3", ts.Nodes("nodeA", "nodeB", "nodeC", "nodeD", "nodeF"), true, false) // TODO: would be great to dynamically add accounts for later nodes. // Can't issue on nodeG as its account is not known. 
- ts.IssueBlocksAtSlots("", []iotago.SlotIndex{45, 46, 47}, 4, "44.3", ts.Nodes("nodeA", "nodeB", "nodeC", "nodeD", "nodeF", "nodeE1"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{45, 46, 47}, 4, "44.3", ts.Nodes("nodeA", "nodeB", "nodeC", "nodeD", "nodeF", "nodeE1"), true, false) ts.IssueBlocksAtEpoch("", 6, 4, "47.3", ts.Nodes("nodeA", "nodeB", "nodeC", "nodeD", "nodeF", "nodeE1"), true, nil) ts.IssueBlocksAtEpoch("", 7, 4, "55.3", ts.Nodes("nodeA", "nodeB", "nodeC", "nodeD", "nodeF", "nodeE1"), true, nil) @@ -411,8 +411,8 @@ func Test_Upgrade_Signaling(t *testing.T) { // Check that issuing still produces the same commitments on the nodes that upgraded. The nodes that did not upgrade // should not be able to issue and process blocks with the new version. - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{64, 65}, 4, "63.3", ts.Nodes("nodeB", "nodeC"), false, nil) - ts.IssueBlocksAtSlots("", []iotago.SlotIndex{66, 67, 68, 69, 70, 71}, 4, "65.3", ts.Nodes("nodeB", "nodeC"), true, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{64, 65}, 4, "63.3", ts.Nodes("nodeB", "nodeC"), false, false) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{66, 67, 68, 69, 70, 71}, 4, "65.3", ts.Nodes("nodeB", "nodeC"), true, false) // Nodes that did not set up the new protocol parameters are not able to process blocks with the new version. ts.AssertNodeState(ts.Nodes("nodeA", "nodeD", "nodeF", "nodeG"), diff --git a/pkg/testsuite/attestations.go b/pkg/testsuite/attestations.go index a9d258a48..4b4670252 100644 --- a/pkg/testsuite/attestations.go +++ b/pkg/testsuite/attestations.go @@ -40,14 +40,14 @@ func (t *TestSuite) AssertAttestationsForSlot(slot iotago.SlotIndex, blocks []*b return ierrors.Wrapf(err, "AssertAttestationsForSlot: %s: error iterating over attestation tree", node.Name) } - if len(expectedAttestations) != len(storedAttestations) { - return ierrors.Errorf("AssertAttestationsForSlot: %s: expected %d attestation(s), got %d", node.Name, len(expectedAttestations), len(storedAttestations)) - } - if !assert.ElementsMatch(t.fakeTesting, expectedAttestations, storedAttestations) { return ierrors.Errorf("AssertAttestationsForSlot: %s: expected attestation(s) %s, got %s", node.Name, expectedAttestations, storedAttestations) } + if len(expectedAttestations) != len(storedAttestations) { + return ierrors.Errorf("AssertAttestationsForSlot: %s: expected %d attestation(s), got %d", node.Name, len(expectedAttestations), len(storedAttestations)) + } + return nil }) } diff --git a/pkg/testsuite/testsuite_issue_blocks.go b/pkg/testsuite/testsuite_issue_blocks.go index 14210a24c..da7bcd2d7 100644 --- a/pkg/testsuite/testsuite_issue_blocks.go +++ b/pkg/testsuite/testsuite_issue_blocks.go @@ -238,10 +238,12 @@ func (t *TestSuite) IssueBlockRowsInSlot(prefix string, slot iotago.SlotIndex, r return blocksIssued, lastBlockRowIssued } -func (t *TestSuite) IssueBlocksAtSlots(prefix string, slots []iotago.SlotIndex, rowsPerSlot int, initialParentsPrefix string, nodes []*mock.Node, waitForSlotsCommitted bool, issuingOptions map[string][]options.Option[mock.BlockHeaderParams]) (allBlocksIssued []*blocks.Block, lastBlockRow []*blocks.Block) { +func (t *TestSuite) IssueBlocksAtSlots(prefix string, slots []iotago.SlotIndex, rowsPerSlot int, initialParentsPrefix string, nodes []*mock.Node, waitForSlotsCommitted bool, useCommitmentAtMinCommittableAge bool) (allBlocksIssued []*blocks.Block, lastBlockRow []*blocks.Block) { var blocksIssued, lastBlockRowIssued []*blocks.Block parentsPrefix := initialParentsPrefix + 
+
 	for i, slot := range slots {
 		if i > 0 {
 			parentsPrefix = fmt.Sprintf("%s%d.%d", prefix, slots[i-1], rowsPerSlot-1)
@@ -253,7 +255,20 @@ func (t *TestSuite) IssueBlocksAtSlots(prefix string, slots []iotago.SlotIndex,

 		if waitForSlotsCommitted {
 			if slot > t.API.ProtocolParameters().MinCommittableAge() {
-				t.AssertCommitmentSlotIndexExists(slot-(t.API.ProtocolParameters().MinCommittableAge()), nodes...)
+				commitmentSlot := slot - t.API.ProtocolParameters().MinCommittableAge()
+				t.AssertCommitmentSlotIndexExists(commitmentSlot, nodes...)
+
+				if useCommitmentAtMinCommittableAge {
+					// Make sure that all nodes create blocks throughout the slot that commit to the same commitment at slot - minCommittableAge.
+					for _, node := range nodes {
+						commitment, err := node.Protocol.MainEngineInstance().Storage.Commitments().Load(commitmentSlot)
+						require.NoError(t.Testing, err)
+
+						issuingOptions[node.Name] = []options.Option[mock.BlockHeaderParams]{
+							mock.WithSlotCommitment(commitment.Commitment()),
+						}
+					}
+				}
 			} else {
 				t.AssertBlocksExist(blocksInSlot, true, nodes...)
 			}
@@ -263,8 +278,8 @@ func (t *TestSuite) IssueBlocksAtSlots(prefix string, slots []iotago.SlotIndex,
 	return blocksIssued, lastBlockRowIssued
 }

-func (t *TestSuite) IssueBlocksAtEpoch(prefix string, epoch iotago.EpochIndex, rowsPerSlot int, initialParentsPrefix string, nodes []*mock.Node, waitForSlotsCommitted bool, issuingOptions map[string][]options.Option[mock.BlockHeaderParams]) (allBlocksIssued []*blocks.Block, lastBlockRow []*blocks.Block) {
-	return t.IssueBlocksAtSlots(prefix, t.SlotsForEpoch(epoch), rowsPerSlot, initialParentsPrefix, nodes, waitForSlotsCommitted, issuingOptions)
+func (t *TestSuite) IssueBlocksAtEpoch(prefix string, epoch iotago.EpochIndex, rowsPerSlot int, initialParentsPrefix string, nodes []*mock.Node, waitForSlotsCommitted bool, useCommitmentAtMinCommittableAge bool) (allBlocksIssued []*blocks.Block, lastBlockRow []*blocks.Block) {
+	return t.IssueBlocksAtSlots(prefix, t.SlotsForEpoch(epoch), rowsPerSlot, initialParentsPrefix, nodes, waitForSlotsCommitted, useCommitmentAtMinCommittableAge)
 }

 func (t *TestSuite) SlotsForEpoch(epoch iotago.EpochIndex) []iotago.SlotIndex {
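Usage note (not part of the diff): the sketch below illustrates the intended difference between the two values of the new useCommitmentAtMinCommittableAge flag, based on the IssueBlocksAtSlots signature and the ts helpers shown above. The helper name issueWithPinnedCommitment and the concrete slots and prefixes are hypothetical, chosen only for illustration.

package tests

import (
	"github.com/iotaledger/iota-core/pkg/testsuite"
	iotago "github.com/iotaledger/iota.go/v4"
)

// issueWithPinnedCommitment is a hypothetical helper, not part of this diff. It assumes a
// TestSuite that has already been set up and run, as in the tests above.
func issueWithPinnedCommitment(ts *testsuite.TestSuite) {
	// false: previous behaviour, equivalent to passing nil issuingOptions before this change;
	// issued blocks use whatever commitment each issuing node currently has.
	ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4}, 4, "Genesis", ts.Nodes(), true, false)

	// true: while waiting for slots to be committed, every block issued in a slot is pinned to
	// the commitment at slot - MinCommittableAge, so all nodes build on the same commitment.
	ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{5, 6}, 4, "P0:4.3", ts.Nodes(), true, true)
}

Note that passing true only takes effect when waitForSlotsCommitted is also true, since the pinning happens inside the waitForSlotsCommitted branch of IssueBlocksAtSlots.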