diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go b/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go index 64ac7fb08..99cabf7f8 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go @@ -128,18 +128,19 @@ func (o *SybilProtection) TrackBlock(block *blocks.Block) { return } - blockEpoch := o.apiProvider.APIForSlot(block.ID().Slot()).TimeProvider().EpochFromSlot(block.ID().Slot()) + blockSlot := block.ID().Slot() + blockAPI := o.apiProvider.APIForSlot(blockSlot) + blockEpoch := blockAPI.TimeProvider().EpochFromSlot(blockSlot) - // if the block is issued before the stake end epoch, then it's not a valid validator or candidate block - if accountData.StakeEndEpoch() < blockEpoch { + // if the block is issued in or after the stake end epoch, + // then don't consider it because the validator can't be part of the committee in the next epoch. + if blockEpoch >= accountData.StakeEndEpoch() { return } - // if a candidate block is issued in the stake end epoch, - // or if block is issued after EpochEndSlot - EpochNearingThreshold, because candidates can register only until that point. - // then don't consider it because the validator can't be part of the committee in the next epoch - if accountData.StakeEndEpoch() == blockEpoch || - block.ID().Slot()+o.apiProvider.APIForSlot(block.ID().Slot()).ProtocolParameters().EpochNearingThreshold() > o.apiProvider.APIForSlot(block.ID().Slot()).TimeProvider().EpochEnd(blockEpoch) { + // if block is issued after EpochEndSlot - EpochNearingThreshold, because candidates can register only until that point. + // then also don't consider it because the validator can't be part of the committee in the next epoch. 
+ if blockSlot+blockAPI.ProtocolParameters().EpochNearingThreshold() > blockAPI.TimeProvider().EpochEnd(blockEpoch) { return } diff --git a/pkg/tests/combined_account_transition_test.go b/pkg/tests/combined_account_transition_test.go index ef85d873c..a30f4057c 100644 --- a/pkg/tests/combined_account_transition_test.go +++ b/pkg/tests/combined_account_transition_test.go @@ -48,32 +48,32 @@ func Test_AccountStateTransition(t *testing.T) { ts.AssertTransactionsInCacheAccepted(wallet.Transactions("TX1"), true, node1, node2) } - // create the account1 from TX1:0 with wallet "first" + // create the account-1 from TX1:0 with wallet "account-1" // generated (block1, TX2) - ts.AddWallet("first", node1, iotago.EmptyAccountID) + ts.AddWallet("account-1", node1, iotago.EmptyAccountID) createFullAccount(ts) - // create the account2, from implicit to full account from TX1:1 with wallet "second" + // create the account-2, from implicit to full account from TX1:1 with wallet "account-2" // generated (block2, TX3), (block3, TX4) - ts.AddWallet("second", node1, iotago.EmptyAccountID) + ts.AddWallet("account-2", node1, iotago.EmptyAccountID) account2ID := createImplicitToFullAccount(ts) - // send funds to account2, with TX1:2 + // send funds to account-2, with TX1:2 // generated (block4, TX5) sendFunds(ts) - // allot 1000 mana to account2 with TX1:3 + // allot 1000 mana to account-2 with TX1:3 // generated (block5, TX6) allotManaTo(ts, account2ID) - // create native token from "TX5:0" and account2 (TX4:0) + // create native token from "TX5:0" and account-2 (TX4:0) // generated (block6, TX7) createNativetoken(ts) } func createFullAccount(ts *testsuite.TestSuite) iotago.AccountID { node1 := ts.Node("node1") - newUserWallet := ts.Wallet("first") + newUserWallet := ts.Wallet("account-1") // CREATE NEW ACCOUNT WITH BLOCK ISSUER FROM BASIC UTXO newAccountBlockIssuerKey := tpkg.RandBlockIssuerKey() @@ -125,7 +125,7 @@ func createFullAccount(ts *testsuite.TestSuite) iotago.AccountID { func createImplicitToFullAccount(ts *testsuite.TestSuite) iotago.AccountID { node1 := ts.Node("node1") - newUserWallet := ts.Wallet("second") + newUserWallet := ts.Wallet("account-2") // CREATE IMPLICIT ACCOUNT FROM GENESIS BASIC UTXO, SENT TO A NEW USER WALLET. // a default wallet, already registered in the ledger, will issue the transaction and block. @@ -198,7 +198,7 @@ func sendFunds(ts *testsuite.TestSuite) { node1 := ts.Node("node1") node2 := ts.Node("node2") wallet := ts.DefaultWallet() - secondWallet := ts.Wallet("second") + secondWallet := ts.Wallet("account-2") // send funds from defaultWallet to secondWallet tx := wallet.SendFundsToWallet("TX5", secondWallet, "TX1:2") @@ -246,7 +246,7 @@ func allotManaTo(ts *testsuite.TestSuite, to iotago.AccountID) { // createNativetoken creates a native token from the given input and account. func createNativetoken(ts *testsuite.TestSuite) { - wallet := ts.Wallet("second") + wallet := ts.Wallet("account-2") node1 := ts.Node("node1") node2 := ts.Node("node2") diff --git a/pkg/testsuite/mock/blockissuer.go b/pkg/testsuite/mock/blockissuer.go index 835dd9339..24b504a46 100644 --- a/pkg/testsuite/mock/blockissuer.go +++ b/pkg/testsuite/mock/blockissuer.go @@ -46,10 +46,7 @@ type BlockIssuer struct { keyManager *wallet.KeyManager Client Client - // latestBlockIssuanceResp is the cached response from the latest query to the block issuance endpoint. 
- latestBlockIssuanceResp *api.IssuanceBlockHeaderResponse - blockIssuanceResponseUsed bool - mutex syncutils.RWMutex + mutex syncutils.RWMutex AccountData *AccountData } @@ -65,13 +62,12 @@ func NewBlockIssuer(t *testing.T, name string, keyManager *wallet.KeyManager, cl accountData.ID.RegisterAlias(name) return options.Apply(&BlockIssuer{ - Testing: t, - Name: name, - Validator: validator, - keyManager: keyManager, - Client: client, - blockIssuanceResponseUsed: true, - AccountData: accountData, + Testing: t, + Name: name, + Validator: validator, + keyManager: keyManager, + Client: client, + AccountData: accountData, }, opts) } @@ -216,13 +212,14 @@ func (i *BlockIssuer) CreateAndSubmitValidationBlock(ctx context.Context, alias func (i *BlockIssuer) CreateBasicBlock(ctx context.Context, alias string, opts ...options.Option[BasicBlockParams]) (*blocks.Block, error) { blockParams := options.Apply(&BasicBlockParams{BlockHeader: &BlockHeaderParams{}}, opts) - blockIssuanceInfo := i.latestBlockIssuanceResponse(ctx) + blockIssuanceInfo, err := i.Client.BlockIssuance(ctx) + require.NoError(i.Testing, err) if blockParams.BlockHeader.References == nil { blockParams.BlockHeader.References = referencesFromBlockIssuanceResponse(blockIssuanceInfo) } - err := i.setDefaultBlockParams(ctx, blockParams.BlockHeader) + err = i.setDefaultBlockParams(ctx, blockParams.BlockHeader) require.NoError(i.Testing, err) api := i.Client.APIForTime(*blockParams.BlockHeader.IssuingTime) @@ -270,9 +267,6 @@ func (i *BlockIssuer) CreateBasicBlock(ctx context.Context, alias string, opts . modelBlock.ID().RegisterAlias(alias) - // mark the response as used so that the next time we query the node for the latest block issuance. - i.blockIssuanceResponseUsed = true - return blocks.NewBlock(modelBlock), err } @@ -405,31 +399,8 @@ func (i *BlockIssuer) retrieveAPI(blockParams *BlockHeaderParams) iotago.API { } func (i *BlockIssuer) GetNewBlockIssuanceResponse() *api.IssuanceBlockHeaderResponse { - i.mutex.Lock() - defer i.mutex.Unlock() - - i.blockIssuanceResponseUsed = false resp, err := i.Client.BlockIssuance(context.Background()) require.NoError(i.Testing, err) - i.latestBlockIssuanceResp = resp - - return i.latestBlockIssuanceResp -} - -func (i *BlockIssuer) latestBlockIssuanceResponse(context context.Context) *api.IssuanceBlockHeaderResponse { - i.mutex.Lock() - defer i.mutex.Unlock() - - // If the response was already used to issue a block, we need to get a new response from the node. - // Otherwise we can reuse the cached response. For transactions with commitment inputs, we want to get a fresh response - // for the transaction creation, and then reuse that response for the block issuance, so we only mark the response as used - // if it was used for block issuance. 
- if i.blockIssuanceResponseUsed { - i.blockIssuanceResponseUsed = false - resp, err := i.Client.BlockIssuance(context) - require.NoError(i.Testing, err) - i.latestBlockIssuanceResp = resp - } - return i.latestBlockIssuanceResp + return resp } diff --git a/pkg/testsuite/mock/wallet.go b/pkg/testsuite/mock/wallet.go index ce21e6541..b062b0543 100644 --- a/pkg/testsuite/mock/wallet.go +++ b/pkg/testsuite/mock/wallet.go @@ -62,6 +62,31 @@ func NewAccountData(accountID iotago.AccountID, optAddressIndex ...uint32) *Acco } } +type AccountWithWallet struct { + account *AccountData + wallet *Wallet +} + +func NewAccountWithWallet(account *AccountData, wallet *Wallet) *AccountWithWallet { + return &AccountWithWallet{ + account: account, + wallet: wallet, + } +} + +func (a *AccountWithWallet) Account() *AccountData { + return a.account +} + +func (a *AccountWithWallet) UpdateAccount(updatedAccount *AccountData) { + a.account = updatedAccount + a.wallet.SetBlockIssuer(updatedAccount) +} + +func (a *AccountWithWallet) Wallet() *Wallet { + return a.wallet +} + // WalletClock is an interface that provides the current slot. type WalletClock interface { SetCurrentSlot(slot iotago.SlotIndex) diff --git a/tools/docker-network/.env b/tools/docker-network/.env index 283f70767..5ac834755 100644 --- a/tools/docker-network/.env +++ b/tools/docker-network/.env @@ -8,12 +8,7 @@ COMMON_CONFIG=" --profiling.bindAddress=0.0.0.0:6061 --restAPI.publicRoutes=/health,/api/routes,/api/core/v3/info,/api/core/v3/network*,/api/core/v3/blocks*,/api/core/v3/transactions*,/api/core/v3/commitments*,/api/core/v3/outputs*,/api/core/v3/accounts*,/api/core/v3/validators*,/api/core/v3/rewards*,/api/core/v3/committee*,/api/debug/v2/*,/api/indexer/v2/*,/api/mqtt/v2,/api/blockissuer/v1/*,/api/management/v1/* --debugAPI.enabled=false ---p2p.autopeering.maxPeers=5 ---p2p.autopeering.allowLocalIPs=true -" - -AUTOPEERING_CONFIG=" ---p2p.autopeering.bootstrapPeers=/dns/node-1-validator/tcp/15600/p2p/12D3KooWRVt4Engu27jHnF2RjfX48EqiAqJbgLfFdHNt3Vn6BtJK +--p2p.autopeering.maxPeers=0 " # admin/admin diff --git a/tools/docker-network/docker-compose.yml b/tools/docker-network/docker-compose.yml index 8aa1df43c..b07611e53 100644 --- a/tools/docker-network/docker-compose.yml +++ b/tools/docker-network/docker-compose.yml @@ -30,6 +30,8 @@ services: --db.path==${DB_PATH_V1:-/app/data/database} --protocol.snapshot.path=${SNAPSHOT_PATH_V1:-/app/data/snapshots/snapshot.bin} --p2p.identityPrivateKey=08735375679f3d8031353e94282ed1d65119e5c288fe56d6639d9184a3f978fee8febfedff11cc376daea0f59c395ae2e9a870a25ac4e36093000fbf4d0e8f18 + --p2p.peers=/dns/node-2-validator/tcp/15600/p2p/12D3KooWCropDmzpoLy8UrFg59M1oUx7k1UmQmuHrmN5RDgp6pVL,/dns/node-3-validator/tcp/15600/p2p/12D3KooWPr1mW33PGhv3oRbPQQDesN4THBB3WgnYsNzCfnQLr5QR,/dns/node-4-validator/tcp/15600/p2p/12D3KooWRxDSg2H7ThEJyn4HNkL1ixsqm6PgjCtkTG1hm7NpYG4Q,/dns/node-5/tcp/15600/p2p/12D3KooWG9S868XrL1wzeUbMXDtBTAaxwuqktAzUodFFES8uoTGz,/dns/node-6/tcp/15600/p2p/12D3KooWFatFVM1wyGnMUwJrMW7YwD6j32iq5SCA9S7e386biZ73 + --p2p.peerAliases=node-2-validator,node-3-validator,node-4-validator,node-5,node-6 --node.alias=node-1-validator --inx.enabled=true --inx.bindAddress=0.0.0.0:9029 @@ -62,10 +64,11 @@ services: - ./config.json:/app/config.json:ro command: > ${COMMON_CONFIG} - ${AUTOPEERING_CONFIG} --db.path==${DB_PATH_V2:-/app/data/database} --protocol.snapshot.path=${SNAPSHOT_PATH_V2:-/app/data/snapshots/snapshot.bin} 
--p2p.identityPrivateKey=ba771419c52132a0dfb2521ed18667813f398da159010a55a0a482af939affb92d3338789ad4a07a7631b91791deb11f82ed5dc612822f24275e9f7a313b691f + --p2p.peers=/dns/node-1-validator/tcp/15600/p2p/12D3KooWRVt4Engu27jHnF2RjfX48EqiAqJbgLfFdHNt3Vn6BtJK,/dns/node-3-validator/tcp/15600/p2p/12D3KooWPr1mW33PGhv3oRbPQQDesN4THBB3WgnYsNzCfnQLr5QR,/dns/node-4-validator/tcp/15600/p2p/12D3KooWRxDSg2H7ThEJyn4HNkL1ixsqm6PgjCtkTG1hm7NpYG4Q,/dns/node-5/tcp/15600/p2p/12D3KooWG9S868XrL1wzeUbMXDtBTAaxwuqktAzUodFFES8uoTGz,/dns/node-6/tcp/15600/p2p/12D3KooWFatFVM1wyGnMUwJrMW7YwD6j32iq5SCA9S7e386biZ73 + --p2p.peerAliases=node-1-validator,node-3-validator,node-4-validator,node-5,node-6 --node.alias=node-2-validator --inx.enabled=true --inx.bindAddress=0.0.0.0:9029 @@ -96,10 +99,11 @@ services: - ./config.json:/app/config.json:ro command: > ${COMMON_CONFIG} - ${AUTOPEERING_CONFIG} --db.path==${DB_PATH_V3:-/app/data/database} --protocol.snapshot.path=${SNAPSHOT_PATH_V3:-/app/data/snapshots/snapshot.bin} --p2p.identityPrivateKey=a6261ac049755675ff1437654ca9f83b305055f01ff08c4f039209ef5a4a7d96d06fb61df77a8815209a8f4d204226dee593e50d0ec897ec440a2c1fbde77656 + --p2p.peers=/dns/node-1-validator/tcp/15600/p2p/12D3KooWRVt4Engu27jHnF2RjfX48EqiAqJbgLfFdHNt3Vn6BtJK,/dns/node-2-validator/tcp/15600/p2p/12D3KooWCropDmzpoLy8UrFg59M1oUx7k1UmQmuHrmN5RDgp6pVL,/dns/node-4-validator/tcp/15600/p2p/12D3KooWRxDSg2H7ThEJyn4HNkL1ixsqm6PgjCtkTG1hm7NpYG4Q,/dns/node-5/tcp/15600/p2p/12D3KooWG9S868XrL1wzeUbMXDtBTAaxwuqktAzUodFFES8uoTGz,/dns/node-6/tcp/15600/p2p/12D3KooWFatFVM1wyGnMUwJrMW7YwD6j32iq5SCA9S7e386biZ73 + --p2p.peerAliases=node-1-validator,node-2-validator,node-4-validator,node-5,node-6 --node.alias=node-3-validator --inx.enabled=true --inx.bindAddress=0.0.0.0:9029 @@ -130,10 +134,11 @@ services: - ./config.json:/app/config.json:ro command: > ${COMMON_CONFIG} - ${AUTOPEERING_CONFIG} --db.path==${DB_PATH_V4:-/app/data/database} --protocol.snapshot.path=${SNAPSHOT_PATH_V4:-/app/data/snapshots/snapshot.bin} --p2p.identityPrivateKey=f205f6c4525069f71f9c7e987d72421a16c7900056b494a2b85fdf7942cf906aefbdc580f5d1ce4ae3f86ccfe109c6cd76df9b0e710a437b2aa964358c7b9449 + --p2p.peers=/dns/node-1-validator/tcp/15600/p2p/12D3KooWRVt4Engu27jHnF2RjfX48EqiAqJbgLfFdHNt3Vn6BtJK,/dns/node-2-validator/tcp/15600/p2p/12D3KooWCropDmzpoLy8UrFg59M1oUx7k1UmQmuHrmN5RDgp6pVL,/dns/node-3-validator/tcp/15600/p2p/12D3KooWPr1mW33PGhv3oRbPQQDesN4THBB3WgnYsNzCfnQLr5QR,/dns/node-5/tcp/15600/p2p/12D3KooWG9S868XrL1wzeUbMXDtBTAaxwuqktAzUodFFES8uoTGz,/dns/node-6/tcp/15600/p2p/12D3KooWFatFVM1wyGnMUwJrMW7YwD6j32iq5SCA9S7e386biZ73 + --p2p.peerAliases=node-1-validator,node-2-validator,node-3-validator,node-5,node-6 --node.alias=node-4-validator --inx.enabled=true --inx.bindAddress=0.0.0.0:9029 @@ -162,10 +167,11 @@ services: - ./config.json:/app/config.json:ro command: > ${COMMON_CONFIG} - ${AUTOPEERING_CONFIG} --db.path==${DB_PATH_node5:-/app/data/database} --protocol.snapshot.path=${SNAPSHOT_PATH_node5:-/app/data/snapshots/snapshot.bin} --p2p.identityPrivateKey=03feb3bcd25e57f75697bb329e6e0100680431e4c45c85bc013da2aea9e9d0345e08a0c37407dc62369deebc64cb0fb3ea26127d19d141ee7fb8eaa6b92019d7 + 
--p2p.peers=/dns/node-1-validator/tcp/15600/p2p/12D3KooWRVt4Engu27jHnF2RjfX48EqiAqJbgLfFdHNt3Vn6BtJK,/dns/node-2-validator/tcp/15600/p2p/12D3KooWCropDmzpoLy8UrFg59M1oUx7k1UmQmuHrmN5RDgp6pVL,/dns/node-3-validator/tcp/15600/p2p/12D3KooWPr1mW33PGhv3oRbPQQDesN4THBB3WgnYsNzCfnQLr5QR,/dns/node-4-validator/tcp/15600/p2p/12D3KooWRxDSg2H7ThEJyn4HNkL1ixsqm6PgjCtkTG1hm7NpYG4Q,/dns/node-6/tcp/15600/p2p/12D3KooWFatFVM1wyGnMUwJrMW7YwD6j32iq5SCA9S7e386biZ73 + --p2p.peerAliases=node-1-validator,node-2-validator,node-3-validator,node-4-validator,node-6 --node.alias=node-5 --inx.enabled=true --inx.bindAddress=0.0.0.0:9029 @@ -196,10 +202,11 @@ services: - ./config.json:/app/config.json:ro command: > ${COMMON_CONFIG} - ${AUTOPEERING_CONFIG} --db.path==${DB_PATH_node6:-/app/data/database} --protocol.snapshot.path=${SNAPSHOT_PATH_node6:-/app/data/snapshots/snapshot.bin} --p2p.identityPrivateKey=7d1491df3ef334dee988d6cdfc4b430b996d520bd63375a01d6754f8cee979b855b200fbea8c936ea1937a27e6ad72a7c9a21c1b17c2bd3c11f1f6994d813446 + --p2p.peers=/dns/node-1-validator/tcp/15600/p2p/12D3KooWRVt4Engu27jHnF2RjfX48EqiAqJbgLfFdHNt3Vn6BtJK,/dns/node-2-validator/tcp/15600/p2p/12D3KooWCropDmzpoLy8UrFg59M1oUx7k1UmQmuHrmN5RDgp6pVL,/dns/node-3-validator/tcp/15600/p2p/12D3KooWPr1mW33PGhv3oRbPQQDesN4THBB3WgnYsNzCfnQLr5QR,/dns/node-4-validator/tcp/15600/p2p/12D3KooWRxDSg2H7ThEJyn4HNkL1ixsqm6PgjCtkTG1hm7NpYG4Q,/dns/node-5/tcp/15600/p2p/12D3KooWG9S868XrL1wzeUbMXDtBTAaxwuqktAzUodFFES8uoTGz + --p2p.peerAliases=node-1-validator,node-2-validator,node-3-validator,node-4-validator,node-5 --node.alias=node-6 --inx.enabled=true --inx.bindAddress=0.0.0.0:9029 @@ -356,6 +363,7 @@ services: --inx.address=node-1-validator:9029 --validator.ignoreBootstrapped=true --validator.accountAddress=rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6 + --validator.candidacyRetryInterval=${CANDIDACY_RETRY_INTERVAL:-10s} --validator.issueCandidacyPayload=${ISSUE_CANDIDACY_PAYLOAD_V1:-true} profiles: - minimal @@ -376,6 +384,7 @@ services: --logger.level=debug --inx.address=node-2-validator:9029 --validator.accountAddress=rms1pqm4xk8e9ny5w5rxjkvtp249tfhlwvcshyr3pc0665jvp7g3hc875k538hl + --validator.candidacyRetryInterval=${CANDIDACY_RETRY_INTERVAL:-10s} --validator.issueCandidacyPayload=${ISSUE_CANDIDACY_PAYLOAD_V2:-true} profiles: - full @@ -395,6 +404,7 @@ services: --logger.level=debug --inx.address=node-3-validator:9029 --validator.accountAddress=rms1pp4wuuz0y42caz48vv876qfpmffswsvg40zz8v79sy8cp0jfxm4kunflcgt + --validator.candidacyRetryInterval=${CANDIDACY_RETRY_INTERVAL:-10s} --validator.issueCandidacyPayload=${ISSUE_CANDIDACY_PAYLOAD_V3:-true} profiles: - full @@ -414,6 +424,7 @@ services: --logger.level=debug --inx.address=node-4-validator:9029 --validator.accountAddress=rms1pr8cxs3dzu9xh4cduff4dd4cxdthpjkpwmz2244f75m0urslrsvtsshrrjw + --validator.candidacyRetryInterval=${CANDIDACY_RETRY_INTERVAL:-10s} --validator.issueCandidacyPayload=${ISSUE_CANDIDACY_PAYLOAD_V4:-true} profiles: - full diff --git a/tools/docker-network/tests/accounttransition_test.go b/tools/docker-network/tests/accounttransition_test.go index e7b638c8c..92fc8f991 100644 --- a/tools/docker-network/tests/accounttransition_test.go +++ b/tools/docker-network/tests/accounttransition_test.go @@ -3,29 +3,24 @@ package tests import ( + "context" "fmt" "testing" - "time" "github.com/stretchr/testify/require" "github.com/iotaledger/iota-core/tools/docker-network/tests/dockertestframework" - iotago "github.com/iotaledger/iota.go/v4" ) // Test_AccountTransitions follows the account 
state transition flow described in: -// 1. Create account1. -// 2. Create account2. -// 3. account1 requests faucet funds then allots 1000 mana to account2. -// 4. account2 requests faucet funds then creates native tokens. +// 1. Create account-1. +// 2. Create account-2. +// 3. account-1 requests faucet funds then allots 1000 mana to account-2. +// 4. account-2 requests faucet funds then creates native tokens. func Test_AccountTransitions(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, - dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(4), - )) + dockertestframework.WithProtocolParametersOptions(dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc()...), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -39,19 +34,21 @@ func Test_AccountTransitions(t *testing.T) { d.WaitUntilNetworkReady() - // create account1 - fmt.Println("Creating account1") - wallet1, _ := d.CreateAccountFromFaucet() + ctx, cancel := context.WithCancel(context.Background()) - // create account2 - fmt.Println("Creating account2") - wallet2, _ := d.CreateAccountFromFaucet() + // cancel the context when the test is done + t.Cleanup(cancel) - // allot 1000 mana from account1 to account2 - fmt.Println("Allotting mana from account1 to account2") - d.RequestFaucetFundsAndAllotManaTo(wallet1, wallet2.BlockIssuer.AccountData, 1000) + // create account-1 + accounts := d.CreateAccountsFromFaucet(ctx, 2, "account-1", "account-2") + account1 := accounts[0] + account2 := accounts[1] + + // allot 1000 mana from account-1 to account-2 + fmt.Println("Allotting mana from account-1 to account-2") + d.RequestFaucetFundsAndAllotManaTo(account1.Wallet(), account2.Account(), 1000) // create native token fmt.Println("Creating native token") - d.CreateNativeToken(wallet1, 5_000_000, 10_000_000_000) + d.CreateNativeToken(account1.Wallet(), 5_000_000, 10_000_000_000) } diff --git a/tools/docker-network/tests/api_core_test.go b/tools/docker-network/tests/api_core_test.go index 5c064ce61..2f6b65107 100644 --- a/tools/docker-network/tests/api_core_test.go +++ b/tools/docker-network/tests/api_core_test.go @@ -30,79 +30,68 @@ func (a coreAPIAssets) setupAssetsForSlot(slot iotago.SlotIndex) { } } -func (a coreAPIAssets) assertCommitments(t *testing.T) { - for _, asset := range a { - asset.assertCommitments(t) +func (a coreAPIAssets) forEach(consumer func(slot iotago.SlotIndex, asset *coreAPISlotAssets)) { + for slot, asset := range a { + consumer(slot, asset) } } -func (a coreAPIAssets) assertBICs(t *testing.T) { - for _, asset := range a { - asset.assertBICs(t) +func (a coreAPIAssets) forEachSlot(consumer func(slot iotago.SlotIndex)) { + for slot := range a { + consumer(slot) } } -func (a coreAPIAssets) forEachBlock(t *testing.T, f func(*testing.T, *iotago.Block)) { +func (a coreAPIAssets) forEachBlock(consumer func(block *iotago.Block)) { for _, asset := range a { for _, block := range asset.dataBlocks { - f(t, block) + consumer(block) } for _, block := range asset.valueBlocks { - f(t, block) + consumer(block) } } } -func (a coreAPIAssets) forEachTransaction(t *testing.T, f func(*testing.T, *iotago.SignedTransaction, iotago.BlockID)) { +func (a coreAPIAssets) forEachTransaction(consumer func(signedTx 
*iotago.SignedTransaction, blockID iotago.BlockID)) { for _, asset := range a { - for i, tx := range asset.transactions { + for i, signedTx := range asset.transactions { blockID := asset.valueBlocks[i].MustID() - f(t, tx, blockID) + consumer(signedTx, blockID) } } } -func (a coreAPIAssets) forEachReattachment(t *testing.T, f func(*testing.T, iotago.BlockID)) { +func (a coreAPIAssets) forEachReattachment(consumer func(blockID iotago.BlockID)) { for _, asset := range a { - for _, reattachment := range asset.reattachments { - f(t, reattachment) + for _, blockID := range asset.reattachments { + consumer(blockID) } } } -func (a coreAPIAssets) forEachOutput(t *testing.T, f func(*testing.T, iotago.OutputID, iotago.Output)) { +func (a coreAPIAssets) forEachOutput(consumer func(outputID iotago.OutputID, output iotago.Output)) { for _, asset := range a { - for outID, out := range asset.basicOutputs { - f(t, outID, out) + for outputID, output := range asset.basicOutputs { + consumer(outputID, output) } - for outID, out := range asset.faucetOutputs { - f(t, outID, out) + for outputID, output := range asset.faucetOutputs { + consumer(outputID, output) } - for outID, out := range asset.delegationOutputs { - f(t, outID, out) + for outputID, output := range asset.delegationOutputs { + consumer(outputID, output) } } } -func (a coreAPIAssets) forEachSlot(t *testing.T, f func(*testing.T, iotago.SlotIndex, map[string]iotago.CommitmentID)) { - for slot, slotAssets := range a { - f(t, slot, slotAssets.commitmentPerNode) - } -} - -func (a coreAPIAssets) forEachCommitment(t *testing.T, f func(*testing.T, map[string]iotago.CommitmentID)) { - for _, asset := range a { - f(t, asset.commitmentPerNode) - } -} - -func (a coreAPIAssets) forEachAccountAddress(t *testing.T, f func(t *testing.T, accountAddress *iotago.AccountAddress, commitmentPerNode map[string]iotago.CommitmentID, bicPerNode map[string]iotago.BlockIssuanceCredits)) { +func (a coreAPIAssets) forEachAccountAddress(consumer func(accountAddress *iotago.AccountAddress)) { for _, asset := range a { if asset.accountAddress == nil { // no account created in this slot continue } - f(t, asset.accountAddress, asset.commitmentPerNode, asset.bicPerNode) + + consumer(asset.accountAddress) } } @@ -161,28 +150,8 @@ type coreAPISlotAssets struct { faucetOutputs map[iotago.OutputID]iotago.Output delegationOutputs map[iotago.OutputID]iotago.Output - commitmentPerNode map[string]iotago.CommitmentID - bicPerNode map[string]iotago.BlockIssuanceCredits -} - -func (a *coreAPISlotAssets) assertCommitments(t *testing.T) { - prevCommitment := a.commitmentPerNode["V1"] - for _, commitmentID := range a.commitmentPerNode { - if prevCommitment == iotago.EmptyCommitmentID { - require.Fail(t, "commitment is empty") - } - - require.Equal(t, commitmentID, prevCommitment) - prevCommitment = commitmentID - } -} - -func (a *coreAPISlotAssets) assertBICs(t *testing.T) { - prevBIC := a.bicPerNode["V1"] - for _, bic := range a.bicPerNode { - require.Equal(t, bic, prevBIC) - prevBIC = bic - } + // set later in the test by the default wallet + commitmentID iotago.CommitmentID } func newAssetsPerSlot() *coreAPISlotAssets { @@ -194,8 +163,7 @@ func newAssetsPerSlot() *coreAPISlotAssets { basicOutputs: make(map[iotago.OutputID]iotago.Output), faucetOutputs: make(map[iotago.OutputID]iotago.Output), delegationOutputs: make(map[iotago.OutputID]iotago.Output), - commitmentPerNode: make(map[string]iotago.CommitmentID), - bicPerNode: make(map[string]iotago.BlockIssuanceCredits), + commitmentID: 
iotago.EmptyCommitmentID, } } @@ -205,9 +173,13 @@ func prepareAssets(d *dockertestframework.DockerTestFramework, totalAssetsNum in latestSlot := iotago.SlotIndex(0) + // create accounts + accounts := d.CreateAccountsFromFaucet(ctx, totalAssetsNum) + for i := 0; i < totalAssetsNum; i++ { // account - wallet, account := d.CreateAccountFromFaucet() + wallet, account := accounts[i].Wallet(), accounts[i].Account() + assets.setupAssetsForSlot(account.OutputID.Slot()) assets[account.OutputID.Slot()].accountAddress = account.Address @@ -243,10 +215,20 @@ func prepareAssets(d *dockertestframework.DockerTestFramework, totalAssetsNum in assets[delegationOutputData.ID.CreationSlot()].delegationOutputs[delegationOutputData.ID] = delegationOutputData.Output.(*iotago.DelegationOutput) latestSlot = lo.Max[iotago.SlotIndex](latestSlot, blockSlot, valueBlockSlot, delegationOutputData.ID.CreationSlot(), secondAttachment.ID().Slot()) - - fmt.Printf("Assets for slot %d\n: dataBlock: %s block: %s\ntx: %s\nbasic output: %s, faucet output: %s\n delegation output: %s\n", - valueBlockSlot, block.MustID().String(), valueBlock.MustID().String(), signedTx.MustID().String(), - basicOutputID.String(), faucetOutput.ID.String(), delegationOutputData.ID.String()) + fmt.Printf(`Assets for slot %d: + dataBlock ID: %s + txblock ID: %s + signedTx ID: %s + basic output ID: %s + faucet output ID: %s + delegation output ID: %s`, + valueBlockSlot, + block.MustID().String(), + valueBlock.MustID().String(), + signedTx.MustID().String(), + basicOutputID.String(), + faucetOutput.ID.String(), + delegationOutputData.ID.String()) } return assets, latestSlot @@ -260,12 +242,8 @@ func prepareAssets(d *dockertestframework.DockerTestFramework, totalAssetsNum in // 5. Wait until next epoch then check again if the results remain. func Test_ValidatorsAPI(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, - dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(4), - )) + dockertestframework.WithProtocolParametersOptions(dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc()...), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -274,69 +252,120 @@ func Test_ValidatorsAPI(t *testing.T) { d.AddValidatorNode("V4", "docker-network-inx-validator-4-1", "http://localhost:8040", "rms1pr8cxs3dzu9xh4cduff4dd4cxdthpjkpwmz2244f75m0urslrsvtsshrrjw") d.AddNode("node5", "docker-network-node-5-1", "http://localhost:8080") - runErr := d.Run() - require.NoError(t, runErr) + err := d.Run() + require.NoError(t, err) d.WaitUntilNetworkReady() - hrp := d.DefaultWallet().Client.CommittedAPI().ProtocolParameters().Bech32HRP() ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(func() { - cancel() - }) - // Create registered validators - var wg sync.WaitGroup - clt := d.DefaultWallet().Client - status := d.NodeStatus("V1") - currentEpoch := clt.CommittedAPI().TimeProvider().EpochFromSlot(status.LatestAcceptedBlockSlot) - expectedValidators := d.AccountsFromNodes(d.Nodes()...) 
+ // cancel the context when the test is done + t.Cleanup(cancel) + + defaultClient := d.DefaultWallet().Client + + hrp := defaultClient.CommittedAPI().ProtocolParameters().Bech32HRP() + + // get the initial validators (those are the nodes added via AddValidatorNode) + // they should be returned by the validators API + initialValidators := d.AccountsFromNodes(d.Nodes()...) - for i := 0; i < 50; i++ { + // copy the initial validators, so we can append the new validators + expectedValidators := make([]string, len(initialValidators)) + copy(expectedValidators, initialValidators) + + // create 50 new validators + validatorCount := 50 + implicitAccounts := d.CreateImplicitAccounts(ctx, validatorCount) + + blockIssuance, err := defaultClient.BlockIssuance(ctx) + require.NoError(t, err) + + latestCommitmentSlot := blockIssuance.LatestCommitment.Slot + // we can't set the staking start epoch too much in the future, because it is bound to the latest commitment slot plus MaxCommittableAge + stakingStartEpoch := d.DefaultWallet().StakingStartEpochFromSlot(latestCommitmentSlot) + + // create accounts with staking feature for the validators + var wg sync.WaitGroup + validators := make([]*mock.AccountWithWallet, validatorCount) + for i := range validatorCount { wg.Add(1) - go func() { - defer wg.Done() - // create implicit accounts for every validator - wallet, implicitAccountOutputData := d.CreateImplicitAccount(ctx) + go func(validatorNr int) { + defer wg.Done() // create account with staking feature for every validator - accountData := d.CreateAccountFromImplicitAccount(wallet, - implicitAccountOutputData, - wallet.GetNewBlockIssuanceResponse(), - dockertestframework.WithStakingFeature(100, 1, currentEpoch), + validators[validatorNr] = d.CreateAccountFromImplicitAccount(implicitAccounts[validatorNr], + blockIssuance, + dockertestframework.WithStakingFeature(100, 1, stakingStartEpoch), ) + expectedValidators = append(expectedValidators, validators[validatorNr].Account().Address.Bech32(hrp)) + }(i) + } + wg.Wait() + + annoucementEpoch := stakingStartEpoch + + // check if we missed to announce the candidacy during the staking start epoch because it takes time to create the account. 
+ latestAcceptedBlockSlot := d.NodeStatus("V1").LatestAcceptedBlockSlot + currentEpoch := defaultClient.CommittedAPI().TimeProvider().EpochFromSlot(latestAcceptedBlockSlot) + if annoucementEpoch < currentEpoch { + annoucementEpoch = currentEpoch + } + + maxRegistrationSlot := dockertestframework.GetMaxRegistrationSlot(defaultClient.CommittedAPI(), annoucementEpoch) + + // the candidacy announcement needs to be done before the nearing threshold of the epoch + // and we shouldn't start trying in the last possible slot, otherwise the tests might be wonky + if latestAcceptedBlockSlot >= maxRegistrationSlot { + // we are already too late, we can't issue candidacy payloads anymore, so lets start with the next epoch + annoucementEpoch++ + } - expectedValidators = append(expectedValidators, accountData.Address.Bech32(hrp)) + // the candidacy announcement needs to be done before the nearing threshold + maxRegistrationSlot = dockertestframework.GetMaxRegistrationSlot(defaultClient.CommittedAPI(), annoucementEpoch) - // issue candidacy payload in the next epoch (currentEpoch + 1), in order to issue it before epochNearingThreshold - d.AwaitCommitment(clt.CommittedAPI().TimeProvider().EpochEnd(currentEpoch)) - blkID := d.IssueCandidacyPayloadFromAccount(ctx, wallet) - fmt.Println("Candidacy payload:", blkID.ToHex(), blkID.Slot()) - d.AwaitCommitment(blkID.Slot()) - }() + // now wait until the start of the announcement epoch + d.AwaitLatestAcceptedBlockSlot(defaultClient.CommittedAPI().TimeProvider().EpochStart(annoucementEpoch), true) + + // issue candidacy payload for each account + wg = sync.WaitGroup{} + for i := range validatorCount { + wg.Add(1) + + go func(validatorNr int) { + defer wg.Done() + + candidacyBlockID := d.IssueCandidacyPayloadFromAccount(ctx, validators[validatorNr].Wallet()) + fmt.Println("Issuing candidacy payload for account", validators[validatorNr].Account().ID, "in epoch", annoucementEpoch, "...", "blockID:", candidacyBlockID.ToHex()) + require.LessOrEqualf(d.Testing, candidacyBlockID.Slot(), maxRegistrationSlot, "candidacy announcement block slot is greater than max registration slot for the epoch (%d>%d)", candidacyBlockID.Slot(), maxRegistrationSlot) + }(i) } wg.Wait() - // get all validators of currentEpoch+1 with pageSize 10 - actualValidators := getAllValidatorsOnEpoch(t, clt, 0, 10) - require.ElementsMatch(t, expectedValidators, actualValidators) + // wait until the end of the announcement epoch + d.AwaitEpochFinalized() - // wait until currentEpoch+3 and check the results again - targetSlot := clt.CommittedAPI().TimeProvider().EpochEnd(currentEpoch + 2) - d.AwaitCommitment(targetSlot) - actualValidators = getAllValidatorsOnEpoch(t, clt, currentEpoch+1, 10) + // check if all validators are returned from the validators API with pageSize 10 + actualValidators := getAllValidatorsOnEpoch(t, defaultClient, annoucementEpoch, 10) require.ElementsMatch(t, expectedValidators, actualValidators) + + // wait until the end of the next epoch, the newly added validators should be offline again + // because they haven't issued candidacy annoucement for the next epoch + d.AwaitEpochFinalized() + + actualValidators = getAllValidatorsOnEpoch(t, defaultClient, annoucementEpoch+1, 10) + require.ElementsMatch(t, initialValidators, actualValidators) + + // the initital validators should be returned for epoch 0 + actualValidators = getAllValidatorsOnEpoch(t, defaultClient, 0, 10) + require.ElementsMatch(t, initialValidators, actualValidators) } -func Test_CoreAPI(t *testing.T) { +func 
Test_CoreAPI_ValidRequests(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, - dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(4), - )) + dockertestframework.WithProtocolParametersOptions(dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc()...), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -345,343 +374,420 @@ func Test_CoreAPI(t *testing.T) { d.AddValidatorNode("V4", "docker-network-inx-validator-4-1", "http://localhost:8040", "rms1pr8cxs3dzu9xh4cduff4dd4cxdthpjkpwmz2244f75m0urslrsvtsshrrjw") d.AddNode("node5", "docker-network-node-5-1", "http://localhost:8080") - runErr := d.Run() - require.NoError(t, runErr) + err := d.Run() + require.NoError(t, err) d.WaitUntilNetworkReady() assetsPerSlot, lastSlot := prepareAssets(d, 5) - fmt.Println("Await finalisation of slot", lastSlot) - d.AwaitFinalization(lastSlot) + d.AwaitFinalizedSlot(lastSlot, true) + + defaultClient := d.DefaultWallet().Client + + forEachNodeClient := func(consumer func(nodeName string, client mock.Client)) { + for _, node := range d.Nodes() { + client := d.Client(node.Name) + consumer(node.Name, client) + } + } tests := []struct { name string - testFunc func(t *testing.T, node *dockertestframework.Node, client mock.Client) + testFunc func(t *testing.T) }{ { name: "Test_Info", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - resp, err := client.Info(context.Background()) - require.NoError(t, err) - require.NotNil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + resp, err := client.Info(context.Background()) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_BlockByBlockID", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachBlock(t, func(t *testing.T, block *iotago.Block) { - respBlock, err := client.BlockByBlockID(context.Background(), block.MustID()) - require.NoError(t, err) - require.NotNil(t, respBlock) - require.Equal(t, block.MustID(), respBlock.MustID(), "BlockID of retrieved block does not match: %s != %s", block.MustID(), respBlock.MustID()) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEachBlock(func(block *iotago.Block) { + respBlock, err := client.BlockByBlockID(context.Background(), block.MustID()) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, respBlock, "node %s", nodeName) + require.Equalf(t, block.MustID(), respBlock.MustID(), "node %s: BlockID of retrieved block does not match: %s != %s", nodeName, block.MustID(), respBlock.MustID()) + }) }) }, }, { name: "Test_BlockMetadataByBlockID", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachBlock(t, func(t *testing.T, block *iotago.Block) { - resp, err := client.BlockMetadataByBlockID(context.Background(), block.MustID()) - require.NoError(t, err) - require.NotNil(t, resp) - require.Equal(t, block.MustID(), resp.BlockID, "BlockID of retrieved block does not match: %s != %s", block.MustID(), resp.BlockID) - require.Equal(t, 
api.BlockStateFinalized, resp.BlockState) - }) - - assetsPerSlot.forEachReattachment(t, func(t *testing.T, blockID iotago.BlockID) { - resp, err := client.BlockMetadataByBlockID(context.Background(), blockID) - require.NoError(t, err) - require.NotNil(t, resp) - require.Equal(t, blockID, resp.BlockID, "BlockID of retrieved block does not match: %s != %s", blockID, resp.BlockID) - require.Equal(t, api.BlockStateFinalized, resp.BlockState) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEachBlock(func(block *iotago.Block) { + resp, err := client.BlockMetadataByBlockID(context.Background(), block.MustID()) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + require.Equalf(t, block.MustID(), resp.BlockID, "node %s: BlockID of retrieved block does not match: %s != %s", nodeName, block.MustID(), resp.BlockID) + require.Equalf(t, api.BlockStateFinalized, resp.BlockState, "node %s", nodeName) + }) + + assetsPerSlot.forEachReattachment(func(blockID iotago.BlockID) { + resp, err := client.BlockMetadataByBlockID(context.Background(), blockID) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + require.Equalf(t, blockID, resp.BlockID, "node %s: BlockID of retrieved block does not match: %s != %s", nodeName, blockID, resp.BlockID) + require.Equalf(t, api.BlockStateFinalized, resp.BlockState, "node %s", nodeName) + }) }) }, }, { name: "Test_BlockWithMetadata", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachBlock(t, func(t *testing.T, block *iotago.Block) { - resp, err := client.BlockWithMetadataByBlockID(context.Background(), block.MustID()) - require.NoError(t, err) - require.NotNil(t, resp) - require.Equal(t, block.MustID(), resp.Block.MustID(), "BlockID of retrieved block does not match: %s != %s", block.MustID(), resp.Block.MustID()) - require.Equal(t, api.BlockStateFinalized, resp.Metadata.BlockState) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEachBlock(func(block *iotago.Block) { + resp, err := client.BlockWithMetadataByBlockID(context.Background(), block.MustID()) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + require.Equalf(t, block.MustID(), resp.Block.MustID(), "node %s: BlockID of retrieved block does not match: %s != %s", nodeName, block.MustID(), resp.Block.MustID()) + require.Equalf(t, api.BlockStateFinalized, resp.Metadata.BlockState, "node %s", nodeName) + }) }) }, }, { name: "Test_BlockIssuance", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - resp, err := client.BlockIssuance(context.Background()) - require.NoError(t, err) - require.NotNil(t, resp) - - require.GreaterOrEqual(t, len(resp.StrongParents), 1, "There should be at least 1 strong parent provided") + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + resp, err := client.BlockIssuance(context.Background()) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + require.GreaterOrEqualf(t, len(resp.StrongParents), 1, "node %s: there should be at least 1 strong parent provided", nodeName) + }) }, }, { name: "Test_CommitmentBySlot", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachSlot(t, func(t *testing.T, 
slot iotago.SlotIndex, commitmentsPerNode map[string]iotago.CommitmentID) { - resp, err := client.CommitmentBySlot(context.Background(), slot) + testFunc: func(t *testing.T) { + // first we get the commitment IDs for each slot from the default wallet + // this step is necessary to get the commitment IDs for each slot for the following tests + assetsPerSlot.forEach(func(slot iotago.SlotIndex, assets *coreAPISlotAssets) { + resp, err := defaultClient.CommitmentBySlot(context.Background(), slot) require.NoError(t, err) require.NotNil(t, resp) - commitmentsPerNode[node.Name] = resp.MustID() + + commitmentID := resp.MustID() + if commitmentID == iotago.EmptyCommitmentID { + require.Failf(t, "commitment is empty", "slot %d", slot) + } + + assets.commitmentID = commitmentID + }) + + // now we check if the commitment IDs are the same for each node + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEach(func(slot iotago.SlotIndex, assets *coreAPISlotAssets) { + resp, err := client.CommitmentBySlot(context.Background(), slot) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + + // check if the commitment ID is the same as the one from the default wallet + require.Equalf(t, assets.commitmentID, resp.MustID(), "node %s: commitment in slot %d does not match the default wallet: %s != %s", nodeName, slot, assets.commitmentID, resp.MustID()) + }) }) }, }, { name: "Test_CommitmentByID", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachCommitment(t, func(t *testing.T, commitmentsPerNode map[string]iotago.CommitmentID) { - resp, err := client.CommitmentByID(context.Background(), commitmentsPerNode[node.Name]) - require.NoError(t, err) - require.NotNil(t, resp) - require.Equal(t, commitmentsPerNode[node.Name], resp.MustID(), "Commitment does not match commitment got for the same slot from the same node: %s != %s", commitmentsPerNode[node.Name], resp.MustID()) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEach(func(slot iotago.SlotIndex, assets *coreAPISlotAssets) { + resp, err := client.CommitmentByID(context.Background(), assets.commitmentID) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + require.Equalf(t, assets.commitmentID, resp.MustID(), "node %s: commitment in slot %d does not match the default wallet: %s != %s", nodeName, slot, assets.commitmentID, resp.MustID()) + }) }) }, }, { name: "Test_CommitmentUTXOChangesByID", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachCommitment(t, func(t *testing.T, commitmentsPerNode map[string]iotago.CommitmentID) { - resp, err := client.CommitmentUTXOChangesByID(context.Background(), commitmentsPerNode[node.Name]) - require.NoError(t, err) - require.NotNil(t, resp) - assetsPerSlot.assertUTXOOutputIDsInSlot(t, commitmentsPerNode[node.Name].Slot(), resp.CreatedOutputs, resp.ConsumedOutputs) - require.Equal(t, commitmentsPerNode[node.Name], resp.CommitmentID, "CommitmentID of retrieved UTXO changes does not match: %s != %s", commitmentsPerNode[node.Name], resp.CommitmentID) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEach(func(slot iotago.SlotIndex, assets *coreAPISlotAssets) { + resp, err := client.CommitmentUTXOChangesByID(context.Background(), assets.commitmentID) + 
require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + assetsPerSlot.assertUTXOOutputIDsInSlot(t, assets.commitmentID.Slot(), resp.CreatedOutputs, resp.ConsumedOutputs) + require.Equalf(t, assets.commitmentID, resp.CommitmentID, "node %s: CommitmentID of retrieved UTXO changes does not match: %s != %s", nodeName, assets.commitmentID, resp.CommitmentID) + }) }) }, }, { - "Test_CommitmentUTXOChangesFullByID", - func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachCommitment(t, func(t *testing.T, commitmentsPerNode map[string]iotago.CommitmentID) { - resp, err := client.CommitmentUTXOChangesFullByID(context.Background(), commitmentsPerNode[node.Name]) - require.NoError(t, err) - require.NotNil(t, resp) - assetsPerSlot.assertUTXOOutputsInSlot(t, commitmentsPerNode[node.Name].Slot(), resp.CreatedOutputs, resp.ConsumedOutputs) - require.Equal(t, commitmentsPerNode[node.Name], resp.CommitmentID, "CommitmentID of retrieved UTXO changes does not match: %s != %s", commitmentsPerNode[node.Name], resp.CommitmentID) + name: "Test_CommitmentUTXOChangesFullByID", + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEach(func(slot iotago.SlotIndex, assets *coreAPISlotAssets) { + resp, err := client.CommitmentUTXOChangesFullByID(context.Background(), assets.commitmentID) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + assetsPerSlot.assertUTXOOutputsInSlot(t, assets.commitmentID.Slot(), resp.CreatedOutputs, resp.ConsumedOutputs) + require.Equalf(t, assets.commitmentID, resp.CommitmentID, "node %s: CommitmentID of retrieved UTXO changes does not match: %s != %s", nodeName, assets.commitmentID, resp.CommitmentID) + }) }) }, }, { name: "Test_CommitmentUTXOChangesBySlot", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachCommitment(t, func(t *testing.T, commitmentsPerNode map[string]iotago.CommitmentID) { - resp, err := client.CommitmentUTXOChangesBySlot(context.Background(), commitmentsPerNode[node.Name].Slot()) - require.NoError(t, err) - require.NotNil(t, resp) - assetsPerSlot.assertUTXOOutputIDsInSlot(t, commitmentsPerNode[node.Name].Slot(), resp.CreatedOutputs, resp.ConsumedOutputs) - require.Equal(t, commitmentsPerNode[node.Name], resp.CommitmentID, "CommitmentID of retrieved UTXO changes does not match: %s != %s", commitmentsPerNode[node.Name], resp.CommitmentID) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEach(func(slot iotago.SlotIndex, assets *coreAPISlotAssets) { + resp, err := client.CommitmentUTXOChangesBySlot(context.Background(), assets.commitmentID.Slot()) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + assetsPerSlot.assertUTXOOutputIDsInSlot(t, assets.commitmentID.Slot(), resp.CreatedOutputs, resp.ConsumedOutputs) + require.Equalf(t, assets.commitmentID, resp.CommitmentID, "node %s: CommitmentID of retrieved UTXO changes does not match: %s != %s", nodeName, assets.commitmentID, resp.CommitmentID) + }) }) }, }, { name: "Test_CommitmentUTXOChangesFullBySlot", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachCommitment(t, func(t *testing.T, commitmentsPerNode map[string]iotago.CommitmentID) { - resp, err := client.CommitmentUTXOChangesFullBySlot(context.Background(), 
commitmentsPerNode[node.Name].Slot()) - require.NoError(t, err) - require.NotNil(t, resp) - assetsPerSlot.assertUTXOOutputsInSlot(t, commitmentsPerNode[node.Name].Slot(), resp.CreatedOutputs, resp.ConsumedOutputs) - require.Equal(t, commitmentsPerNode[node.Name], resp.CommitmentID, "CommitmentID of retrieved UTXO changes does not match: %s != %s", commitmentsPerNode[node.Name], resp.CommitmentID) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEach(func(slot iotago.SlotIndex, assets *coreAPISlotAssets) { + resp, err := client.CommitmentUTXOChangesFullBySlot(context.Background(), assets.commitmentID.Slot()) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + assetsPerSlot.assertUTXOOutputsInSlot(t, assets.commitmentID.Slot(), resp.CreatedOutputs, resp.ConsumedOutputs) + require.Equalf(t, assets.commitmentID, resp.CommitmentID, "node %s: CommitmentID of retrieved UTXO changes does not match: %s != %s", nodeName, assets.commitmentID, resp.CommitmentID) + }) }) }, }, { name: "Test_OutputByID", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachOutput(t, func(t *testing.T, outputID iotago.OutputID, output iotago.Output) { - resp, err := client.OutputByID(context.Background(), outputID) - require.NoError(t, err) - require.NotNil(t, resp) - require.EqualValues(t, output, resp, "Output created is different than retrieved from the API: %s != %s", output, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEachOutput(func(outputID iotago.OutputID, output iotago.Output) { + resp, err := client.OutputByID(context.Background(), outputID) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + require.EqualValuesf(t, output, resp, "node %s: Output created is different than retrieved from the API: %s != %s", nodeName, output, resp) + }) }) }, }, { name: "Test_OutputMetadata", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachOutput(t, func(t *testing.T, outputID iotago.OutputID, output iotago.Output) { - resp, err := client.OutputMetadataByID(context.Background(), outputID) - require.NoError(t, err) - require.NotNil(t, resp) - require.EqualValues(t, outputID, resp.OutputID, "OutputID of retrieved output does not match: %s != %s", outputID, resp.OutputID) - require.EqualValues(t, outputID.TransactionID(), resp.Included.TransactionID, "TransactionID of retrieved output does not match: %s != %s", outputID.TransactionID(), resp.Included.TransactionID) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEachOutput(func(outputID iotago.OutputID, output iotago.Output) { + resp, err := client.OutputMetadataByID(context.Background(), outputID) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + require.EqualValuesf(t, outputID, resp.OutputID, "node %s: OutputID of retrieved output does not match: %s != %s", nodeName, outputID, resp.OutputID) + require.EqualValuesf(t, outputID.TransactionID(), resp.Included.TransactionID, "node %s: TransactionID of retrieved output does not match: %s != %s", nodeName, outputID.TransactionID(), resp.Included.TransactionID) + }) }) }, }, { name: "Test_OutputWithMetadata", - testFunc: func(t *testing.T, node *dockertestframework.Node, client 
mock.Client) { - assetsPerSlot.forEachOutput(t, func(t *testing.T, outputID iotago.OutputID, output iotago.Output) { - out, outMetadata, err := client.OutputWithMetadataByID(context.Background(), outputID) - require.NoError(t, err) - require.NotNil(t, outMetadata) - require.NotNil(t, out) - require.EqualValues(t, outputID, outMetadata.OutputID, "OutputID of retrieved output does not match: %s != %s", outputID, outMetadata.OutputID) - require.EqualValues(t, outputID.TransactionID(), outMetadata.Included.TransactionID, "TransactionID of retrieved output does not match: %s != %s", outputID.TransactionID(), outMetadata.Included.TransactionID) - require.EqualValues(t, output, out, "OutputID of retrieved output does not match: %s != %s", output, out) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEachOutput(func(outputID iotago.OutputID, output iotago.Output) { + out, outMetadata, err := client.OutputWithMetadataByID(context.Background(), outputID) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, outMetadata, "node %s", nodeName) + require.NotNilf(t, out, "node %s", nodeName) + require.EqualValuesf(t, outputID, outMetadata.OutputID, "node %s: OutputID of retrieved output does not match: %s != %s", nodeName, outputID, outMetadata.OutputID) + require.EqualValuesf(t, outputID.TransactionID(), outMetadata.Included.TransactionID, "node %s: TransactionID of retrieved output does not match: %s != %s", nodeName, outputID.TransactionID(), outMetadata.Included.TransactionID) + require.EqualValuesf(t, output, out, "node %s: OutputID of retrieved output does not match: %s != %s", nodeName, output, out) + }) }) }, }, { name: "Test_TransactionByID", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachTransaction(t, func(t *testing.T, transaction *iotago.SignedTransaction, firstAttachmentID iotago.BlockID) { - txID := transaction.Transaction.MustID() - resp, err := client.TransactionByID(context.Background(), txID) - require.NoError(t, err) - require.NotNil(t, resp) - require.EqualValues(t, txID, resp.MustID()) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEachTransaction(func(transaction *iotago.SignedTransaction, firstAttachmentID iotago.BlockID) { + txID := transaction.Transaction.MustID() + resp, err := client.TransactionByID(context.Background(), txID) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + require.EqualValuesf(t, txID, resp.MustID(), "node %s: TransactionID of retrieved transaction does not match: %s != %s", nodeName, txID, resp.MustID()) + }) }) }, }, { name: "Test_TransactionsIncludedBlock", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachTransaction(t, func(t *testing.T, transaction *iotago.SignedTransaction, firstAttachmentID iotago.BlockID) { - resp, err := client.TransactionIncludedBlock(context.Background(), transaction.Transaction.MustID()) - require.NoError(t, err) - require.NotNil(t, resp) - require.EqualValues(t, firstAttachmentID, resp.MustID()) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEachTransaction(func(transaction *iotago.SignedTransaction, firstAttachmentID iotago.BlockID) { + resp, err := client.TransactionIncludedBlock(context.Background(), transaction.Transaction.MustID()) + 
require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + require.EqualValuesf(t, firstAttachmentID, resp.MustID(), "node %s: BlockID of retrieved transaction does not match: %s != %s", nodeName, firstAttachmentID, resp.MustID()) + }) }) }, }, { name: "Test_TransactionsIncludedBlockMetadata", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachTransaction(t, func(t *testing.T, transaction *iotago.SignedTransaction, firstAttachmentID iotago.BlockID) { - resp, err := client.TransactionIncludedBlockMetadata(context.Background(), transaction.Transaction.MustID()) - require.NoError(t, err) - require.NotNil(t, resp) - require.EqualValues(t, api.BlockStateFinalized, resp.BlockState) - require.EqualValues(t, firstAttachmentID, resp.BlockID, "Inclusion BlockID of retrieved transaction does not match: %s != %s", firstAttachmentID, resp.BlockID) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEachTransaction(func(transaction *iotago.SignedTransaction, firstAttachmentID iotago.BlockID) { + resp, err := client.TransactionIncludedBlockMetadata(context.Background(), transaction.Transaction.MustID()) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + require.EqualValuesf(t, api.BlockStateFinalized, resp.BlockState, "node %s: BlockState of retrieved transaction does not match: %s != %s", nodeName, api.BlockStateFinalized, resp.BlockState) + require.EqualValuesf(t, firstAttachmentID, resp.BlockID, "node %s: BlockID of retrieved transaction does not match: %s != %s", nodeName, firstAttachmentID, resp.BlockID) + }) }) }, }, { name: "Test_TransactionsMetadata", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachTransaction(t, func(t *testing.T, transaction *iotago.SignedTransaction, firstAttachmentID iotago.BlockID) { - resp, err := client.TransactionMetadata(context.Background(), transaction.Transaction.MustID()) - require.NoError(t, err) - require.NotNil(t, resp) - require.Equal(t, api.TransactionStateFinalized, resp.TransactionState) - require.EqualValues(t, resp.EarliestAttachmentSlot, firstAttachmentID.Slot()) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEachTransaction(func(transaction *iotago.SignedTransaction, firstAttachmentID iotago.BlockID) { + resp, err := client.TransactionMetadata(context.Background(), transaction.Transaction.MustID()) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + require.Equalf(t, api.TransactionStateFinalized, resp.TransactionState, "node %s: TransactionState of retrieved transaction does not match: %s != %s", nodeName, api.TransactionStateFinalized, resp.TransactionState) + require.EqualValuesf(t, resp.EarliestAttachmentSlot, firstAttachmentID.Slot(), "node %s: EarliestAttachmentSlot of retrieved transaction does not match: %s != %s", nodeName, resp.EarliestAttachmentSlot, firstAttachmentID.Slot()) + }) }) }, }, { name: "Test_Congestion", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachAccountAddress(t, func( - t *testing.T, - accountAddress *iotago.AccountAddress, - commitmentPerNode map[string]iotago.CommitmentID, - bicPerNoode map[string]iotago.BlockIssuanceCredits, - ) { - resp, err := client.Congestion(context.Background(), 
accountAddress, 0) - require.NoError(t, err) - require.NotNil(t, resp) + testFunc: func(t *testing.T) { + // node allows to get account only for the slot newer than lastCommittedSlot - MCA, we need fresh commitment + infoRes, err := defaultClient.Info(context.Background()) + require.NoError(t, err) - // node allows to get account only for the slot newer than lastCommittedSlot - MCA, we need fresh commitment - infoRes, err := client.Info(context.Background()) - require.NoError(t, err) - commitment, err := client.CommitmentBySlot(context.Background(), infoRes.Status.LatestCommitmentID.Slot()) - require.NoError(t, err) + commitment, err := defaultClient.CommitmentBySlot(context.Background(), infoRes.Status.LatestCommitmentID.Slot()) + require.NoError(t, err) + + commitmentID := commitment.MustID() - resp, err = client.Congestion(context.Background(), accountAddress, 0, commitment.MustID()) + // wait a bit to make sure the commitment is available on all nodes + time.Sleep(1 * time.Second) + + assetsPerSlot.forEachAccountAddress(func(accountAddress *iotago.AccountAddress) { + // get the BIC for the account from the default wallet + congestionResponse, err := defaultClient.Congestion(context.Background(), accountAddress, 0, commitmentID) require.NoError(t, err) - require.NotNil(t, resp) - // later we check if all nodes have returned the same BIC value for this account - bicPerNoode[node.Name] = resp.BlockIssuanceCredits + require.NotNil(t, congestionResponse) + + bic := congestionResponse.BlockIssuanceCredits + + // check if all nodes have the same BIC for this account + forEachNodeClient(func(nodeName string, client mock.Client) { + resp, err := client.Congestion(context.Background(), accountAddress, 0, commitmentID) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + + require.Equalf(t, bic, resp.BlockIssuanceCredits, "node %s: BIC for account %s does not match: %d != %d", nodeName, accountAddress.Bech32(iotago.PrefixTestnet), bic, resp.BlockIssuanceCredits) + }) }) }, }, { name: "Test_Validators", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - pageSize := uint64(3) - resp, err := client.Validators(context.Background(), pageSize) - require.NoError(t, err) - require.NotNil(t, resp) - require.Equal(t, int(pageSize), len(resp.Validators), "There should be exactly %d validators returned on the first page", pageSize) - - resp, err = client.Validators(context.Background(), pageSize, resp.Cursor) - require.NoError(t, err) - require.NotNil(t, resp) - require.Equal(t, 1, len(resp.Validators), "There should be only one validator returned on the last page") + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + pageSize := uint64(3) + resp, err := client.Validators(context.Background(), pageSize) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + require.Equalf(t, int(pageSize), len(resp.Validators), "node %s: There should be exactly %d validators returned on the first page", nodeName, pageSize) + + resp, err = client.Validators(context.Background(), pageSize, resp.Cursor) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + require.Equalf(t, 1, len(resp.Validators), "node %s: There should be only one validator returned on the last page", nodeName) + }) }, }, { name: "Test_ValidatorsAll", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - resp, all, 
err := client.ValidatorsAll(context.Background()) - require.NoError(t, err) - require.True(t, all) - require.Equal(t, 4, len(resp.Validators)) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + resp, all, err := client.ValidatorsAll(context.Background()) + require.NoErrorf(t, err, "node %s", nodeName) + require.Truef(t, all, "node %s: All validators should be returned", nodeName) + require.Equalf(t, 4, len(resp.Validators), "node %s: There should be exactly 4 validators returned", nodeName) + }) }, }, { name: "Test_Rewards", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - assetsPerSlot.forEachOutput(t, func(t *testing.T, outputID iotago.OutputID, output iotago.Output) { - if output.Type() != iotago.OutputDelegation { - return - } - - resp, err := client.Rewards(context.Background(), outputID) - require.NoError(t, err) - require.NotNil(t, resp) - // rewards are zero, because we do not wait for the epoch end - require.EqualValues(t, 0, resp.Rewards) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + assetsPerSlot.forEachOutput(func(outputID iotago.OutputID, output iotago.Output) { + if output.Type() != iotago.OutputDelegation { + return + } + + resp, err := client.Rewards(context.Background(), outputID) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + + timeProvider := client.CommittedAPI().TimeProvider() + outputCreationEpoch := timeProvider.EpochFromSlot(outputID.Slot()) + + if outputCreationEpoch == timeProvider.CurrentEpoch() { + // rewards are zero, because we do not wait for the epoch end + require.EqualValuesf(t, 0, resp.Rewards, "node %s: Rewards should be zero", nodeName) + } else { + // rewards can be greater or equal to 0, since the delegation happened earlier + require.GreaterOrEqualf(t, resp.Rewards, iotago.Mana(0), "node %s: Rewards should be greater or equal to zero", nodeName) + } + }) }) }, }, { name: "Test_Committee", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - resp, err := client.Committee(context.Background()) - require.NoError(t, err) - require.NotNil(t, resp) - require.EqualValues(t, 4, len(resp.Committee)) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + resp, err := client.Committee(context.Background()) + require.NoErrorf(t, err, "node %s", nodeName) + require.NotNilf(t, resp, "node %s", nodeName) + require.EqualValuesf(t, 4, len(resp.Committee), "node %s: Committee length should be 4", nodeName) + }) }, }, { name: "Test_CommitteeWithEpoch", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - resp, err := client.Committee(context.Background(), 0) - require.NoError(t, err) - require.Equal(t, iotago.EpochIndex(0), resp.Epoch) - require.Equal(t, 4, len(resp.Committee)) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + resp, err := client.Committee(context.Background(), 0) + require.NoErrorf(t, err, "node %s", nodeName) + require.Equalf(t, iotago.EpochIndex(0), resp.Epoch, "node %s: Epoch should be 0", nodeName) + require.Equalf(t, 4, len(resp.Committee), "node %s: Committee length should be 4", nodeName) + }) }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - for _, node := range d.Nodes() { - test.testFunc(d.Testing, node, d.Client(node.Name)) - } + test.testFunc(d.Testing) }) } - - // 
check if the same values were returned by all nodes for the same slot - assetsPerSlot.assertCommitments(t) - assetsPerSlot.assertBICs(t) } func Test_CoreAPI_BadRequests(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, - dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(4), - )) + dockertestframework.WithProtocolParametersOptions(dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc()...), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -690,211 +796,250 @@ func Test_CoreAPI_BadRequests(t *testing.T) { d.AddValidatorNode("V4", "docker-network-inx-validator-4-1", "http://localhost:8040", "rms1pr8cxs3dzu9xh4cduff4dd4cxdthpjkpwmz2244f75m0urslrsvtsshrrjw") d.AddNode("node5", "docker-network-node-5-1", "http://localhost:8080") - runErr := d.Run() - require.NoError(t, runErr) + err := d.Run() + require.NoError(t, err) d.WaitUntilNetworkReady() + forEachNodeClient := func(consumer func(nodeName string, client mock.Client)) { + for _, node := range d.Nodes() { + client := d.Client(node.Name) + consumer(node.Name, client) + } + } + tests := []struct { name string - testFunc func(t *testing.T, node *dockertestframework.Node, client mock.Client) + testFunc func(t *testing.T) }{ { name: "Test_BlockByBlockID_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - blockID := tpkg.RandBlockID() - respBlock, err := client.BlockByBlockID(context.Background(), blockID) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, respBlock) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + blockID := tpkg.RandBlockID() + respBlock, err := client.BlockByBlockID(context.Background(), blockID) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, respBlock, "node %s", nodeName) + }) }, }, { name: "Test_BlockMetadataByBlockID_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - blockID := tpkg.RandBlockID() - resp, err := client.BlockMetadataByBlockID(context.Background(), blockID) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + blockID := tpkg.RandBlockID() + resp, err := client.BlockMetadataByBlockID(context.Background(), blockID) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_BlockWithMetadata_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - blockID := tpkg.RandBlockID() - resp, err := client.BlockWithMetadataByBlockID(context.Background(), blockID) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + blockID := 
tpkg.RandBlockID() + resp, err := client.BlockWithMetadataByBlockID(context.Background(), blockID) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_CommitmentBySlot_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - slot := iotago.SlotIndex(1000_000_000) - resp, err := client.CommitmentBySlot(context.Background(), slot) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + slot := iotago.SlotIndex(1000_000_000) + resp, err := client.CommitmentBySlot(context.Background(), slot) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_CommitmentByID_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - committmentID := tpkg.RandCommitmentID() - resp, err := client.CommitmentByID(context.Background(), committmentID) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + committmentID := tpkg.RandCommitmentID() + resp, err := client.CommitmentByID(context.Background(), committmentID) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_CommitmentUTXOChangesByID_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - committmentID := tpkg.RandCommitmentID() - resp, err := client.CommitmentUTXOChangesByID(context.Background(), committmentID) - require.Error(t, err) - // commitmentID is valid, but the UTXO changes does not exist in the storage - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + committmentID := tpkg.RandCommitmentID() + resp, err := client.CommitmentUTXOChangesByID(context.Background(), committmentID) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { - "Test_CommitmentUTXOChangesFullByID_Failure", - func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - committmentID := tpkg.RandCommitmentID() - - resp, err := client.CommitmentUTXOChangesFullByID(context.Background(), committmentID) - require.Error(t, err) - // commitmentID is valid, but the UTXO changes does not exist in the storage - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + name: "Test_CommitmentUTXOChangesFullByID_Failure", + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + committmentID := tpkg.RandCommitmentID() + + resp, err := client.CommitmentUTXOChangesFullByID(context.Background(), committmentID) + require.Errorf(t, err, "node %s", nodeName) + 
require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_CommitmentUTXOChangesBySlot_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - slot := iotago.SlotIndex(1000_000_000) - resp, err := client.CommitmentUTXOChangesBySlot(context.Background(), slot) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + slot := iotago.SlotIndex(1000_000_000) + resp, err := client.CommitmentUTXOChangesBySlot(context.Background(), slot) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_CommitmentUTXOChangesFullBySlot_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - slot := iotago.SlotIndex(1000_000_000) - - resp, err := client.CommitmentUTXOChangesFullBySlot(context.Background(), slot) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + slot := iotago.SlotIndex(1000_000_000) + + resp, err := client.CommitmentUTXOChangesFullBySlot(context.Background(), slot) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_OutputByID_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - outputID := tpkg.RandOutputID(0) - resp, err := client.OutputByID(context.Background(), outputID) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + outputID := tpkg.RandOutputID(0) + resp, err := client.OutputByID(context.Background(), outputID) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_OutputMetadata_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - outputID := tpkg.RandOutputID(0) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + outputID := tpkg.RandOutputID(0) - resp, err := client.OutputMetadataByID(context.Background(), outputID) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + resp, err := client.OutputMetadataByID(context.Background(), outputID) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_OutputWithMetadata_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - outputID := tpkg.RandOutputID(0) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + outputID := 
tpkg.RandOutputID(0) - out, outMetadata, err := client.OutputWithMetadataByID(context.Background(), outputID) - require.Error(t, err) - require.Nil(t, out) - require.Nil(t, outMetadata) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) + out, outMetadata, err := client.OutputWithMetadataByID(context.Background(), outputID) + require.Errorf(t, err, "node %s", nodeName) + require.Nilf(t, out, "node %s", nodeName) + require.Nilf(t, outMetadata, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + }) }, }, { name: "Test_TransactionsIncludedBlock_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - txID := tpkg.RandTransactionID() - resp, err := client.TransactionIncludedBlock(context.Background(), txID) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + txID := tpkg.RandTransactionID() + resp, err := client.TransactionIncludedBlock(context.Background(), txID) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_TransactionsIncludedBlockMetadata_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - txID := tpkg.RandTransactionID() - - resp, err := client.TransactionIncludedBlockMetadata(context.Background(), txID) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + txID := tpkg.RandTransactionID() + + resp, err := client.TransactionIncludedBlockMetadata(context.Background(), txID) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_TransactionsMetadata_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - txID := tpkg.RandTransactionID() - - resp, err := client.TransactionMetadata(context.Background(), txID) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + txID := tpkg.RandTransactionID() + + resp, err := client.TransactionMetadata(context.Background(), txID) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_Congestion_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - accountAddress := tpkg.RandAccountAddress() - commitmentID := tpkg.RandCommitmentID() - resp, err := client.Congestion(context.Background(), accountAddress, 0, commitmentID) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + accountAddress := tpkg.RandAccountAddress() + commitmentID := 
tpkg.RandCommitmentID() + resp, err := client.Congestion(context.Background(), accountAddress, 0, commitmentID) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_Committee_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - resp, err := client.Committee(context.Background(), 4) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusBadRequest)) - require.Nil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + resp, err := client.Committee(context.Background(), 4000) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusBadRequest), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, { name: "Test_Rewards_Failure", - testFunc: func(t *testing.T, node *dockertestframework.Node, client mock.Client) { - outputID := tpkg.RandOutputID(0) - resp, err := client.Rewards(context.Background(), outputID) - require.Error(t, err) - require.True(t, dockertestframework.IsStatusCode(err, http.StatusNotFound)) - require.Nil(t, resp) + testFunc: func(t *testing.T) { + forEachNodeClient(func(nodeName string, client mock.Client) { + outputID := tpkg.RandOutputID(0) + resp, err := client.Rewards(context.Background(), outputID) + require.Errorf(t, err, "node %s", nodeName) + require.Truef(t, dockertestframework.IsStatusCode(err, http.StatusNotFound), "node %s", nodeName) + require.Nilf(t, resp, "node %s", nodeName) + }) }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - for _, node := range d.Nodes() { - test.testFunc(d.Testing, node, d.Client(node.Name)) - } + test.testFunc(d.Testing) }) } } diff --git a/tools/docker-network/tests/api_management_test.go b/tools/docker-network/tests/api_management_test.go index c10c36b95..bd7134839 100644 --- a/tools/docker-network/tests/api_management_test.go +++ b/tools/docker-network/tests/api_management_test.go @@ -12,7 +12,6 @@ import ( "github.com/iotaledger/iota-core/pkg/storage/database" "github.com/iotaledger/iota-core/tools/docker-network/tests/dockertestframework" - iotago "github.com/iotaledger/iota.go/v4" "github.com/iotaledger/iota.go/v4/api" ) @@ -22,21 +21,17 @@ func getContextWithTimeout(duration time.Duration) context.Context { return ctx } -// Test_ManagementAPI_Peers tests if the peer management API returns the expected results. +// Test_ManagementAPI_Peers_ValidRequests tests if the peer management API returns the expected results. // 1. Run docker network. // 2. List all peers of node 1. // 3. Delete a peer from node 1. // 4. List all peers of node 1 again and check if the peer was deleted. // 5. Re-Add the peer to node 1. // 6. List all peers of node 1 again and check if the peer was added. 
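The core API migration above follows a single pattern: the old per-test (node, client) parameters are replaced by a local forEachNodeClient closure, and every assertion switches to the *f variants of require so the failing node's name ends up in the message. A condensed sketch of that pattern, using only names that appear in the diff (forEachNodeClient, d.Nodes(), d.Client, mock.Client); the surrounding test setup is assumed:

// per-node iteration helper, as introduced for both the valid-request and the bad-request tests
forEachNodeClient := func(consumer func(nodeName string, client mock.Client)) {
	for _, node := range d.Nodes() {
		consumer(node.Name, d.Client(node.Name))
	}
}

// every check runs once per node and names the node on failure
forEachNodeClient(func(nodeName string, client mock.Client) {
	resp, err := client.Committee(context.Background())
	require.NoErrorf(t, err, "node %s", nodeName)
	require.NotNilf(t, resp, "node %s", nodeName)
})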
-func Test_ManagementAPI_Peers(t *testing.T) { +func Test_ManagementAPI_Peers_ValidRequests(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, - dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(4), - )) + dockertestframework.WithProtocolParametersOptions(dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc()...), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -45,13 +40,12 @@ func Test_ManagementAPI_Peers(t *testing.T) { d.AddValidatorNode("V4", "docker-network-inx-validator-4-1", "http://localhost:8040", "rms1pr8cxs3dzu9xh4cduff4dd4cxdthpjkpwmz2244f75m0urslrsvtsshrrjw") d.AddNode("node5", "docker-network-node-5-1", "http://localhost:8080") - runErr := d.Run() - require.NoError(t, runErr) + err := d.Run() + require.NoError(t, err) d.WaitUntilNetworkReady() - // wait longer for autopeering - d.AwaitCommitment(d.DefaultWallet().CurrentSlot()) + d.AwaitCommittedSlot(d.DefaultWallet().CurrentSlot(), true) // get the management client managementClient, err := d.Client("V1").Management(getContextWithTimeout(5 * time.Second)) @@ -123,12 +117,8 @@ func Test_ManagementAPI_Peers(t *testing.T) { func Test_ManagementAPI_Peers_BadRequests(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, - dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(4), - )) + dockertestframework.WithProtocolParametersOptions(dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc()...), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -137,8 +127,8 @@ func Test_ManagementAPI_Peers_BadRequests(t *testing.T) { d.AddValidatorNode("V4", "docker-network-inx-validator-4-1", "http://localhost:8040", "rms1pr8cxs3dzu9xh4cduff4dd4cxdthpjkpwmz2244f75m0urslrsvtsshrrjw") d.AddNode("node5", "docker-network-node-5-1", "http://localhost:8080") - runErr := d.Run() - require.NoError(t, runErr) + err := d.Run() + require.NoError(t, err) d.WaitUntilNetworkReady() @@ -187,13 +177,7 @@ func Test_ManagementAPI_Peers_BadRequests(t *testing.T) { func Test_ManagementAPI_Pruning(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, - dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(0, time.Now().Unix(), 10, 3), - iotago.WithLivenessOptions(10, 10, 2, 4, 5), - iotago.WithCongestionControlOptions(1, 1, 1, 400_000, 250_000, 50_000_000, 1000, 100), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(4), - ), + dockertestframework.WithProtocolParametersOptions(dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc()...), ) defer d.Stop() @@ -203,8 +187,8 @@ func Test_ManagementAPI_Pruning(t *testing.T) { d.AddValidatorNode("V4", "docker-network-inx-validator-4-1", "http://localhost:8040", "rms1pr8cxs3dzu9xh4cduff4dd4cxdthpjkpwmz2244f75m0urslrsvtsshrrjw") d.AddNode("node5", "docker-network-node-5-1", "http://localhost:8080") - runErr := d.Run() - require.NoError(t, runErr) + err := d.Run() + 
require.NoError(t, err) d.WaitUntilNetworkReady() @@ -268,13 +252,8 @@ func Test_ManagementAPI_Pruning(t *testing.T) { func Test_ManagementAPI_Snapshots(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, - dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(0, time.Now().Unix(), 10, 3), - iotago.WithLivenessOptions(10, 10, 2, 4, 5), - iotago.WithCongestionControlOptions(1, 1, 1, 400_000, 250_000, 50_000_000, 1000, 100), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(4), - )) + dockertestframework.WithProtocolParametersOptions(dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc()...), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -283,8 +262,8 @@ func Test_ManagementAPI_Snapshots(t *testing.T) { d.AddValidatorNode("V4", "docker-network-inx-validator-4-1", "http://localhost:8040", "rms1pr8cxs3dzu9xh4cduff4dd4cxdthpjkpwmz2244f75m0urslrsvtsshrrjw") d.AddNode("node5", "docker-network-node-5-1", "http://localhost:8080") - runErr := d.Run() - require.NoError(t, runErr) + err := d.Run() + require.NoError(t, err) d.WaitUntilNetworkReady() diff --git a/tools/docker-network/tests/committeerotation_test.go b/tools/docker-network/tests/committeerotation_test.go index 576be1af8..9abcc7e9f 100644 --- a/tools/docker-network/tests/committeerotation_test.go +++ b/tools/docker-network/tests/committeerotation_test.go @@ -13,8 +13,19 @@ import ( "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/iota-core/tools/docker-network/tests/dockertestframework" iotago "github.com/iotaledger/iota.go/v4" + "github.com/iotaledger/iota.go/v4/api" ) +func calcNextEpoch(nodeStatus *api.InfoResNodeStatus, timeProvider *iotago.TimeProvider, minEpoch iotago.EpochIndex) iotago.EpochIndex { + currentEpoch := timeProvider.EpochFromSlot(nodeStatus.LatestAcceptedBlockSlot) + + if currentEpoch+1 > minEpoch { + return currentEpoch + 1 + } + + return minEpoch +} + // Test_SmallerCommittee tests if the committee rotated to a smaller committee than targetCommitteeSize // if less than targetCommitteeSize validators issued candidacy payloads. // 1. Run docker network, targetCommitteeSize=4, with 4 validators running. @@ -24,12 +35,8 @@ import ( // 5. Check that committee of size 4 is selected in next epoch. 
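The calcNextEpoch helper introduced above is what keeps the committee assertions below from racing the clock: instead of pinning currentEpoch+2 to a status snapshot taken before a possibly slow container restart, each assertion re-reads the node status and targets whichever is later, the requested minimum epoch or the epoch after the latest accepted block. For example, if the latest accepted block already sits in epoch 7, calcNextEpoch(status, timeProvider, 6) returns 8, while calcNextEpoch(status, timeProvider, 10) still returns 10. A typical call shape, condensed from Test_SmallerCommittee below:

// re-read the status right before asserting, so a delayed restart cannot push the target epoch into the past
nextEpoch := calcNextEpoch(d.NodeStatus("V1"), defaultClient.CommittedAPI().TimeProvider(), initialEpoch+2)
d.AssertCommittee(nextEpoch, d.AccountsFromNodes(d.Nodes("V1", "V3", "V4")...))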
func Test_SmallerCommittee(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, - dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(4), - )) + dockertestframework.WithProtocolParametersOptions(dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc()...), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -45,20 +52,22 @@ func Test_SmallerCommittee(t *testing.T) { status := d.NodeStatus("V1") - clt := d.DefaultWallet().Client - currentEpoch := clt.CommittedAPI().TimeProvider().EpochFromSlot(status.LatestAcceptedBlockSlot) + defaultClient := d.DefaultWallet().Client + initialEpoch := defaultClient.CommittedAPI().TimeProvider().EpochFromSlot(status.LatestAcceptedBlockSlot) // stop inx-validator plugin of validator 2 err = d.StopContainer(d.Node("V2").ContainerName) require.NoError(t, err) - d.AssertCommittee(currentEpoch+2, d.AccountsFromNodes(d.Nodes("V1", "V3", "V4")...)) + nextEpoch := calcNextEpoch(d.NodeStatus("V1"), defaultClient.CommittedAPI().TimeProvider(), initialEpoch+2) + d.AssertCommittee(nextEpoch, d.AccountsFromNodes(d.Nodes("V1", "V3", "V4")...)) // restart inx-validator plugin of validator 2 err = d.RestartContainer(d.Node("V2").ContainerName) require.NoError(t, err) - d.AssertCommittee(currentEpoch+3, d.AccountsFromNodes(d.Nodes()...)) + nextEpoch = calcNextEpoch(d.NodeStatus("V1"), defaultClient.CommittedAPI().TimeProvider(), nextEpoch+1) + d.AssertCommittee(nextEpoch, d.AccountsFromNodes(d.Nodes()...)) } // Test_ReuseDueToNoFinalization tests if the committee members are the same (reused) due to no slot finalization at epochNearingThreshold and recovery after finalization comes back. @@ -69,12 +78,8 @@ func Test_SmallerCommittee(t *testing.T) { // 5. Check that committee of size 3 (V1, V2, V4) is selected in next epoch and finalization occurs again from that epoch. 
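Test_ReuseDueToNoFinalization below leans on the reworked AssertFinalizedSlot (see the asserts.go hunk further down), whose condition callback now also receives the node name so a misbehaving node can be identified. The two conditions used in the test boil down to the following sketch (error messages shortened here):

// finalization must have stopped: the finalized slot may not have moved on any node
d.AssertFinalizedSlot(func(nodeName string, latestFinalizedSlot iotago.SlotIndex) error {
	if prevFinalizedSlot == latestFinalizedSlot {
		return nil
	}
	return ierrors.Errorf("no finalization expected on node %s", nodeName)
})

// finalization must have resumed: the finalized slot must have advanced on every node
d.AssertFinalizedSlot(func(nodeName string, latestFinalizedSlot iotago.SlotIndex) error {
	if prevFinalizedSlot < latestFinalizedSlot {
		return nil
	}
	return ierrors.Errorf("finalization expected on node %s", nodeName)
})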
func Test_ReuseDueToNoFinalization(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, - dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(4), - )) + dockertestframework.WithProtocolParametersOptions(dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc()...), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -88,45 +93,61 @@ func Test_ReuseDueToNoFinalization(t *testing.T) { d.WaitUntilNetworkReady() - // stop 2 validators, finalization should stop + // stop 2 inx-validator plugins, finalization should stop err = d.StopContainer(d.Node("V2").ContainerName, d.Node("V3").ContainerName) require.NoError(t, err) - clt := d.DefaultWallet().Client + defaultClient := d.DefaultWallet().Client status := d.NodeStatus("V1") + // store initial finalized slot prevFinalizedSlot := status.LatestFinalizedSlot - fmt.Println("First finalized slot: ", prevFinalizedSlot) - - currentEpoch := clt.CommittedAPI().TimeProvider().EpochFromSlot(prevFinalizedSlot) + currentEpoch := defaultClient.CommittedAPI().TimeProvider().EpochFromSlot(prevFinalizedSlot) - // Due to no finalization, committee should be reused, remain 4 validators - d.AssertCommittee(currentEpoch+2, d.AccountsFromNodes(d.Nodes()...)) + // due to no finalization, committee should be reused, remain 4 validators + // we check 2 epochs ahead + nextEpoch := calcNextEpoch(d.NodeStatus("V1"), defaultClient.CommittedAPI().TimeProvider(), currentEpoch+2) + d.AssertCommittee(nextEpoch, d.AccountsFromNodes(d.Nodes()...)) // check if finalization stops - fmt.Println("Second finalized slot: ", status.LatestFinalizedSlot) - d.AssertFinalizedSlot(func(newFinalizedSlot iotago.SlotIndex) error { - if prevFinalizedSlot == newFinalizedSlot { + d.AssertFinalizedSlot(func(nodeName string, latestFinalizedSlot iotago.SlotIndex) error { + if prevFinalizedSlot == latestFinalizedSlot { + // finalization should have stopped return nil } - return ierrors.Errorf("NO finalization should happened, First finalized slot: %d, Second finalized slot: %d", prevFinalizedSlot, status.LatestFinalizedSlot) + return ierrors.Errorf("No finalization should have happened, Previous finalized slot: %d, Latest finalized slot: %d, Node: %s", prevFinalizedSlot, latestFinalizedSlot, nodeName) }) // revive 1 validator, committee size should be 3, finalization should resume err = d.RestartContainer(d.Node("V2").ContainerName) require.NoError(t, err) - d.AssertCommittee(currentEpoch+3, d.AccountsFromNodes(d.Nodes("V1", "V2", "V4")...)) + d.WaitUntilNodesHealthy() + + // check if V2 missed to announce the candidacy during inx-validator restart. 
+ latestAcceptedBlockSlot := d.NodeStatus("V1").LatestAcceptedBlockSlot + announcementStartEpoch := defaultClient.CommittedAPI().TimeProvider().EpochFromSlot(latestAcceptedBlockSlot) + maxRegistrationSlot := dockertestframework.GetMaxRegistrationSlot(defaultClient.CommittedAPI(), announcementStartEpoch) + // the candidacy announcement needs to be done before the nearing threshold of the epoch + if latestAcceptedBlockSlot >= maxRegistrationSlot { + // it's too late for the validator to issue candidacy payloads anymore, so we wait until the next epoch + announcementStartEpoch++ + } + + // we check if the committee is updated in the next epoch, after the candidacy announcements have been processed + checkCommitteeEpoch := announcementStartEpoch + 1 + + d.AssertCommittee(checkCommitteeEpoch, d.AccountsFromNodes(d.Nodes("V1", "V2", "V4")...)) // wait finalization to catch up and check if the finalization resumes time.Sleep(5 * time.Second) - d.AssertFinalizedSlot(func(newFinalizedSlot iotago.SlotIndex) error { - if prevFinalizedSlot < newFinalizedSlot { + d.AssertFinalizedSlot(func(nodeName string, latestFinalizedSlot iotago.SlotIndex) error { + if prevFinalizedSlot < latestFinalizedSlot { + return nil } - return ierrors.Errorf("Finalization should happened, Second finalized slot: %d, Third finalized slot: %d", prevFinalizedSlot, status.LatestFinalizedSlot) + return ierrors.Errorf("Finalization should have happened, Previous finalized slot: %d, Latest finalized slot: %d, Node: %s", prevFinalizedSlot, latestFinalizedSlot, nodeName) }) } @@ -138,12 +159,8 @@ func Test_ReuseDueToNoFinalization(t *testing.T) { // 5. Check finalization advances and the committee is changed to 3 committee members. func Test_NoCandidacyPayload(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, - dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(4), - )) + dockertestframework.WithProtocolParametersOptions(dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc()...), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6", false) @@ -157,11 +174,11 @@ func Test_NoCandidacyPayload(t *testing.T) { d.WaitUntilNetworkReady() - clt := d.DefaultWallet().Client + defaultClient := d.DefaultWallet().Client status := d.NodeStatus("V1") prevFinalizedSlot := status.LatestFinalizedSlot fmt.Println("First finalized slot: ", prevFinalizedSlot) - currentEpoch := clt.CommittedAPI().TimeProvider().EpochFromSlot(status.LatestAcceptedBlockSlot) + currentEpoch := defaultClient.CommittedAPI().TimeProvider().EpochFromSlot(status.LatestAcceptedBlockSlot) d.AssertCommittee(currentEpoch+1, d.AccountsFromNodes(d.Nodes()...)) @@ -169,12 +186,12 @@ d.AssertCommittee(currentEpoch+2, d.AccountsFromNodes(d.Nodes()...)) // check if finalization continues - d.AssertFinalizedSlot(func(newFinalizedSlot iotago.SlotIndex) error { - if prevFinalizedSlot < newFinalizedSlot { + d.AssertFinalizedSlot(func(nodeName string, latestFinalizedSlot iotago.SlotIndex) error { + if prevFinalizedSlot < latestFinalizedSlot { + return nil } - return ierrors.Errorf("Finalization should happened, First finalized slot: %d, Second finalized slot: %d", prevFinalizedSlot, newFinalizedSlot) + return
ierrors.Errorf("Finalization should have happened, Previous finalized slot: %d, Latest finalized slot: %d, Node: %s", prevFinalizedSlot, latestFinalizedSlot, nodeName) }) // Start issuing candidacy payloads for 3 validators, and check if committee size is 3 @@ -189,11 +206,12 @@ func Test_NoCandidacyPayload(t *testing.T) { func Test_Staking(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(3), - )) + append( + dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc(), + iotago.WithTargetCommitteeSize(3), + )..., + ), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -211,16 +229,20 @@ func Test_Staking(t *testing.T) { t.Cleanup(cancel) // create implicit account for the validator - wallet, implicitAccountOutputData := d.CreateImplicitAccount(ctx) + implicitAccount := d.CreateImplicitAccount(ctx, "account-1") + + blockIssuance := implicitAccount.Wallet().GetNewBlockIssuanceResponse() + + latestCommitmentSlot := blockIssuance.LatestCommitment.Slot + stakingStartEpoch := d.DefaultWallet().StakingStartEpochFromSlot(latestCommitmentSlot) // create account with staking feature for the validator - accountData := d.CreateAccountFromImplicitAccount(wallet, - implicitAccountOutputData, - wallet.GetNewBlockIssuanceResponse(), - dockertestframework.WithStakingFeature(100, 1, 0), + accountWithWallet := d.CreateAccountFromImplicitAccount(implicitAccount, + blockIssuance, + dockertestframework.WithStakingFeature(100, 1, stakingStartEpoch), ) - d.AssertValidatorExists(accountData.Address) + d.AssertValidatorExists(accountWithWallet.Account().Address) } // Test_Delegation tests if committee changed due to delegation. 
@@ -232,11 +254,12 @@ func Test_Staking(t *testing.T) { func Test_Delegation(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(3), - )) + append( + dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc(), + iotago.WithTargetCommitteeSize(3), + )..., + ), + ) defer d.Stop() // V1 pubKey in hex: 0x293dc170d9a59474e6d81cfba7f7d924c09b25d7166bcfba606e53114d0a758b @@ -255,14 +278,14 @@ func Test_Delegation(t *testing.T) { d.WaitUntilNetworkReady() // create an account to perform delegation - wallet, _ := d.CreateAccountFromFaucet() + account := d.CreateAccountFromFaucet("account-1") // delegate all faucet funds to V2, V2 should replace V3 //nolint:forcetypeassert - delegationOutputData := d.DelegateToValidator(wallet, d.Node("V2").AccountAddress(t)) + delegationOutputData := d.DelegateToValidator(account.Wallet(), d.Node("V2").AccountAddress(t)) d.AssertCommittee(delegationOutputData.Output.(*iotago.DelegationOutput).StartEpoch+1, d.AccountsFromNodes(d.Nodes("V1", "V2", "V4")...)) // delegate all faucet funds to V3, V3 should replace V1 - delegationOutputData = d.DelegateToValidator(wallet, d.Node("V3").AccountAddress(t)) + delegationOutputData = d.DelegateToValidator(account.Wallet(), d.Node("V3").AccountAddress(t)) d.AssertCommittee(delegationOutputData.Output.(*iotago.DelegationOutput).StartEpoch+1, d.AccountsFromNodes(d.Nodes("V2", "V3", "V4")...)) } diff --git a/tools/docker-network/tests/dockertestframework/accounts.go b/tools/docker-network/tests/dockertestframework/accounts.go index d09293ee1..88b8d1ddd 100644 --- a/tools/docker-network/tests/dockertestframework/accounts.go +++ b/tools/docker-network/tests/dockertestframework/accounts.go @@ -5,6 +5,7 @@ package dockertestframework import ( "context" "fmt" + "sync" "github.com/stretchr/testify/require" @@ -43,11 +44,11 @@ func (d *DockerTestFramework) CheckAccountStatus(ctx context.Context, blkID iota d.AwaitTransactionPayloadAccepted(ctx, txID) // wait for the account to be committed - d.AwaitCommitment(slot) + d.AwaitCommittedSlot(slot, false) // Check the indexer if len(checkIndexer) > 0 && checkIndexer[0] { - indexerClt, err := d.defaultWallet.Client.Indexer(ctx) + indexerClt, err := clt.Indexer(ctx) require.NoError(d.Testing, err) _, _, _, err = indexerClt.Account(ctx, accountAddress) @@ -59,43 +60,88 @@ func (d *DockerTestFramework) CheckAccountStatus(ctx context.Context, blkID iota require.NoError(d.Testing, err) } +type ImplicitAccount struct { + wallet *mock.Wallet + outputData *mock.OutputData +} + +func (i *ImplicitAccount) Wallet() *mock.Wallet { + return i.wallet +} + +func (i *ImplicitAccount) OutputData() *mock.OutputData { + return i.outputData +} + // CreateImplicitAccount requests faucet funds and creates an implicit account. It already wait until the transaction is committed and the created account is useable. 
-func (d *DockerTestFramework) CreateImplicitAccount(ctx context.Context) (*mock.Wallet, *mock.OutputData) { - newWallet := mock.NewWallet(d.Testing, "", d.defaultWallet.Client, &DockerWalletClock{client: d.defaultWallet.Client}) - implicitAccountOutputData := d.RequestFaucetFunds(ctx, newWallet, iotago.AddressImplicitAccountCreation) +func (d *DockerTestFramework) CreateImplicitAccount(ctx context.Context, name string) *ImplicitAccount { + clt := d.defaultWallet.Client - accountID := iotago.AccountIDFromOutputID(implicitAccountOutputData.ID) + wallet := mock.NewWallet(d.Testing, name, clt, &DockerWalletClock{client: clt}) + outputData := d.RequestFaucetFunds(ctx, wallet, iotago.AddressImplicitAccountCreation) + + accountID := iotago.AccountIDFromOutputID(outputData.ID) accountAddress, ok := accountID.ToAddress().(*iotago.AccountAddress) require.True(d.Testing, ok) // make sure an implicit account is committed - d.CheckAccountStatus(ctx, iotago.EmptyBlockID, implicitAccountOutputData.ID.TransactionID(), implicitAccountOutputData.ID, accountAddress) + d.CheckAccountStatus(ctx, iotago.EmptyBlockID, outputData.ID.TransactionID(), outputData.ID, accountAddress) // update the wallet with the new account data - newWallet.SetBlockIssuer(&mock.AccountData{ + wallet.SetBlockIssuer(&mock.AccountData{ ID: accountID, Address: accountAddress, - OutputID: implicitAccountOutputData.ID, - AddressIndex: implicitAccountOutputData.AddressIndex, + OutputID: outputData.ID, + AddressIndex: outputData.AddressIndex, }) - return newWallet, implicitAccountOutputData + return &ImplicitAccount{ + wallet: wallet, + outputData: outputData, + } +} + +func (d *DockerTestFramework) CreateImplicitAccounts(ctx context.Context, count int, names ...string) []*ImplicitAccount { + var wg sync.WaitGroup + + implicitAccounts := make([]*ImplicitAccount, count) + + for i := range count { + wg.Add(1) + + // first create all implicit accounts in parallel + go func(nr int) { + defer wg.Done() + + // create implicit accounts + name := fmt.Sprintf("account-%d", nr) + if len(names) > nr { + name = names[nr] + } + implicitAccounts[nr] = d.CreateImplicitAccount(ctx, name) + }(i) + } + + // wait until all implicit accounts are created + wg.Wait() + + return implicitAccounts } // TransitionImplicitAccountToAccountOutputBlock consumes the given implicit account, then build the account transition block with the given account output options. 
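CreateImplicitAccounts above fans the faucet requests out with a sync.WaitGroup and only returns once every implicit account has been committed (each CreateImplicitAccount call still runs the CheckAccountStatus wait). Wallet names are optional and default to account-&lt;n&gt;. A minimal usage sketch; the count of 2 is illustrative only:

// creates "account-0" and "account-1" concurrently and waits for both to be committed and usable
implicitAccounts := d.CreateImplicitAccounts(context.TODO(), 2)

// each entry exposes the wallet and the implicit-account output it was created from
wallet := implicitAccounts[0].Wallet()
outputData := implicitAccounts[0].OutputData()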
-func (d *DockerTestFramework) TransitionImplicitAccountToAccountOutputBlock(accountWallet *mock.Wallet, implicitAccountOutputData *mock.OutputData, blockIssuance *api.IssuanceBlockHeaderResponse, opts ...options.Option[builder.AccountOutputBuilder]) (*mock.AccountData, *iotago.SignedTransaction, *iotago.Block) { +func (d *DockerTestFramework) TransitionImplicitAccountToAccountOutputBlock(implicitAccount *ImplicitAccount, blockIssuance *api.IssuanceBlockHeaderResponse, opts ...options.Option[builder.AccountOutputBuilder]) (*mock.AccountData, *iotago.SignedTransaction, *iotago.Block) { ctx := context.TODO() - var implicitBlockIssuerKey iotago.BlockIssuerKey = iotago.Ed25519PublicKeyHashBlockIssuerKeyFromImplicitAccountCreationAddress(accountWallet.ImplicitAccountCreationAddress()) + var implicitBlockIssuerKey iotago.BlockIssuerKey = iotago.Ed25519PublicKeyHashBlockIssuerKeyFromImplicitAccountCreationAddress(implicitAccount.Wallet().ImplicitAccountCreationAddress()) opts = append(opts, mock.WithBlockIssuerFeature( iotago.NewBlockIssuerKeys(implicitBlockIssuerKey), iotago.MaxSlotIndex, )) - signedTx := accountWallet.TransitionImplicitAccountToAccountOutputWithBlockIssuance("", []*mock.OutputData{implicitAccountOutputData}, blockIssuance, opts...) + signedTx := implicitAccount.Wallet().TransitionImplicitAccountToAccountOutputWithBlockIssuance("", []*mock.OutputData{implicitAccount.OutputData()}, blockIssuance, opts...) // The account transition block should be issued by the implicit account block issuer key. - block, err := accountWallet.CreateBasicBlock(ctx, "", mock.WithPayload(signedTx)) + block, err := implicitAccount.Wallet().CreateBasicBlock(ctx, "", mock.WithPayload(signedTx)) require.NoError(d.Testing, err) accOutputID := iotago.OutputIDFromTransactionIDAndIndex(signedTx.Transaction.MustID(), 0) accOutput := signedTx.Transaction.Outputs[0].(*iotago.AccountOutput) @@ -106,47 +152,56 @@ func (d *DockerTestFramework) TransitionImplicitAccountToAccountOutputBlock(acco Address: accAddress, Output: accOutput, OutputID: accOutputID, - AddressIndex: implicitAccountOutputData.AddressIndex, + AddressIndex: implicitAccount.OutputData().AddressIndex, } return accountOutputData, signedTx, block.ProtocolBlock() } // CreateAccountFromImplicitAccount transitions an account from the given implicit one to full one, it already wait until the transaction is committed and the created account is useable. -func (d *DockerTestFramework) CreateAccountFromImplicitAccount(accountWallet *mock.Wallet, implicitAccountOutputData *mock.OutputData, blockIssuance *api.IssuanceBlockHeaderResponse, opts ...options.Option[builder.AccountOutputBuilder]) *mock.AccountData { +func (d *DockerTestFramework) CreateAccountFromImplicitAccount(implicitAccount *ImplicitAccount, blockIssuance *api.IssuanceBlockHeaderResponse, opts ...options.Option[builder.AccountOutputBuilder]) *mock.AccountWithWallet { ctx := context.TODO() - accountData, signedTx, block := d.TransitionImplicitAccountToAccountOutputBlock(accountWallet, implicitAccountOutputData, blockIssuance, opts...) + accountData, signedTx, block := d.TransitionImplicitAccountToAccountOutputBlock(implicitAccount, blockIssuance, opts...) 
d.SubmitBlock(ctx, block) d.CheckAccountStatus(ctx, block.MustID(), signedTx.Transaction.MustID(), accountData.OutputID, accountData.Address, true) // update the wallet with the new account data - accountWallet.SetBlockIssuer(accountData) + implicitAccount.Wallet().SetBlockIssuer(accountData) - fmt.Printf("Account created, Bech addr: %s\n", accountData.Address.Bech32(accountWallet.Client.CommittedAPI().ProtocolParameters().Bech32HRP())) + fmt.Printf("Account created, Bech addr: %s\n", accountData.Address.Bech32(implicitAccount.Wallet().Client.CommittedAPI().ProtocolParameters().Bech32HRP())) - return accountWallet.Account(accountData.ID) + return mock.NewAccountWithWallet(implicitAccount.Wallet().Account(accountData.ID), implicitAccount.Wallet()) } // CreateAccountFromFaucet creates a new account by requesting faucet funds to an implicit account address and then transitioning the new output to a full account output. // It already waits until the transaction is committed and the created account is useable. -func (d *DockerTestFramework) CreateAccountFromFaucet() (*mock.Wallet, *mock.AccountData) { - ctx := context.TODO() +func (d *DockerTestFramework) CreateAccountFromFaucet(name string) *mock.AccountWithWallet { + return d.CreateAccountFromImplicitAccount(d.CreateImplicitAccount(context.TODO(), name), d.defaultWallet.GetNewBlockIssuanceResponse()) +} - newWallet, implicitAccountOutputData := d.CreateImplicitAccount(ctx) +func (d *DockerTestFramework) CreateAccountsFromFaucet(ctx context.Context, count int, names ...string) []*mock.AccountWithWallet { + implicitAccounts := d.CreateImplicitAccounts(ctx, count, names...) - accountData, signedTx, block := d.TransitionImplicitAccountToAccountOutputBlock(newWallet, implicitAccountOutputData, d.defaultWallet.GetNewBlockIssuanceResponse()) + blockIssuance := d.defaultWallet.GetNewBlockIssuanceResponse() - d.SubmitBlock(ctx, block) - d.CheckAccountStatus(ctx, block.MustID(), signedTx.Transaction.MustID(), accountData.OutputID, accountData.Address, true) + // transition all implicit accounts in parallel + var wg sync.WaitGroup + accounts := make([]*mock.AccountWithWallet, count) + for i := range count { + wg.Add(1) - // update the wallet with the new account data - newWallet.SetBlockIssuer(accountData) + go func(nr int) { + defer wg.Done() - fmt.Printf("Account created, Bech addr: %s\n", accountData.Address.Bech32(newWallet.Client.CommittedAPI().ProtocolParameters().Bech32HRP())) + accounts[nr] = d.CreateAccountFromImplicitAccount(implicitAccounts[nr], blockIssuance) + }(i) + } + // wait until all accounts are created + wg.Wait() - return newWallet, newWallet.Account(accountData.ID) + return accounts } // CreateNativeToken request faucet funds then use it to create native token for the account, and returns the updated Account. 
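The net effect of the accounts.go changes above is that callers no longer juggle a separate wallet and AccountData pair: CreateAccountFromFaucet and CreateAccountFromImplicitAccount hand back a single mock.AccountWithWallet, and CreateAccountsFromFaucet does the same for several accounts created in parallel against one block issuance response. A sketch of how the rotation tests earlier in this diff consume the handle (condensed from Test_Delegation and Test_Staking; printing the address is illustrative only):

account := d.CreateAccountFromFaucet("account-1")

// wallet side: signs and sends, as in Test_Delegation
delegation := d.DelegateToValidator(account.Wallet(), d.Node("V2").AccountAddress(t))
d.AssertCommittee(delegation.Output.(*iotago.DelegationOutput).StartEpoch+1, d.AccountsFromNodes(d.Nodes("V1", "V2", "V4")...))

// account side: the same handle exposes the underlying account data (Test_Staking uses this for AssertValidatorExists)
fmt.Println(account.Account().Address.Bech32(iotago.PrefixTestnet))

// bulk variant: several funded accounts created in parallel against one block issuance response
for _, acc := range d.CreateAccountsFromFaucet(context.TODO(), 2) {
	fmt.Println(acc.Account().ID)
}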
@@ -169,7 +224,7 @@ func (d *DockerTestFramework) CreateNativeToken(fromWallet *mock.Wallet, mintedA fmt.Println("Create native tokens transaction sent, blkID:", block.ID().ToHex(), ", txID:", signedTx.Transaction.MustID().ToHex(), ", slot:", block.ID().Slot()) // wait for the account to be committed - d.AwaitCommitment(block.ID().Slot()) + d.AwaitCommittedSlot(block.ID().Slot(), false) d.AssertIndexerAccount(fromWallet.BlockIssuer.AccountData) //nolint:forcetypeassert diff --git a/tools/docker-network/tests/dockertestframework/asserts.go b/tools/docker-network/tests/dockertestframework/asserts.go index 16dc0f4e7..f4340b982 100644 --- a/tools/docker-network/tests/dockertestframework/asserts.go +++ b/tools/docker-network/tests/dockertestframework/asserts.go @@ -6,7 +6,6 @@ import ( "context" "fmt" "sort" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -69,20 +68,11 @@ func (d *DockerTestFramework) AssertValidatorExists(accountAddr *iotago.AccountA func (d *DockerTestFramework) AssertCommittee(expectedEpoch iotago.EpochIndex, expectedCommitteeMember []string) { fmt.Println("Wait for committee selection..., expected epoch: ", expectedEpoch, ", expected committee size: ", len(expectedCommitteeMember)) - defer fmt.Println("Wait for committee selection......done") + defer fmt.Println("Wait for committee selection... done!") sort.Strings(expectedCommitteeMember) - status := d.NodeStatus("V1") - testAPI := d.defaultWallet.Client.CommittedAPI() - expectedSlotStart := testAPI.TimeProvider().EpochStart(expectedEpoch) - - if status.LatestAcceptedBlockSlot < expectedSlotStart { - slotToWait := expectedSlotStart - status.LatestAcceptedBlockSlot - secToWait := time.Duration(slotToWait) * time.Duration(testAPI.ProtocolParameters().SlotDurationInSeconds()) * time.Second - fmt.Println("Wait for ", secToWait, "until expected epoch: ", expectedEpoch) - time.Sleep(secToWait) - } + d.AwaitLatestAcceptedBlockSlot(d.defaultWallet.Client.CommittedAPI().TimeProvider().EpochStart(expectedEpoch), true) d.Eventually(func() error { for _, node := range d.Nodes() { @@ -110,11 +100,11 @@ func (d *DockerTestFramework) AssertCommittee(expectedEpoch iotago.EpochIndex, e }) } -func (d *DockerTestFramework) AssertFinalizedSlot(condition func(iotago.SlotIndex) error) { +func (d *DockerTestFramework) AssertFinalizedSlot(condition func(nodeName string, latestFinalizedSlot iotago.SlotIndex) error) { for _, node := range d.Nodes() { status := d.NodeStatus(node.Name) - err := condition(status.LatestFinalizedSlot) + err := condition(node.Name, status.LatestFinalizedSlot) require.NoError(d.Testing, err) } } diff --git a/tools/docker-network/tests/dockertestframework/awaits.go b/tools/docker-network/tests/dockertestframework/awaits.go index 14c9b696e..d5795e5de 100644 --- a/tools/docker-network/tests/dockertestframework/awaits.go +++ b/tools/docker-network/tests/dockertestframework/awaits.go @@ -4,6 +4,7 @@ package dockertestframework import ( "context" + "fmt" "time" "github.com/stretchr/testify/require" @@ -69,66 +70,78 @@ func (d *DockerTestFramework) AwaitTransactionFailure(ctx context.Context, txID }) } -func (d *DockerTestFramework) AwaitCommitment(targetSlot iotago.SlotIndex) { - currentCommittedSlot := d.NodeStatus("V1").LatestCommitmentID.Slot() +func (d *DockerTestFramework) awaitSlot(targetSlot iotago.SlotIndex, slotName string, getCurrentSlotFunc func() iotago.SlotIndex, printWaitMessage bool, offsetDeadline ...time.Duration) { + currentSlot := getCurrentSlotFunc() - // we wait at max 
"targetSlot - currentCommittedSlot" times * slot duration + if currentSlot >= targetSlot { + return + } + + // we wait at max "targetSlot - currentSlot" times * slot duration deadline := time.Duration(d.defaultWallet.Client.CommittedAPI().ProtocolParameters().SlotDurationInSeconds()) * time.Second - if currentCommittedSlot < targetSlot { - deadline *= time.Duration(targetSlot - currentCommittedSlot) + if currentSlot < targetSlot { + deadline *= time.Duration(targetSlot - currentSlot) + } + + if printWaitMessage { + fmt.Println(fmt.Sprintf("Wait for %v until %s slot %d is reached... (current: %d)", deadline.Truncate(time.Millisecond), slotName, targetSlot, currentSlot)) } // give some extra time for peering etc - deadline += 30 * time.Second + if len(offsetDeadline) > 0 { + deadline += offsetDeadline[0] + } else { + // add 30 seconds as default + deadline += 30 * time.Second + } d.EventuallyWithDurations(func() error { - latestCommittedSlot := d.NodeStatus("V1").LatestCommitmentID.Slot() - if targetSlot > latestCommittedSlot { - return ierrors.Errorf("committed slot %d is not reached yet, current committed slot %d", targetSlot, latestCommittedSlot) + currentSlot := getCurrentSlotFunc() + if targetSlot > currentSlot { + return ierrors.Errorf("%s slot %d is not reached yet, %s slot %d", slotName, targetSlot, slotName, currentSlot) } return nil }, deadline, 1*time.Second) } -func (d *DockerTestFramework) AwaitFinalization(targetSlot iotago.SlotIndex) { - currentFinalizedSlot := d.NodeStatus("V1").LatestFinalizedSlot - - // we wait at max "targetSlot - currentFinalizedSlot" times * slot duration - deadline := time.Duration(d.defaultWallet.Client.CommittedAPI().ProtocolParameters().SlotDurationInSeconds()) * time.Second - if currentFinalizedSlot < targetSlot { - deadline *= time.Duration(targetSlot - currentFinalizedSlot) - } - - // give some extra time for peering etc - deadline += 30 * time.Second +func (d *DockerTestFramework) AwaitLatestAcceptedBlockSlot(targetSlot iotago.SlotIndex, printWaitMessage bool, offsetDeadline ...time.Duration) { + d.awaitSlot(targetSlot, "latest accepted block", func() iotago.SlotIndex { + return d.NodeStatus("V1").LatestAcceptedBlockSlot + }, printWaitMessage, offsetDeadline...) +} - d.EventuallyWithDurations(func() error { - currentFinalisedSlot := d.NodeStatus("V1").LatestFinalizedSlot - if targetSlot > currentFinalisedSlot { - return ierrors.Errorf("finalized slot %d is not reached yet", targetSlot) - } +func (d *DockerTestFramework) AwaitCommittedSlot(targetSlot iotago.SlotIndex, printWaitMessage bool, offsetDeadline ...time.Duration) { + d.awaitSlot(targetSlot, "committed", func() iotago.SlotIndex { + return d.NodeStatus("V1").LatestCommitmentID.Slot() + }, printWaitMessage, offsetDeadline...) +} - return nil - }, deadline, 1*time.Second) +func (d *DockerTestFramework) AwaitFinalizedSlot(targetSlot iotago.SlotIndex, printWaitMessage bool, offsetDeadline ...time.Duration) { + d.awaitSlot(targetSlot, "finalized", func() iotago.SlotIndex { + return d.NodeStatus("V1").LatestFinalizedSlot + }, printWaitMessage, offsetDeadline...) 
} func (d *DockerTestFramework) AwaitEpochFinalized() { - //nolint:lostcancel - ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + clt := d.defaultWallet.Client - info, err := d.defaultWallet.Client.Info(ctx) + info, err := clt.Info(ctx) require.NoError(d.Testing, err) - currentEpoch := d.defaultWallet.Client.CommittedAPI().TimeProvider().EpochFromSlot(info.Status.LatestFinalizedSlot) + currentEpoch := clt.CommittedAPI().TimeProvider().EpochFromSlot(info.Status.LatestFinalizedSlot) // await the start slot of the next epoch - d.AwaitFinalization(d.defaultWallet.Client.CommittedAPI().TimeProvider().EpochStart(currentEpoch + 1)) + d.AwaitFinalizedSlot(clt.CommittedAPI().TimeProvider().EpochStart(currentEpoch+1), true) } func (d *DockerTestFramework) AwaitAddressUnspentOutputAccepted(ctx context.Context, wallet *mock.Wallet, addr iotago.Address) (outputID iotago.OutputID, output iotago.Output, err error) { indexerClt, err := wallet.Client.Indexer(ctx) require.NoError(d.Testing, err) + addrBech := addr.Bech32(d.defaultWallet.Client.CommittedAPI().ProtocolParameters().Bech32HRP()) for t := time.Now(); time.Since(t) < d.optsWaitFor; time.Sleep(d.optsTick) { diff --git a/tools/docker-network/tests/dockertestframework/blocks.go b/tools/docker-network/tests/dockertestframework/blocks.go index c034c0a8f..296544a7b 100644 --- a/tools/docker-network/tests/dockertestframework/blocks.go +++ b/tools/docker-network/tests/dockertestframework/blocks.go @@ -87,8 +87,6 @@ func (d *DockerTestFramework) CreateNFTBlockFromInput(wallet *mock.Wallet, input } func (d *DockerTestFramework) SubmitBlock(ctx context.Context, blk *iotago.Block) { - clt := d.defaultWallet.Client - - _, err := clt.SubmitBlock(ctx, blk) + _, err := d.defaultWallet.Client.SubmitBlock(ctx, blk) require.NoError(d.Testing, err) } diff --git a/tools/docker-network/tests/dockertestframework/faucet.go b/tools/docker-network/tests/dockertestframework/faucet.go index 6ec732ce0..51ce3eee7 100644 --- a/tools/docker-network/tests/dockertestframework/faucet.go +++ b/tools/docker-network/tests/dockertestframework/faucet.go @@ -20,7 +20,7 @@ import ( func (d *DockerTestFramework) WaitUntilFaucetHealthy() { fmt.Println("Wait until the faucet is healthy...") - defer fmt.Println("Wait until the faucet is healthy......done") + defer fmt.Println("Wait until the faucet is healthy... done!") d.Eventually(func() error { req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, d.optsFaucetURL+"/health", nil) @@ -116,7 +116,7 @@ func (d *DockerTestFramework) RequestFaucetFundsAndAllotManaTo(fromWallet *mock. 
d.AwaitTransactionPayloadAccepted(ctx, signedTx.Transaction.MustID()) // allotment is updated when the transaction is committed - d.AwaitCommitment(block.ID().Slot()) + d.AwaitCommittedSlot(block.ID().Slot(), false) // check if the mana is allotted toCongestionResp, err := clt.Congestion(ctx, to.Address, 0, preAllotmentCommitmentID) diff --git a/tools/docker-network/tests/dockertestframework/framework.go b/tools/docker-network/tests/dockertestframework/framework.go index abb474795..cafc4ae32 100644 --- a/tools/docker-network/tests/dockertestframework/framework.go +++ b/tools/docker-network/tests/dockertestframework/framework.go @@ -100,13 +100,16 @@ func NewDockerTestFramework(t *testing.T, opts ...options.Option[DockerTestFrame } func (d *DockerTestFramework) DockerComposeUp(detach ...bool) error { - cmd := exec.Command("docker", "compose", "up") + cmd := exec.Command("docker", "compose", "--profile", "full", "up") if len(detach) > 0 && detach[0] { - cmd = exec.Command("docker", "compose", "up", "-d") + cmd = exec.Command("docker", "compose", "--profile", "full", "up", "-d") } cmd.Env = os.Environ() + + // we want to retry the candidacy much quicker in the tests, because our epochs are super short + cmd.Env = append(cmd.Env, "CANDIDACY_RETRY_INTERVAL=1s") for _, node := range d.Nodes() { cmd.Env = append(cmd.Env, fmt.Sprintf("ISSUE_CANDIDACY_PAYLOAD_%s=%t", node.Name, node.IssueCandidacyPayload)) if node.DatabasePath != "" { @@ -131,7 +134,7 @@ func (d *DockerTestFramework) DockerComposeUp(detach ...bool) error { func (d *DockerTestFramework) Run() error { // first we remove old containers, volumes and orphans - _ = exec.Command("docker", "compose", "down", "-v", "--remove-orphans").Run() + _ = exec.Command("docker", "compose", "--profile", "full", "down", "-v", "--remove-orphans").Run() ch := make(chan error) stopCh := make(chan struct{}) @@ -188,12 +191,12 @@ func (d *DockerTestFramework) Stop() { defer fmt.Println("Stop the network.....done") // remove volumes and orphans - _ = exec.Command("docker", "compose", "down", "-v", "--remove-orphans").Run() + _ = exec.Command("docker", "compose", "--profile", "full", "down", "-v", "--remove-orphans").Run() _ = exec.Command("rm", d.snapshotPath).Run() //nolint:gosec } func (d *DockerTestFramework) StopContainer(containerName ...string) error { - fmt.Println("Stop validator", containerName, "......") + fmt.Println("Stopping container", containerName, "......") args := append([]string{"stop"}, containerName...) @@ -201,7 +204,7 @@ func (d *DockerTestFramework) StopContainer(containerName ...string) error { } func (d *DockerTestFramework) RestartContainer(containerName ...string) error { - fmt.Println("Restart validator", containerName, "......") + fmt.Println("Restarting container", containerName, "......") args := append([]string{"restart"}, containerName...) 
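Editorial note on the await helpers introduced above: `AwaitLatestAcceptedBlockSlot`, `AwaitCommittedSlot` and `AwaitFinalizedSlot` all funnel into the shared `awaitSlot`, which bounds the wait by `(targetSlot - currentSlot)` slot durations plus an extra offset (the optional variadic `offsetDeadline`, defaulting to 30 seconds for peering etc.). Below is a minimal standalone sketch of that deadline arithmetic only; the helper name `waitDeadline` and all numeric values are illustrative and not part of the patch.

```go
package main

import (
	"fmt"
	"time"
)

// waitDeadline mirrors the bound used by the awaitSlot helper: wait at most
// (targetSlot - currentSlot) slot durations, plus an optional extra offset
// that defaults to 30 seconds.
func waitDeadline(currentSlot, targetSlot uint32, slotDuration time.Duration, offset ...time.Duration) time.Duration {
	if currentSlot >= targetSlot {
		// target already reached, nothing to wait for
		return 0
	}

	deadline := time.Duration(targetSlot-currentSlot) * slotDuration

	if len(offset) > 0 {
		return deadline + offset[0]
	}

	return deadline + 30*time.Second
}

func main() {
	// hypothetical values: 5 slots ahead with 3s slots -> 15s + 30s default offset = 45s
	fmt.Println(waitDeadline(10, 15, 3*time.Second))
	// same distance with an explicit 10s offset -> 25s
	fmt.Println(waitDeadline(10, 15, 3*time.Second, 10*time.Second))
}
```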
diff --git a/tools/docker-network/tests/dockertestframework/framework_eventapi.go b/tools/docker-network/tests/dockertestframework/framework_eventapi.go index 2652f84c3..b48758d2e 100644 --- a/tools/docker-network/tests/dockertestframework/framework_eventapi.go +++ b/tools/docker-network/tests/dockertestframework/framework_eventapi.go @@ -13,6 +13,7 @@ import ( "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/options" + "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" "github.com/iotaledger/iota-core/pkg/testsuite/mock" iotago "github.com/iotaledger/iota.go/v4" "github.com/iotaledger/iota.go/v4/api" @@ -26,12 +27,6 @@ func WithEventAPIWaitFor(waitFor time.Duration) options.Option[EventAPIDockerTes } } -func WithEventAPITick(tick time.Duration) options.Option[EventAPIDockerTestFramework] { - return func(d *EventAPIDockerTestFramework) { - d.optsTick = tick - } -} - type EventAPIDockerTestFramework struct { Testing *testing.T @@ -41,7 +36,6 @@ type EventAPIDockerTestFramework struct { finishChan chan struct{} optsWaitFor time.Duration - optsTick time.Duration } func NewEventAPIDockerTestFramework(t *testing.T, dockerFramework *DockerTestFramework) *EventAPIDockerTestFramework { @@ -51,7 +45,6 @@ func NewEventAPIDockerTestFramework(t *testing.T, dockerFramework *DockerTestFra DefaultClient: dockerFramework.defaultWallet.Client, finishChan: make(chan struct{}), optsWaitFor: 3 * time.Minute, - optsTick: 5 * time.Second, } } @@ -69,18 +62,21 @@ func (e *EventAPIDockerTestFramework) ConnectEventAPIClient(ctx context.Context) } // SubmitDataBlockStream submits a stream of data blocks to the network for the given duration. -func (e *EventAPIDockerTestFramework) SubmitDataBlockStream(wallet *mock.Wallet, duration time.Duration) { +func (e *EventAPIDockerTestFramework) SubmitDataBlockStream(wallet *mock.Wallet, duration time.Duration, tick time.Duration, countPerTick int, blockSubmittedCallback func(*blocks.Block)) { timer := time.NewTimer(duration) defer timer.Stop() - ticker := time.NewTicker(e.optsTick) + ticker := time.NewTicker(tick) defer ticker.Stop() for { select { case <-ticker.C: - for i := 0; i < 10; i++ { - e.dockerFramework.defaultWallet.CreateAndSubmitBasicBlock(context.TODO(), "tagged_data_block", mock.WithPayload(tpkg.RandTaggedData([]byte("tag")))) + for range countPerTick { + block, err := wallet.CreateAndSubmitBasicBlock(context.TODO(), "tagged_data_block", mock.WithPayload(tpkg.RandTaggedData([]byte("tag")))) + require.NoError(e.Testing, err) + + blockSubmittedCallback(block) } case <-timer.C: return @@ -88,7 +84,7 @@ func (e *EventAPIDockerTestFramework) SubmitDataBlockStream(wallet *mock.Wallet, } } -func (e *EventAPIDockerTestFramework) AssertBlockMetadataStateAcceptedBlocks(ctx context.Context, eventClt *nodeclient.EventAPIClient) { +func (e *EventAPIDockerTestFramework) AssertBlockMetadataStateAcceptedBlocks(ctx context.Context, eventClt *nodeclient.EventAPIClient, receivedCallback func()) { acceptedChan, subInfo := eventClt.BlockMetadataAcceptedBlocks() require.Nil(e.Testing, subInfo.Error()) @@ -103,11 +99,16 @@ func (e *EventAPIDockerTestFramework) AssertBlockMetadataStateAcceptedBlocks(ctx case blk := <-acceptedChan: require.Equal(e.Testing, api.BlockStateAccepted, blk.BlockState, "Block %s is pending in BlockMetadataAccepted topic", blk.BlockID.ToHex()) - resp, err := eventClt.Client.BlockMetadataByBlockID(ctx, blk.BlockID) + resp, err := eventClt.Client.BlockWithMetadataByBlockID(ctx, 
blk.BlockID) require.NoError(e.Testing, err) + // accepted, confirmed are accepted - require.NotEqualf(e.Testing, api.BlockStatePending, resp.BlockState, "Block %s is pending in BlockMetadataAccepted topic", blk.BlockID.ToHex()) + require.NotEqualf(e.Testing, api.BlockStatePending, resp.Metadata.BlockState, "Block %s is pending in BlockMetadataAccepted topic", blk.BlockID.ToHex()) + if resp.Block.Body.Type() == iotago.BlockBodyTypeBasic && resp.Block.Body.(*iotago.BasicBlockBody).Payload.PayloadType() == iotago.PayloadTaggedData { + // only count the basic blocks with tagged data, ignore the validation and candidate blocks + receivedCallback() + } case <-ctx.Done(): return } @@ -115,7 +116,7 @@ func (e *EventAPIDockerTestFramework) AssertBlockMetadataStateAcceptedBlocks(ctx }() } -func (e *EventAPIDockerTestFramework) AssertBlockMetadataStateConfirmedBlocks(ctx context.Context, eventClt *nodeclient.EventAPIClient) { +func (e *EventAPIDockerTestFramework) AssertBlockMetadataStateConfirmedBlocks(ctx context.Context, eventClt *nodeclient.EventAPIClient, receivedCallback func()) { acceptedChan, subInfo := eventClt.BlockMetadataConfirmedBlocks() require.Nil(e.Testing, subInfo.Error()) @@ -130,11 +131,15 @@ func (e *EventAPIDockerTestFramework) AssertBlockMetadataStateConfirmedBlocks(ct case blk := <-acceptedChan: require.Equal(e.Testing, api.BlockStateConfirmed, blk.BlockState, "Block %s is pending in BlockMetadataConfirmed topic", blk.BlockID.ToHex()) - resp, err := eventClt.Client.BlockMetadataByBlockID(ctx, blk.BlockID) + resp, err := eventClt.Client.BlockWithMetadataByBlockID(ctx, blk.BlockID) require.NoError(e.Testing, err) - require.NotEqualf(e.Testing, api.BlockStatePending, resp.BlockState, "Block %s is pending in BlockMetadataConfirmed endpoint", blk.BlockID.ToHex()) - require.NotEqualf(e.Testing, api.BlockStateAccepted, resp.BlockState, "Block %s is accepted in BlockMetadataConfirmed endpoint", blk.BlockID.ToHex()) + require.NotEqualf(e.Testing, api.BlockStatePending, resp.Metadata.BlockState, "Block %s is pending in BlockMetadataConfirmed endpoint", blk.BlockID.ToHex()) + require.NotEqualf(e.Testing, api.BlockStateAccepted, resp.Metadata.BlockState, "Block %s is accepted in BlockMetadataConfirmed endpoint", blk.BlockID.ToHex()) + if resp.Block.Body.Type() == iotago.BlockBodyTypeBasic && resp.Block.Body.(*iotago.BasicBlockBody).Payload.PayloadType() == iotago.PayloadTaggedData { + // only count the basic blocks with tagged data, ignore the validation and candidate blocks + receivedCallback() + } case <-ctx.Done(): return } @@ -611,7 +616,7 @@ func (e *EventAPIDockerTestFramework) assertOutputMetadataTopics(ctx context.Con } } -func (e *EventAPIDockerTestFramework) AwaitEventAPITopics(t *testing.T, cancleFunc context.CancelFunc, numOfTopics int) error { +func (e *EventAPIDockerTestFramework) AwaitEventAPITopics(t *testing.T, cancelFunc context.CancelFunc, numOfTopics int) error { counter := 0 timer := time.NewTimer(e.optsWaitFor) defer timer.Stop() @@ -619,7 +624,7 @@ func (e *EventAPIDockerTestFramework) AwaitEventAPITopics(t *testing.T, cancleFu for { select { case <-timer.C: - cancleFunc() + cancelFunc() return ierrors.New("Timeout, did not receive signals from all topics") case <-e.finishChan: counter++ diff --git a/tools/docker-network/tests/dockertestframework/misc.go b/tools/docker-network/tests/dockertestframework/misc.go new file mode 100644 index 000000000..5e270276e --- /dev/null +++ b/tools/docker-network/tests/dockertestframework/misc.go @@ -0,0 +1,12 @@ +//go:build 
dockertests + +package dockertestframework + +import ( + iotago "github.com/iotaledger/iota.go/v4" +) + +func GetMaxRegistrationSlot(committedAPI iotago.API, epoch iotago.EpochIndex) iotago.SlotIndex { + epochEndSlot := committedAPI.TimeProvider().EpochEnd(epoch) + return epochEndSlot - committedAPI.ProtocolParameters().EpochNearingThreshold() +} diff --git a/tools/docker-network/tests/dockertestframework/options.go b/tools/docker-network/tests/dockertestframework/options.go index b706ce12a..74de1dba1 100644 --- a/tools/docker-network/tests/dockertestframework/options.go +++ b/tools/docker-network/tests/dockertestframework/options.go @@ -17,6 +17,30 @@ var DefaultProtocolParametersOptions = []options.Option[iotago.V3ProtocolParamet iotago.WithNetworkOptions(fmt.Sprintf("docker-tests-%d", time.Now().Unix()), iotago.PrefixTestnet), } +// ShortSlotsAndEpochsProtocolParametersOptionsFunc sets the protocol parameters to have 5s slots and 40s epochs. +// It needs to be a function, otherwise the time.Now() would be evaluated at initialization time, and not at the time of the test. +// This would cause the tests to force commit all slots at the beginning, because the genesis time is far in the past. +var ShortSlotsAndEpochsProtocolParametersOptionsFunc = func() []options.Option[iotago.V3ProtocolParameters] { + return []options.Option[iotago.V3ProtocolParameters]{ + iotago.WithStorageOptions(100, 1, 10, 100, 100, 100), + iotago.WithWorkScoreOptions(500, 110_000, 7_500, 40_000, 90_000, 50_000, 40_000, 70_000, 5_000, 15_000), + //iotago.WithTimeProviderOptions(0, time.Now().Unix(), 10, 13), + iotago.WithTimeProviderOptions(5, time.Now().Unix(), 3, 3), + //iotago.WithLivenessOptions(15, 30, 10, 20, 60), + iotago.WithLivenessOptions(3, 3, 2, 4, 5), + iotago.WithSupplyOptions(1813620509061365, 63, 1, 17, 32, 21, 70), + //iotago.WithCongestionControlOptions(1, 1, 1, 400_000_000, 250_000_000, 50_000_000, 1000, 100), + iotago.WithCongestionControlOptions(1, 1, 1, 100_000_000, 75_000_000, 50_000_000, 1000, 100), + iotago.WithStakingOptions(10, 10, 10), + iotago.WithVersionSignalingOptions(7, 5, 7), + //iotago.WithRewardsOptions(8, 11, 2, 384), + iotago.WithRewardsOptions(8, 10, 2, 384), + //iotago.WithTargetCommitteeSize(32), + iotago.WithTargetCommitteeSize(4), + iotago.WithChainSwitchingThreshold(3), + } +} + // DefaultAccountOptions are the default snapshot options for the docker network. 
func DefaultAccountOptions(protocolParams *iotago.V3ProtocolParameters) []options.Option[snapshotcreator.Options] { return []options.Option[snapshotcreator.Options]{ diff --git a/tools/docker-network/tests/dockertestframework/rewards.go b/tools/docker-network/tests/dockertestframework/rewards.go index 257517aaf..c2845d42c 100644 --- a/tools/docker-network/tests/dockertestframework/rewards.go +++ b/tools/docker-network/tests/dockertestframework/rewards.go @@ -11,24 +11,25 @@ import ( iotago "github.com/iotaledger/iota.go/v4" ) -func (d *DockerTestFramework) ClaimRewardsForValidator(ctx context.Context, validatorWallet *mock.Wallet) { - validatorAccountData := validatorWallet.BlockIssuer.AccountData +func (d *DockerTestFramework) ClaimRewardsForValidator(ctx context.Context, validatorWithWallet *mock.AccountWithWallet) { + wallet := validatorWithWallet.Wallet() + validatorAccountData := validatorWithWallet.Account() outputData := &mock.OutputData{ ID: validatorAccountData.OutputID, Address: validatorAccountData.Address, AddressIndex: validatorAccountData.AddressIndex, Output: validatorAccountData.Output, } - signedTx := validatorWallet.ClaimValidatorRewards("", outputData) + signedTx := wallet.ClaimValidatorRewards("", outputData) - validatorWallet.CreateAndSubmitBasicBlock(ctx, "claim_rewards_validator", mock.WithPayload(signedTx)) + wallet.CreateAndSubmitBasicBlock(ctx, "claim_rewards_validator", mock.WithPayload(signedTx)) d.AwaitTransactionPayloadAccepted(ctx, signedTx.Transaction.MustID()) // update account data of validator - validatorWallet.SetBlockIssuer(&mock.AccountData{ - ID: validatorWallet.BlockIssuer.AccountData.ID, - Address: validatorWallet.BlockIssuer.AccountData.Address, - AddressIndex: validatorWallet.BlockIssuer.AccountData.AddressIndex, + validatorWithWallet.UpdateAccount(&mock.AccountData{ + ID: wallet.BlockIssuer.AccountData.ID, + Address: wallet.BlockIssuer.AccountData.Address, + AddressIndex: wallet.BlockIssuer.AccountData.AddressIndex, OutputID: iotago.OutputIDFromTransactionIDAndIndex(signedTx.Transaction.MustID(), 0), Output: signedTx.Transaction.Outputs[0].(*iotago.AccountOutput), }) diff --git a/tools/docker-network/tests/eventapi_test.go b/tools/docker-network/tests/eventapi_test.go index b1255624e..529e338e4 100644 --- a/tools/docker-network/tests/eventapi_test.go +++ b/tools/docker-network/tests/eventapi_test.go @@ -5,33 +5,23 @@ package tests import ( "context" "fmt" + "sync/atomic" "testing" "time" "github.com/stretchr/testify/require" + "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" + "github.com/iotaledger/iota-core/pkg/testsuite/mock" "github.com/iotaledger/iota-core/tools/docker-network/tests/dockertestframework" iotago "github.com/iotaledger/iota.go/v4" "github.com/iotaledger/iota.go/v4/api" ) -var eventAPITests = map[string]func(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework){ - "Test_Commitments": test_Commitments, - "Test_ValidationBlocks": test_ValidationBlocks, - "Test_BasicTaggedDataBlocks": test_BasicTaggedDataBlocks, - "Test_DelegationTransactionBlocks": test_DelegationTransactionBlocks, - "Test_AccountTransactionBlocks": test_AccountTransactionBlocks, - "Test_FoundryTransactionBlocks": test_FoundryTransactionBlocks, - "Test_NFTTransactionBlocks": test_NFTTransactionBlocks, - "Test_BlockMetadataMatchedCoreAPI": test_BlockMetadataMatchedCoreAPI, -} - func Test_MQTTTopics(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, - dockertestframework.WithProtocolParametersOptions( - 
iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - )) + dockertestframework.WithProtocolParametersOptions(dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc()...), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -47,14 +37,66 @@ func Test_MQTTTopics(t *testing.T) { e := dockertestframework.NewEventAPIDockerTestFramework(t, d) - for name, test := range eventAPITests { - t.Run(name, func(t *testing.T) { - test(t, e) + // prepare accounts to speed up tests + accounts := d.CreateAccountsFromFaucet(context.Background(), 5) + + type test struct { + name string + testFunc func(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework, account *mock.AccountWithWallet) + account *mock.AccountWithWallet + } + + tests := []*test{ + { + name: "Test_Commitments", + testFunc: test_Commitments, + account: nil, + }, + { + name: "Test_ValidationBlocks", + testFunc: test_ValidationBlocks, + account: nil, + }, + { + name: "Test_BasicTaggedDataBlocks", + testFunc: test_BasicTaggedDataBlocks, + account: accounts[0], + }, + { + name: "Test_DelegationTransactionBlocks", + testFunc: test_DelegationTransactionBlocks, + account: accounts[1], + }, + { + name: "Test_AccountTransactionBlocks", + testFunc: test_AccountTransactionBlocks, + account: nil, + }, + { + name: "Test_FoundryTransactionBlocks", + testFunc: test_FoundryTransactionBlocks, + account: accounts[2], + }, + { + name: "Test_NFTTransactionBlocks", + testFunc: test_NFTTransactionBlocks, + account: accounts[3], + }, + { + name: "Test_BlockMetadataMatchedCoreAPI", + testFunc: test_BlockMetadataMatchedCoreAPI, + account: accounts[4], + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.testFunc(t, e, test.account) }) } } -func test_Commitments(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework) { +func test_Commitments(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework, _ *mock.AccountWithWallet) { // get event API client ready ctx, cancel := context.WithCancel(context.Background()) @@ -90,7 +132,7 @@ func test_Commitments(t *testing.T, e *dockertestframework.EventAPIDockerTestFra require.NoError(t, err) } -func test_ValidationBlocks(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework) { +func test_ValidationBlocks(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework, _ *mock.AccountWithWallet) { // get event API client ready ctx, cancel := context.WithCancel(context.Background()) @@ -120,19 +162,16 @@ func test_ValidationBlocks(t *testing.T, e *dockertestframework.EventAPIDockerTe require.NoError(t, err) } -func test_BasicTaggedDataBlocks(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework) { +func test_BasicTaggedDataBlocks(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework, account *mock.AccountWithWallet) { // get event API client ready ctx, cancel := context.WithCancel(context.Background()) eventClt := e.ConnectEventAPIClient(ctx) defer eventClt.Close() - // create an account to issue blocks - wallet, _ := e.DockerTestFramework().CreateAccountFromFaucet() - // prepare data blocks to send expectedBlocks := make(map[string]*iotago.Block) for i := 0; i < 10; i++ { - blk := e.DockerTestFramework().CreateTaggedDataBlock(wallet, []byte("tag")) + blk := e.DockerTestFramework().CreateTaggedDataBlock(account.Wallet(), []byte("tag")) 
expectedBlocks[blk.MustID().ToHex()] = blk } @@ -172,22 +211,21 @@ func test_BasicTaggedDataBlocks(t *testing.T, e *dockertestframework.EventAPIDoc require.NoError(t, err) } -func test_DelegationTransactionBlocks(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework) { +func test_DelegationTransactionBlocks(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework, account *mock.AccountWithWallet) { // get event API client ready ctx, cancel := context.WithCancel(context.Background()) eventClt := e.ConnectEventAPIClient(ctx) defer eventClt.Close() // create an account to issue blocks - wallet, _ := e.DockerTestFramework().CreateAccountFromFaucet() - fundsOutputData := e.DockerTestFramework().RequestFaucetFunds(ctx, wallet, iotago.AddressEd25519) + fundsOutputData := e.DockerTestFramework().RequestFaucetFunds(ctx, account.Wallet(), iotago.AddressEd25519) // prepare data blocks to send - delegationId, outputId, blk := e.DockerTestFramework().CreateDelegationBlockFromInput(wallet, e.DockerTestFramework().Node("V2").AccountAddress(t), fundsOutputData) + delegationId, outputId, blk := e.DockerTestFramework().CreateDelegationBlockFromInput(account.Wallet(), e.DockerTestFramework().Node("V2").AccountAddress(t), fundsOutputData) expectedBlocks := map[string]*iotago.Block{ blk.MustID().ToHex(): blk, } - delegationOutput := wallet.Output(outputId) + delegationOutput := account.Wallet().Output(outputId) asserts := []func(){ func() { e.AssertTransactionBlocks(ctx, eventClt, expectedBlocks) }, @@ -234,7 +272,7 @@ func test_DelegationTransactionBlocks(t *testing.T, e *dockertestframework.Event require.NoError(t, err) } -func test_AccountTransactionBlocks(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework) { +func test_AccountTransactionBlocks(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework, _ *mock.AccountWithWallet) { // get event API client ready ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -245,15 +283,15 @@ func test_AccountTransactionBlocks(t *testing.T, e *dockertestframework.EventAPI // implicit account transition { // create an implicit account by requesting faucet funds - wallet, implicitAccountOutputData := e.DockerTestFramework().CreateImplicitAccount(ctx) + implicitAccount := e.DockerTestFramework().CreateImplicitAccount(ctx, "account-tx-blocks") // prepare account transition block - accountData, _, blk := e.DockerTestFramework().TransitionImplicitAccountToAccountOutputBlock(wallet, implicitAccountOutputData, wallet.GetNewBlockIssuanceResponse()) + accountData, _, blk := e.DockerTestFramework().TransitionImplicitAccountToAccountOutputBlock(implicitAccount, implicitAccount.Wallet().GetNewBlockIssuanceResponse()) expectedBlocks := map[string]*iotago.Block{ blk.MustID().ToHex(): blk, } - accountOutputData := wallet.Output(accountData.OutputID) + accountOutputData := implicitAccount.Wallet().Output(accountData.OutputID) assertions := []func(){ func() { e.AssertTransactionBlocks(ctx, eventClt, expectedBlocks) }, @@ -303,22 +341,21 @@ func test_AccountTransactionBlocks(t *testing.T, e *dockertestframework.EventAPI } } -func test_FoundryTransactionBlocks(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework) { +func test_FoundryTransactionBlocks(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework, account *mock.AccountWithWallet) { // get event API client ready ctx, cancel := context.WithCancel(context.Background()) eventClt := e.ConnectEventAPIClient(ctx) defer eventClt.Close() { - wallet, 
account := e.DockerTestFramework().CreateAccountFromFaucet() - fundsOutputData := e.DockerTestFramework().RequestFaucetFunds(ctx, wallet, iotago.AddressEd25519) + fundsOutputData := e.DockerTestFramework().RequestFaucetFunds(ctx, account.Wallet(), iotago.AddressEd25519) // prepare foundry output block - foundryId, outputId, blk := e.DockerTestFramework().CreateFoundryBlockFromInput(wallet, fundsOutputData.ID, 5_000_000, 10_000_000_000) + foundryId, outputId, blk := e.DockerTestFramework().CreateFoundryBlockFromInput(account.Wallet(), fundsOutputData.ID, 5_000_000, 10_000_000_000) expectedBlocks := map[string]*iotago.Block{ blk.MustID().ToHex(): blk, } - foundryOutput := wallet.Output(outputId) + foundryOutput := account.Wallet().Output(outputId) assertions := []func(){ func() { e.AssertTransactionBlocks(ctx, eventClt, expectedBlocks) }, @@ -328,10 +365,10 @@ func test_FoundryTransactionBlocks(t *testing.T, e *dockertestframework.EventAPI func() { e.AssertTransactionBlocksByTag(ctx, eventClt, expectedBlocks, []byte("foundry")) }, func() { e.AssertTransactionMetadataByTransactionID(ctx, eventClt, outputId.TransactionID()) }, func() { e.AssertTransactionMetadataIncludedBlocks(ctx, eventClt, outputId.TransactionID()) }, - func() { e.AssertAccountOutput(ctx, eventClt, account.ID) }, + func() { e.AssertAccountOutput(ctx, eventClt, account.Account().ID) }, func() { e.AssertFoundryOutput(ctx, eventClt, foundryId) }, func() { e.AssertOutput(ctx, eventClt, outputId) }, - func() { e.AssertOutput(ctx, eventClt, account.OutputID) }, + func() { e.AssertOutput(ctx, eventClt, account.Account().OutputID) }, func() { e.AssertOutputsWithMetadataByUnlockConditionAndAddress(ctx, eventClt, api.EventAPIUnlockConditionAny, foundryOutput.Address) }, @@ -368,22 +405,21 @@ func test_FoundryTransactionBlocks(t *testing.T, e *dockertestframework.EventAPI } } -func test_NFTTransactionBlocks(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework) { +func test_NFTTransactionBlocks(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework, account *mock.AccountWithWallet) { // get event API client ready ctx, cancel := context.WithCancel(context.Background()) eventClt := e.ConnectEventAPIClient(ctx) defer eventClt.Close() { - wallet, _ := e.DockerTestFramework().CreateAccountFromFaucet() - fundsOutputData := e.DockerTestFramework().RequestFaucetFunds(ctx, wallet, iotago.AddressEd25519) + fundsOutputData := e.DockerTestFramework().RequestFaucetFunds(ctx, account.Wallet(), iotago.AddressEd25519) // prepare NFT output block - nftId, outputId, blk := e.DockerTestFramework().CreateNFTBlockFromInput(wallet, fundsOutputData) + nftId, outputId, blk := e.DockerTestFramework().CreateNFTBlockFromInput(account.Wallet(), fundsOutputData) expectedBlocks := map[string]*iotago.Block{ blk.MustID().ToHex(): blk, } - nftOutput := wallet.Output(outputId) + nftOutput := account.Wallet().Output(outputId) assertions := []func(){ func() { e.AssertTransactionBlocks(ctx, eventClt, expectedBlocks) }, @@ -431,18 +467,28 @@ func test_NFTTransactionBlocks(t *testing.T, e *dockertestframework.EventAPIDock } } -func test_BlockMetadataMatchedCoreAPI(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework) { +func test_BlockMetadataMatchedCoreAPI(t *testing.T, e *dockertestframework.EventAPIDockerTestFramework, account *mock.AccountWithWallet) { // get event API client ready ctx, cancel := context.WithCancel(context.Background()) eventClt := e.ConnectEventAPIClient(ctx) defer eventClt.Close() - { - wallet, _ := 
e.DockerTestFramework().CreateAccountFromFaucet() + receivedAcceptedCounter := atomic.Int64{} + receivedConfirmedCounter := atomic.Int64{} + sentCounter := atomic.Int64{} + { assertions := []func(){ - func() { e.AssertBlockMetadataStateAcceptedBlocks(ctx, eventClt) }, - func() { e.AssertBlockMetadataStateConfirmedBlocks(ctx, eventClt) }, + func() { + e.AssertBlockMetadataStateAcceptedBlocks(ctx, eventClt, func() { + receivedAcceptedCounter.Add(1) + }) + }, + func() { + e.AssertBlockMetadataStateConfirmedBlocks(ctx, eventClt, func() { + receivedConfirmedCounter.Add(1) + }) + }, } totalTopics := len(assertions) @@ -455,8 +501,26 @@ func test_BlockMetadataMatchedCoreAPI(t *testing.T, e *dockertestframework.Event require.NoError(t, err) // issue blocks - e.SubmitDataBlockStream(wallet, 5*time.Minute) + fmt.Println("Submitting blocks for 30s...") + var maxSlot iotago.SlotIndex + e.SubmitDataBlockStream(account.Wallet(), 30*time.Second, 1*time.Second, 10, func(block *blocks.Block) { + sentCounter.Add(1) + if block.ID().Slot() > maxSlot { + maxSlot = block.ID().Slot() + } + }) + + // wait until all blocks are committed + e.DockerTestFramework().AwaitCommittedSlot(maxSlot, true) + // wait until all topics receives all expected objects + time.Sleep(1 * time.Second) + + // cancel listening cancel() + + // check if we received all expected objects + require.Equal(t, sentCounter.Load(), receivedAcceptedCounter.Load(), "receivedAcceptedCounter != sentCounter") + require.Equal(t, sentCounter.Load(), receivedConfirmedCounter.Load(), "receivedConfirmedCounter != sentCounter") } } diff --git a/tools/docker-network/tests/mempool_invalid_signatures_test.go b/tools/docker-network/tests/mempool_invalid_signatures_test.go index 6af13dcac..6a3e1b5aa 100644 --- a/tools/docker-network/tests/mempool_invalid_signatures_test.go +++ b/tools/docker-network/tests/mempool_invalid_signatures_test.go @@ -6,7 +6,6 @@ import ( "context" "fmt" "testing" - "time" "github.com/stretchr/testify/require" @@ -18,12 +17,8 @@ import ( func Test_MempoolInvalidSignatures(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, - dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(4), - )) + dockertestframework.WithProtocolParametersOptions(dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc()...), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -37,7 +32,8 @@ func Test_MempoolInvalidSignatures(t *testing.T) { d.WaitUntilNetworkReady() - wallet, _ := d.CreateAccountFromFaucet() + account := d.CreateAccountFromFaucet("account-1") + wallet := account.Wallet() ctx := context.Background() fundsOutputData := d.RequestFaucetFunds(ctx, wallet, iotago.AddressEd25519) diff --git a/tools/docker-network/tests/rewards_test.go b/tools/docker-network/tests/rewards_test.go index 3a3022c25..f58ac553f 100644 --- a/tools/docker-network/tests/rewards_test.go +++ b/tools/docker-network/tests/rewards_test.go @@ -19,15 +19,19 @@ import ( // Test_ValidatorRewards tests the rewards for a validator. // 1. Create 2 accounts with staking feature. // 2. Issue candidacy payloads for the accounts and wait until the accounts is in the committee. -// 3. 
One of the account issues 3 validation blocks per slot, the other account issues 1 validation block per slot until claiming slot is reached. +// 3. One of the account issues 5 validation blocks per slot, the other account issues 1 validation block per slot until claiming slot is reached. // 4. Claim rewards and check if the mana increased as expected, the account that issued less validation blocks should have less mana. func Test_ValidatorRewards(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - iotago.WithStakingOptions(2, 10, 10), - )) + append( + dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc(), + iotago.WithStakingOptions(2, 10, 10), + iotago.WithRewardsOptions(8, 11, 2, 384), + iotago.WithTargetCommitteeSize(32), + )..., + ), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -46,90 +50,109 @@ func Test_ValidatorRewards(t *testing.T) { // cancel the context when the test is done t.Cleanup(cancel) - clt := d.DefaultWallet().Client - slotsDuration := clt.CommittedAPI().ProtocolParameters().SlotDurationInSeconds() + defaultClient := d.DefaultWallet().Client - // create good account - goodWallet, goodAccountOutputData := d.CreateImplicitAccount(ctx) + // create two implicit accounts for "good" and "lazy" validator + validatorCount := 2 + implicitAccounts := d.CreateImplicitAccounts(ctx, validatorCount, "goodValidator", "lazyValidator") - blockIssuance, err := clt.BlockIssuance(ctx) + blockIssuance, err := defaultClient.BlockIssuance(ctx) require.NoError(t, err) latestCommitmentSlot := blockIssuance.LatestCommitment.Slot + + // we can't set the staking start epoch too much in the future, because it is bound to the latest commitment slot plus MaxCommittableAge stakingStartEpoch := d.DefaultWallet().StakingStartEpochFromSlot(latestCommitmentSlot) - // Set end epoch so the staking feature can be removed as soon as possible. - endEpoch := stakingStartEpoch + clt.CommittedAPI().ProtocolParameters().StakingUnbondingPeriod() - // The earliest epoch in which we can remove the staking feature and claim rewards. 
- goodClaimingSlot := clt.CommittedAPI().TimeProvider().EpochStart(endEpoch + 1) - - goodAccountData := d.CreateAccountFromImplicitAccount(goodWallet, - goodAccountOutputData, - blockIssuance, - dockertestframework.WithStakingFeature(100, 1, stakingStartEpoch, endEpoch), - ) - initialMana := goodAccountData.Output.StoredMana() - issueCandidacyPayloadInBackground(ctx, - d, - goodWallet, - clt.CommittedAPI().TimeProvider().CurrentSlot(), - goodClaimingSlot) + // we want to claim the rewards as soon as possible + stakingEndEpoch := stakingStartEpoch + defaultClient.CommittedAPI().ProtocolParameters().StakingUnbondingPeriod() + + // create accounts with staking feature for the validators + var wg sync.WaitGroup + validators := make([]*mock.AccountWithWallet, validatorCount) + for i := range validatorCount { + wg.Add(1) + + go func(validatorNr int) { + defer wg.Done() + + // create account with staking feature for every validator + validators[validatorNr] = d.CreateAccountFromImplicitAccount(implicitAccounts[validatorNr], + blockIssuance, + dockertestframework.WithStakingFeature(100, 1, stakingStartEpoch, stakingEndEpoch), + ) + }(i) + } + wg.Wait() + + goodValidator := validators[0] + lazyValidator := validators[1] - // create lazy account - lazyWallet, lazyAccountOutputData := d.CreateImplicitAccount(ctx) + goodValidatorInitialMana := goodValidator.Account().Output.StoredMana() + lazyValidatorInitialMana := lazyValidator.Account().Output.StoredMana() - blockIssuance, err = clt.BlockIssuance(ctx) - require.NoError(t, err) + annoucementStartEpoch := stakingStartEpoch - latestCommitmentSlot = blockIssuance.LatestCommitment.Slot - stakingStartEpoch = d.DefaultWallet().StakingStartEpochFromSlot(latestCommitmentSlot) - endEpoch = stakingStartEpoch + clt.CommittedAPI().ProtocolParameters().StakingUnbondingPeriod() - lazyClaimingSlot := clt.CommittedAPI().TimeProvider().EpochStart(endEpoch + 1) + // check if we missed to announce the candidacy during the staking start epoch because it takes time to create the account. 
+ latestAcceptedBlockSlot := d.NodeStatus("V1").LatestAcceptedBlockSlot + currentEpoch := defaultClient.CommittedAPI().TimeProvider().EpochFromSlot(latestAcceptedBlockSlot) + if annoucementStartEpoch < currentEpoch { + annoucementStartEpoch = currentEpoch + } - lazyAccountData := d.CreateAccountFromImplicitAccount(lazyWallet, - lazyAccountOutputData, - blockIssuance, - dockertestframework.WithStakingFeature(100, 1, stakingStartEpoch, endEpoch), - ) + maxRegistrationSlot := dockertestframework.GetMaxRegistrationSlot(defaultClient.CommittedAPI(), annoucementStartEpoch) + + // the candidacy announcement needs to be done before the nearing threshold of the epoch + // and we shouldn't start trying in the last possible slot, otherwise the tests might be wonky + if latestAcceptedBlockSlot >= maxRegistrationSlot { + // we are already too late, we can't issue candidacy payloads anymore, so lets start with the next epoch + annoucementStartEpoch++ + } - lazyInitialMana := lazyAccountData.Output.StoredMana() - issueCandidacyPayloadInBackground(ctx, - d, - lazyWallet, - clt.CommittedAPI().TimeProvider().CurrentSlot(), - lazyClaimingSlot) + // issue candidacy payloads for the validators in the background + for _, validator := range validators { + issueCandidacyAnnouncementsInBackground(ctx, + d, + validator.Wallet(), + annoucementStartEpoch, + // we don't need to issue candidacy payloads for the last epoch + stakingEndEpoch-1) + } // make sure the account is in the committee, so it can issue validation blocks - goodAccountAddrBech32 := goodAccountData.Address.Bech32(clt.CommittedAPI().ProtocolParameters().Bech32HRP()) - lazyAccountAddrBech32 := lazyAccountData.Address.Bech32(clt.CommittedAPI().ProtocolParameters().Bech32HRP()) - d.AssertCommittee(stakingStartEpoch+1, append(d.AccountsFromNodes(d.Nodes("V1", "V3", "V2", "V4")...), goodAccountAddrBech32, lazyAccountAddrBech32)) + goodValidatorAddrBech32 := goodValidator.Account().Address.Bech32(defaultClient.CommittedAPI().ProtocolParameters().Bech32HRP()) + lazyValidatorAddrBech32 := lazyValidator.Account().Address.Bech32(defaultClient.CommittedAPI().ProtocolParameters().Bech32HRP()) + d.AssertCommittee(annoucementStartEpoch+1, append(d.AccountsFromNodes(d.Nodes("V1", "V3", "V2", "V4")...), goodValidatorAddrBech32, lazyValidatorAddrBech32)) + + // create a new wait group for the next step + wg = sync.WaitGroup{} // issue validation blocks to have performance - currentSlot := clt.CommittedAPI().TimeProvider().CurrentSlot() - slotToWait := lazyClaimingSlot - currentSlot - secToWait := time.Duration(slotToWait) * time.Duration(slotsDuration) * time.Second - fmt.Println("Issue validation blocks, wait for ", secToWait, "until expected slot: ", lazyClaimingSlot) + currentSlot := defaultClient.CommittedAPI().TimeProvider().CurrentSlot() + validationBlocksEndSlot := defaultClient.CommittedAPI().TimeProvider().EpochEnd(stakingEndEpoch) + secondsToWait := time.Duration(validationBlocksEndSlot-currentSlot) * time.Duration(defaultClient.CommittedAPI().ProtocolParameters().SlotDurationInSeconds()) * time.Second + fmt.Println("Issuing validation blocks, wait for ", secondsToWait, "until expected slot: ", validationBlocksEndSlot) - var wg sync.WaitGroup - issueValidationBlockInBackground(ctx, &wg, goodWallet, currentSlot, goodClaimingSlot, 5) - issueValidationBlockInBackground(ctx, &wg, lazyWallet, currentSlot, lazyClaimingSlot, 1) + issueValidationBlocksInBackground(ctx, d, &wg, goodValidator.Wallet(), currentSlot, validationBlocksEndSlot, 5) + 
issueValidationBlocksInBackground(ctx, d, &wg, lazyValidator.Wallet(), currentSlot, validationBlocksEndSlot, 1) + // wait until all validation blocks are issued wg.Wait() // claim rewards that put to the account output - d.AwaitCommitment(lazyClaimingSlot) - d.ClaimRewardsForValidator(ctx, goodWallet) - d.ClaimRewardsForValidator(ctx, lazyWallet) + d.AwaitCommittedSlot(validationBlocksEndSlot, true) + d.ClaimRewardsForValidator(ctx, goodValidator) + d.ClaimRewardsForValidator(ctx, lazyValidator) // check if the mana increased as expected - goodWalletAccountOutput := goodWallet.BlockIssuer.AccountData.Output - require.Greater(t, goodWalletAccountOutput.StoredMana(), initialMana) + goodValidatorFinalMana := goodValidator.Account().Output.StoredMana() + lazyValidatorFinalMana := lazyValidator.Account().Output.StoredMana() - lazyWalletAccountOutput := lazyWallet.BlockIssuer.AccountData.Output - require.Greater(t, lazyWalletAccountOutput.StoredMana(), lazyInitialMana) + require.Greater(t, goodValidatorFinalMana, goodValidatorInitialMana) + require.Greater(t, lazyValidatorFinalMana, lazyValidatorInitialMana) // account that issued more validation blocks should have more mana - require.Greater(t, goodWalletAccountOutput.StoredMana(), lazyWalletAccountOutput.StoredMana()) + require.Greater(t, goodValidatorFinalMana, lazyValidatorFinalMana) } // Test_DelegatorRewards tests the rewards for a delegator. @@ -139,10 +162,14 @@ func Test_ValidatorRewards(t *testing.T) { func Test_DelegatorRewards(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 3), - iotago.WithLivenessOptions(10, 10, 2, 4, 5), - iotago.WithStakingOptions(3, 10, 10), - )) + append( + dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc(), + iotago.WithStakingOptions(3, 10, 10), + iotago.WithRewardsOptions(8, 11, 2, 384), + iotago.WithTargetCommitteeSize(32), + )..., + ), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -157,12 +184,15 @@ func Test_DelegatorRewards(t *testing.T) { d.WaitUntilNetworkReady() ctx := context.Background() - delegatorWallet, _ := d.CreateAccountFromFaucet() + + account := d.CreateAccountFromFaucet("account-1") + delegatorWallet := account.Wallet() + clt := delegatorWallet.Client // delegate funds to V2 delegationOutputData := d.DelegateToValidator(delegatorWallet, d.Node("V2").AccountAddress(t)) - d.AwaitCommitment(delegationOutputData.ID.CreationSlot()) + d.AwaitCommittedSlot(delegationOutputData.ID.CreationSlot(), true) // check if V2 received the delegator stake v2Resp, err := clt.Validator(ctx, d.Node("V2").AccountAddress(t)) @@ -172,12 +202,7 @@ func Test_DelegatorRewards(t *testing.T) { // wait until next epoch so the rewards can be claimed //nolint:forcetypeassert expectedSlot := clt.CommittedAPI().TimeProvider().EpochStart(delegationOutputData.Output.(*iotago.DelegationOutput).StartEpoch + 2) - if currentSlot := clt.CommittedAPI().TimeProvider().CurrentSlot(); currentSlot < expectedSlot { - slotToWait := expectedSlot - currentSlot - secToWait := time.Duration(slotToWait) * time.Duration(clt.CommittedAPI().ProtocolParameters().SlotDurationInSeconds()) * time.Second - fmt.Println("Wait for ", secToWait, "until expected slot: ", expectedSlot) - time.Sleep(secToWait) - } + d.AwaitLatestAcceptedBlockSlot(expectedSlot, true) // claim rewards 
that put to an basic output rewardsOutputID := d.ClaimRewardsForDelegator(ctx, delegatorWallet, delegationOutputData) @@ -197,10 +222,14 @@ func Test_DelegatorRewards(t *testing.T) { func Test_DelayedClaimingRewards(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 4), - iotago.WithLivenessOptions(10, 10, 2, 4, 8), - iotago.WithStakingOptions(3, 10, 10), - )) + append( + dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc(), + iotago.WithStakingOptions(3, 10, 10), + iotago.WithRewardsOptions(8, 11, 2, 384), + iotago.WithTargetCommitteeSize(32), + )..., + ), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -215,13 +244,16 @@ func Test_DelayedClaimingRewards(t *testing.T) { d.WaitUntilNetworkReady() ctx := context.Background() - delegatorWallet, _ := d.CreateAccountFromFaucet() + + account := d.CreateAccountFromFaucet("account-1") + delegatorWallet := account.Wallet() + clt := delegatorWallet.Client { // delegate funds to V2 delegationOutputData := d.DelegateToValidator(delegatorWallet, d.Node("V2").AccountAddress(t)) - d.AwaitCommitment(delegationOutputData.ID.CreationSlot()) + d.AwaitCommittedSlot(delegationOutputData.ID.CreationSlot(), true) // check if V2 received the delegator stake v2Resp, err := clt.Validator(ctx, d.Node("V2").AccountAddress(t)) @@ -234,7 +266,7 @@ func Test_DelayedClaimingRewards(t *testing.T) { latestCommitmentSlot := delegatorWallet.GetNewBlockIssuanceResponse().LatestCommitment.Slot delegationEndEpoch := dockertestframework.GetDelegationEndEpoch(apiForSlot, currentSlot, latestCommitmentSlot) delegationOutputData = d.DelayedClaimingTransition(ctx, delegatorWallet, delegationOutputData) - d.AwaitCommitment(delegationOutputData.ID.CreationSlot()) + d.AwaitCommittedSlot(delegationOutputData.ID.CreationSlot(), true) // the delegated stake should be removed from the validator, so the pool stake should equal to the validator stake v2Resp, err = clt.Validator(ctx, d.Node("V2").AccountAddress(t)) @@ -243,12 +275,8 @@ func Test_DelayedClaimingRewards(t *testing.T) { // wait until next epoch to destroy the delegation expectedSlot := clt.CommittedAPI().TimeProvider().EpochStart(delegationEndEpoch) - if currentSlot := delegatorWallet.CurrentSlot(); currentSlot < expectedSlot { - slotToWait := expectedSlot - currentSlot - secToWait := time.Duration(slotToWait) * time.Duration(clt.CommittedAPI().ProtocolParameters().SlotDurationInSeconds()) * time.Second - fmt.Println("Wait for ", secToWait, "until expected slot: ", expectedSlot) - time.Sleep(secToWait) - } + d.AwaitLatestAcceptedBlockSlot(expectedSlot, true) + fmt.Println("Claim rewards for delegator") d.ClaimRewardsForDelegator(ctx, delegatorWallet, delegationOutputData) } @@ -259,7 +287,7 @@ func Test_DelayedClaimingRewards(t *testing.T) { // delay claiming rewards in the same slot of delegation delegationOutputData = d.DelayedClaimingTransition(ctx, delegatorWallet, delegationOutputData) - d.AwaitCommitment(delegationOutputData.ID.CreationSlot()) + d.AwaitCommittedSlot(delegationOutputData.ID.CreationSlot(), true) // the delegated stake should be 0, thus poolStake should be equal to validatorStake v2Resp, err := clt.Validator(ctx, d.Node("V2").AccountAddress(t)) @@ -271,60 +299,89 @@ func Test_DelayedClaimingRewards(t *testing.T) { } } -func 
issueCandidacyPayloadInBackground(ctx context.Context, d *dockertestframework.DockerTestFramework, wallet *mock.Wallet, startSlot, endSlot iotago.SlotIndex) { +// issue candidacy announcements for the account in the background, one per epoch +func issueCandidacyAnnouncementsInBackground(ctx context.Context, d *dockertestframework.DockerTestFramework, wallet *mock.Wallet, startEpoch iotago.EpochIndex, endEpoch iotago.EpochIndex) { go func() { - fmt.Println("Issuing candidacy payloads for account", wallet.BlockIssuer.AccountData.ID, "in the background...") - defer fmt.Println("Issuing candidacy payloads for account", wallet.BlockIssuer.AccountData.ID, "in the background......done") + fmt.Println("Issuing candidacy announcements for account", wallet.BlockIssuer.AccountData.ID, "in the background...") + defer fmt.Println("Issuing candidacy announcements for account", wallet.BlockIssuer.AccountData.ID, "in the background... done!") - for i := startSlot; i < endSlot; i++ { - // wait until the slot is reached - for { - if ctx.Err() != nil { - // context is canceled - return - } + for epoch := startEpoch; epoch <= endEpoch; epoch++ { + if ctx.Err() != nil { + // context is canceled + return + } - if wallet.CurrentSlot() == i { - break - } - time.Sleep(2 * time.Second) + // wait until the epoch start is reached + d.AwaitLatestAcceptedBlockSlot(d.DefaultWallet().Client.CommittedAPI().TimeProvider().EpochStart(epoch), false) + if ctx.Err() != nil { + // context is canceled + return } - d.IssueCandidacyPayloadFromAccount(ctx, wallet) + fmt.Println("Issuing candidacy payload for account", wallet.BlockIssuer.AccountData.ID, "in epoch", epoch, "...") + committedAPI := d.DefaultWallet().Client.CommittedAPI() + + // check if we are still in the epoch + latestAcceptedBlockSlot := d.NodeStatus("V1").LatestAcceptedBlockSlot + currentEpoch := committedAPI.TimeProvider().EpochFromSlot(latestAcceptedBlockSlot) + + require.Equal(d.Testing, epoch, currentEpoch, "epoch mismatch") + + // the candidacy announcement needs to be done before the nearing threshold + maxRegistrationSlot := dockertestframework.GetMaxRegistrationSlot(committedAPI, epoch) + + candidacyBlockID := d.IssueCandidacyPayloadFromAccount(ctx, wallet) + require.LessOrEqualf(d.Testing, candidacyBlockID.Slot(), maxRegistrationSlot, "candidacy announcement block slot is greater than max registration slot for the epoch (%d>%d)", candidacyBlockID.Slot(), maxRegistrationSlot) } }() } -func issueValidationBlockInBackground(ctx context.Context, wg *sync.WaitGroup, wallet *mock.Wallet, startSlot, endSlot iotago.SlotIndex, blocksPerSlot int) { +// issue validation blocks for the account in the background, blocksPerSlot per slot with a cooldown between the blocks +func issueValidationBlocksInBackground(ctx context.Context, d *dockertestframework.DockerTestFramework, wg *sync.WaitGroup, wallet *mock.Wallet, startSlot iotago.SlotIndex, endSlot iotago.SlotIndex, blocksPerSlot int) { wg.Add(1) go func() { defer wg.Done() - fmt.Println("Issuing validation block for wallet", wallet.Name, "in the background...") - defer fmt.Println("Issuing validation block for wallet", wallet.Name, "in the background......done") - for i := startSlot; i < endSlot; i++ { + fmt.Println("Issuing validation blocks for wallet", wallet.Name, "in the background...") + defer fmt.Println("Issuing validation blocks for wallet", wallet.Name, "in the background... 
done!") + + validationBlockCooldown := time.Duration(d.DefaultWallet().Client.CommittedAPI().ProtocolParameters().SlotDurationInSeconds()) * time.Second / time.Duration(blocksPerSlot) + + for slot := startSlot; slot <= endSlot; slot++ { + if ctx.Err() != nil { + // context is canceled + return + } + // wait until the slot is reached - for { - if ctx.Err() != nil { - // context is canceled - return - } + d.AwaitLatestAcceptedBlockSlot(slot, false) + if ctx.Err() != nil { + // context is canceled + return + } - if wallet.CurrentSlot() == i { - break - } - time.Sleep(2 * time.Second) + // check if we are still in the slot + currentCommittedSlot := d.NodeStatus("V1").LatestCommitmentID.Slot() + if currentCommittedSlot >= slot { + // slot is already committed, no need to issue validation blocks + continue } - for range blocksPerSlot { + ts := time.Now() + for validationBlockNr := range blocksPerSlot { if ctx.Err() != nil { // context is canceled return } + fmt.Println("Issuing validation block nr.", validationBlockNr, "for wallet", wallet.Name, "in slot", slot, "...") wallet.CreateAndSubmitValidationBlock(ctx, "", nil) - time.Sleep(1 * time.Second) + + if validationBlockNr < blocksPerSlot-1 { + // wait until the next validation block can be issued + <-time.After(time.Until(ts.Add(time.Duration(validationBlockNr+1) * validationBlockCooldown))) + } } } }() diff --git a/tools/docker-network/tests/run_tests.sh b/tools/docker-network/tests/run_tests.sh index 5abf2b638..d614ab092 100755 --- a/tools/docker-network/tests/run_tests.sh +++ b/tools/docker-network/tests/run_tests.sh @@ -32,7 +32,10 @@ popd # If no arguments were passed, run all tests if [ $# -eq 0 ]; then echo "Running all tests..." - go test ./... -tags ${BUILD_TAGS} -v -timeout=${TIMEOUT} + # The following command will run all tests in the current directory only, but we + # want to do it this way because otherwise the logs are not shown in the console + # until all the tests are done. 
+ go test -tags ${BUILD_TAGS} -v -timeout=${TIMEOUT} else # Concatenate all test names with a pipe tests=$(printf "|%s" "$@") diff --git a/tools/docker-network/tests/sync_snapshot_test.go b/tools/docker-network/tests/sync_snapshot_test.go index f800a426f..11144b31e 100644 --- a/tools/docker-network/tests/sync_snapshot_test.go +++ b/tools/docker-network/tests/sync_snapshot_test.go @@ -18,12 +18,12 @@ import ( func Test_SyncFromSnapshot(t *testing.T) { d := dockertestframework.NewDockerTestFramework(t, dockertestframework.WithProtocolParametersOptions( - iotago.WithTimeProviderOptions(0, time.Now().Unix(), 10, 3), - iotago.WithLivenessOptions(10, 10, 2, 4, 5), - iotago.WithCongestionControlOptions(1, 1, 1, 400_000, 250_000, 50_000_000, 1000, 100), - iotago.WithRewardsOptions(8, 10, 2, 384), - iotago.WithTargetCommitteeSize(3), - )) + append( + dockertestframework.ShortSlotsAndEpochsProtocolParametersOptionsFunc(), + iotago.WithTargetCommitteeSize(3), + )..., + ), + ) defer d.Stop() d.AddValidatorNode("V1", "docker-network-inx-validator-1-1", "http://localhost:8050", "rms1pzg8cqhfxqhq7pt37y8cs4v5u4kcc48lquy2k73ehsdhf5ukhya3y5rx2w6") @@ -38,29 +38,29 @@ func Test_SyncFromSnapshot(t *testing.T) { d.WaitUntilNetworkReady() ctx := context.Background() - clt := d.DefaultWallet().Client + defaultClient := d.DefaultWallet().Client - createAccountAndDelegateTo := func(receiver *dockertestframework.Node) (*mock.Wallet, *mock.AccountData, *mock.OutputData) { - delegatorWallet, accountData := d.CreateAccountFromFaucet() - clt := delegatorWallet.Client + createAccountAndDelegateTo := func(receiver *dockertestframework.Node, name string) (*mock.Wallet, *mock.AccountData, *mock.OutputData) { + delegatorAccount := d.CreateAccountFromFaucet(name) + clt := delegatorAccount.Wallet().Client // delegate funds to receiver - delegationOutputData := d.DelegateToValidator(delegatorWallet, receiver.AccountAddress(t)) - d.AwaitCommitment(delegationOutputData.ID.CreationSlot()) + delegationOutputData := d.DelegateToValidator(delegatorAccount.Wallet(), receiver.AccountAddress(t)) + d.AwaitCommittedSlot(delegationOutputData.ID.CreationSlot(), true) // check if receiver received the delegator stake resp, err := clt.Validator(ctx, receiver.AccountAddress(t)) require.NoError(t, err) require.Greater(t, resp.PoolStake, resp.ValidatorStake) - return delegatorWallet, accountData, delegationOutputData + return delegatorAccount.Wallet(), delegatorAccount.Account(), delegationOutputData } - v1DelegatorWallet, v1DelegatorAccountData, v1DelegationOutputData := createAccountAndDelegateTo(d.Node("V1")) - v2DelegatorWallet, v2DelegatorAccountData, v2DelegationOutputData := createAccountAndDelegateTo(d.Node("V2")) + v1DelegatorWallet, v1DelegatorAccountData, v1DelegationOutputData := createAccountAndDelegateTo(d.Node("V1"), "account-1") + v2DelegatorWallet, v2DelegatorAccountData, v2DelegationOutputData := createAccountAndDelegateTo(d.Node("V2"), "account-2") //nolint:forcetypeassert - currentEpoch := clt.CommittedAPI().TimeProvider().CurrentEpoch() + currentEpoch := defaultClient.CommittedAPI().TimeProvider().CurrentEpoch() expectedEpoch := v2DelegationOutputData.Output.(*iotago.DelegationOutput).StartEpoch + 2 for range expectedEpoch - currentEpoch { d.AwaitEpochFinalized() @@ -80,7 +80,7 @@ func Test_SyncFromSnapshot(t *testing.T) { d.AwaitEpochFinalized() - managementClient, err := clt.Management(getContextWithTimeout(5 * time.Second)) + managementClient, err := defaultClient.Management(getContextWithTimeout(5 * time.Second)) 
require.NoError(t, err) // take the snapshot and restart node5 @@ -96,7 +96,7 @@ func Test_SyncFromSnapshot(t *testing.T) { d.AwaitEpochFinalized() // check if the committee is the same among nodes - currentEpoch := clt.CommittedAPI().TimeProvider().CurrentEpoch() + currentEpoch := defaultClient.CommittedAPI().TimeProvider().CurrentEpoch() d.AssertCommittee(currentEpoch, d.AccountsFromNodes(d.Nodes("V1", "V2", "V4")...)) // check if the account and rewardsOutput are available @@ -117,8 +117,8 @@ func Test_SyncFromSnapshot(t *testing.T) { require.NoError(t, err) // create V3 delegator, the committee should change to V1, V3, V4 - v3DelegatorWallet, v3DelegatorAccountData, v3DelegationOutputData := createAccountAndDelegateTo(d.Node("V3")) - currentEpoch = clt.CommittedAPI().TimeProvider().CurrentEpoch() + v3DelegatorWallet, v3DelegatorAccountData, v3DelegationOutputData := createAccountAndDelegateTo(d.Node("V3"), "account-3") + currentEpoch = defaultClient.CommittedAPI().TimeProvider().CurrentEpoch() expectedEpoch = v3DelegationOutputData.Output.(*iotago.DelegationOutput).StartEpoch + 1 for range expectedEpoch - currentEpoch { d.AwaitEpochFinalized() @@ -136,7 +136,7 @@ func Test_SyncFromSnapshot(t *testing.T) { // Deletes the database of node5 and restarts it with the just created snapshot. d.ResetNode("node5", response.FilePath) - currentEpoch = clt.CommittedAPI().TimeProvider().EpochFromSlot(v3DelegatorWallet.CurrentSlot()) + currentEpoch = defaultClient.CommittedAPI().TimeProvider().EpochFromSlot(v3DelegatorWallet.CurrentSlot()) d.AssertCommittee(currentEpoch, d.AccountsFromNodes(d.Nodes("V1", "V3", "V4")...)) node5Clt = d.Client("node5")
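Two short illustrative sketches follow as editorial notes; they are not part of the patch. First, the epoch-selection logic around `GetMaxRegistrationSlot` in `rewards_test.go`: a candidacy announcement only counts if it is accepted no later than `EpochEnd(epoch) - EpochNearingThreshold`, so when the latest accepted block slot is already past that cutoff, the test starts announcing in the next epoch. The sketch below uses plain integers instead of the iotago types, and the helper name and numbers are made up for illustration.

```go
package main

import "fmt"

// maxRegistrationSlot mirrors GetMaxRegistrationSlot from misc.go, but with
// plain integers instead of the iotago API types.
func maxRegistrationSlot(epochEndSlot, epochNearingThreshold uint32) uint32 {
	return epochEndSlot - epochNearingThreshold
}

func main() {
	// hypothetical protocol values and chain state
	const (
		epochEndSlot          = 40 // last slot of the current epoch
		epochNearingThreshold = 4  // registration closes 4 slots before the epoch end
		latestAcceptedSlot    = 37 // latest accepted block slot reported by the node
	)

	announcementStartEpoch := uint32(5) // hypothetical staking start epoch

	if latestAcceptedSlot >= maxRegistrationSlot(epochEndSlot, epochNearingThreshold) {
		// already past the registration cutoff, start announcing in the next epoch
		announcementStartEpoch++
	}

	fmt.Println("first candidacy announcement epoch:", announcementStartEpoch)
}
```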
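Second, the pacing used by `issueValidationBlocksInBackground`: instead of a fixed one-second sleep, the rework spreads `blocksPerSlot` validation blocks evenly across a slot by waiting until `ts + (n+1)*cooldown` after each block. The self-contained sketch below reproduces only that scheduling; the function name `issueEvenlyPerSlot`, the print statement standing in for block issuance, and the values in `main` are assumptions for illustration.

```go
package main

import (
	"fmt"
	"time"
)

// issueEvenlyPerSlot spreads blocksPerSlot "issue" actions evenly across one
// slot of the given duration, mirroring the cooldown logic used when issuing
// validation blocks in the background.
func issueEvenlyPerSlot(slotDuration time.Duration, blocksPerSlot int, issue func(n int)) {
	cooldown := slotDuration / time.Duration(blocksPerSlot)

	ts := time.Now()
	for n := 0; n < blocksPerSlot; n++ {
		issue(n)

		if n < blocksPerSlot-1 {
			// wait until the next issuance time, measured from the slot start
			<-time.After(time.Until(ts.Add(time.Duration(n+1) * cooldown)))
		}
	}
}

func main() {
	// hypothetical 3s slot with 5 blocks -> one block roughly every 600ms
	issueEvenlyPerSlot(3*time.Second, 5, func(n int) {
		fmt.Println("issuing validation block nr.", n, "at", time.Now().Format("15:04:05.000"))
	})
}
```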