
Commit

Merge pull request #5813 from multiversx/tests-for-requests
Tests for requests
sstanculeanu authored Mar 5, 2024
2 parents c29d8d5 + 319fc0c commit 4a0b699
Showing 10 changed files with 1,575 additions and 111 deletions.
5 changes: 3 additions & 2 deletions factory/processing/blockProcessorCreator_test.go
@@ -20,6 +20,7 @@ import (
"github.com/multiversx/mx-chain-go/testscommon"
componentsMock "github.com/multiversx/mx-chain-go/testscommon/components"
"github.com/multiversx/mx-chain-go/testscommon/hashingMocks"
"github.com/multiversx/mx-chain-go/testscommon/processMocks"
stateMock "github.com/multiversx/mx-chain-go/testscommon/state"
storageManager "github.com/multiversx/mx-chain-go/testscommon/storage"
trieMock "github.com/multiversx/mx-chain-go/testscommon/trie"
@@ -41,7 +42,7 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) {

bp, err := pcf.NewBlockProcessor(
&testscommon.RequestHandlerStub{},
&mock.ForkDetectorStub{},
&processMocks.ForkDetectorStub{},
&mock.EpochStartTriggerStub{},
&mock.BoostrapStorerStub{},
&mock.ValidatorStatisticsProcessorStub{},
@@ -167,7 +168,7 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) {

bp, err := pcf.NewBlockProcessor(
&testscommon.RequestHandlerStub{},
&mock.ForkDetectorStub{},
&processMocks.ForkDetectorStub{},
&mock.EpochStartTriggerStub{},
&mock.BoostrapStorerStub{},
&mock.ValidatorStatisticsProcessorStub{},
@@ -5,7 +5,6 @@ import (
"encoding/hex"
"fmt"
"math/big"
"sync"
"testing"
"time"

@@ -14,13 +13,14 @@ import (
"github.com/multiversx/mx-chain-core-go/data/block"
"github.com/multiversx/mx-chain-core-go/data/transaction"
"github.com/multiversx/mx-chain-crypto-go"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/multiversx/mx-chain-go/dataRetriever"
"github.com/multiversx/mx-chain-go/integrationTests"
"github.com/multiversx/mx-chain-go/process/factory"
"github.com/multiversx/mx-chain-go/sharding"
"github.com/multiversx/mx-chain-go/state"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) {
@@ -61,15 +61,15 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) {

proposerNode := nodes[0]

//sender shard keys, receivers keys
// sender shard keys, receivers keys
sendersPrivateKeys := make([]crypto.PrivateKey, 3)
receiversPublicKeys := make(map[uint32][]crypto.PublicKey)
for i := 0; i < txToGenerateInEachMiniBlock; i++ {
sendersPrivateKeys[i], _, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard)
//receivers in same shard with the sender
// receivers in same shard with the sender
_, pk, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard)
receiversPublicKeys[senderShard] = append(receiversPublicKeys[senderShard], pk)
//receivers in other shards
// receivers in other shards
for _, shardId := range recvShards {
_, pk, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, shardId)
receiversPublicKeys[shardId] = append(receiversPublicKeys[shardId], pk)
@@ -111,13 +111,13 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) {
continue
}

//test sender balances
// test sender balances
for _, sk := range sendersPrivateKeys {
valTransferred := big.NewInt(0).Mul(totalValuePerTx, big.NewInt(int64(len(receiversPublicKeys))))
valRemaining := big.NewInt(0).Sub(valMinting, valTransferred)
integrationTests.TestPrivateKeyHasBalance(t, n, sk, valRemaining)
}
//test receiver balances from same shard
// test receiver balances from same shard
for _, pk := range receiversPublicKeys[proposerNode.ShardCoordinator.SelfId()] {
integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx)
}
@@ -136,7 +136,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) {
continue
}

//test receiver balances from same shard
// test receiver balances from same shard
for _, pk := range receiversPublicKeys[n.ShardCoordinator.SelfId()] {
integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx)
}
@@ -352,87 +352,6 @@ func TestSimpleTransactionsWithMoreValueThanBalanceYieldReceiptsInMultiShardedEn
}
}

func TestExecuteBlocksWithGapsBetweenBlocks(t *testing.T) {
//TODO fix this test
t.Skip("TODO fix this test")
if testing.Short() {
t.Skip("this is not a short test")
}
nodesPerShard := 2
shardConsensusGroupSize := 2
nbMetaNodes := 400
nbShards := 1
consensusGroupSize := 400

cacheMut := &sync.Mutex{}

putCounter := 0
cacheMap := make(map[string]interface{})

// create map of shard - testNodeProcessors for metachain and shard chain
nodesMap := integrationTests.CreateNodesWithNodesCoordinatorWithCacher(
nodesPerShard,
nbMetaNodes,
nbShards,
shardConsensusGroupSize,
consensusGroupSize,
)

roundsPerEpoch := uint64(1000)
maxGasLimitPerBlock := uint64(100000)
gasPrice := uint64(10)
gasLimit := uint64(100)
for _, nodes := range nodesMap {
integrationTests.SetEconomicsParameters(nodes, maxGasLimitPerBlock, gasPrice, gasLimit)
integrationTests.DisplayAndStartNodes(nodes[0:1])

for _, node := range nodes {
node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch)
}
}

defer func() {
for _, nodes := range nodesMap {
for _, n := range nodes {
n.Close()
}
}
}()

round := uint64(1)
roundDifference := 10
nonce := uint64(1)

firstNodeOnMeta := nodesMap[core.MetachainShardId][0]
body, header, _ := firstNodeOnMeta.ProposeBlock(round, nonce)

// set bitmap for all consensus nodes signing
bitmap := make([]byte, consensusGroupSize/8+1)
for i := range bitmap {
bitmap[i] = 0xFF
}

bitmap[consensusGroupSize/8] >>= uint8(8 - (consensusGroupSize % 8))
err := header.SetPubKeysBitmap(bitmap)
assert.Nil(t, err)

firstNodeOnMeta.CommitBlock(body, header)

round += uint64(roundDifference)
nonce++
putCounter = 0

cacheMut.Lock()
for k := range cacheMap {
delete(cacheMap, k)
}
cacheMut.Unlock()

firstNodeOnMeta.ProposeBlock(round, nonce)

assert.Equal(t, roundDifference, putCounter)
}

// TestShouldSubtractTheCorrectTxFee uses the mock VM as its gas model is predictable
// The test checks the tx fee subtraction from the sender account when deploying a SC
// It also checks that the fee obtained by the leader is correct
141 changes: 141 additions & 0 deletions process/block/export_test.go
@@ -11,6 +11,7 @@ import (
"github.com/multiversx/mx-chain-core-go/data/scheduled"
"github.com/multiversx/mx-chain-core-go/hashing"
"github.com/multiversx/mx-chain-core-go/marshal"

"github.com/multiversx/mx-chain-go/dataRetriever"
"github.com/multiversx/mx-chain-go/process"
"github.com/multiversx/mx-chain-go/process/block/bootstrapStorage"
@@ -182,6 +183,10 @@ func (mp *metaProcessor) ReceivedShardHeader(header data.HeaderHandler, shardHea
mp.receivedShardHeader(header, shardHeaderHash)
}

func (mp *metaProcessor) GetDataPool() dataRetriever.PoolsHolder {
return mp.dataPool
}

func (mp *metaProcessor) AddHdrHashToRequestedList(hdr data.HeaderHandler, hdrHash []byte) {
mp.hdrsForCurrBlock.mutHdrsForBlock.Lock()
defer mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock()
@@ -565,3 +570,139 @@ func (bp *baseProcessor) SetNonceOfFirstCommittedBlock(nonce uint64) {
func (bp *baseProcessor) CheckSentSignaturesAtCommitTime(header data.HeaderHandler) error {
return bp.checkSentSignaturesAtCommitTime(header)
}

// GetHdrForBlock -
func (mp *metaProcessor) GetHdrForBlock() *hdrForBlock {
return mp.hdrsForCurrBlock
}

// ChannelReceiveAllHeaders -
func (mp *metaProcessor) ChannelReceiveAllHeaders() chan bool {
return mp.chRcvAllHdrs
}

// ComputeExistingAndRequestMissingShardHeaders -
func (mp *metaProcessor) ComputeExistingAndRequestMissingShardHeaders(metaBlock *block.MetaBlock) (uint32, uint32) {
return mp.computeExistingAndRequestMissingShardHeaders(metaBlock)
}

// ComputeExistingAndRequestMissingMetaHeaders -
func (sp *shardProcessor) ComputeExistingAndRequestMissingMetaHeaders(header data.ShardHeaderHandler) (uint32, uint32) {
return sp.computeExistingAndRequestMissingMetaHeaders(header)
}

// GetHdrForBlock -
func (sp *shardProcessor) GetHdrForBlock() *hdrForBlock {
return sp.hdrsForCurrBlock
}

// ChannelReceiveAllHeaders -
func (sp *shardProcessor) ChannelReceiveAllHeaders() chan bool {
return sp.chRcvAllMetaHdrs
}

// InitMaps -
func (hfb *hdrForBlock) InitMaps() {
hfb.initMaps()
hfb.resetMissingHdrs()
}

// Clone -
func (hfb *hdrForBlock) Clone() *hdrForBlock {
return hfb
}

// SetNumMissingHdrs -
func (hfb *hdrForBlock) SetNumMissingHdrs(num uint32) {
hfb.mutHdrsForBlock.Lock()
hfb.missingHdrs = num
hfb.mutHdrsForBlock.Unlock()
}

// SetNumMissingFinalityAttestingHdrs -
func (hfb *hdrForBlock) SetNumMissingFinalityAttestingHdrs(num uint32) {
hfb.mutHdrsForBlock.Lock()
hfb.missingFinalityAttestingHdrs = num
hfb.mutHdrsForBlock.Unlock()
}

// SetHighestHdrNonce -
func (hfb *hdrForBlock) SetHighestHdrNonce(shardId uint32, nonce uint64) {
hfb.mutHdrsForBlock.Lock()
hfb.highestHdrNonce[shardId] = nonce
hfb.mutHdrsForBlock.Unlock()
}

// HdrInfo -
type HdrInfo struct {
UsedInBlock bool
Hdr data.HeaderHandler
}

// SetHdrHashAndInfo -
func (hfb *hdrForBlock) SetHdrHashAndInfo(hash string, info *HdrInfo) {
hfb.mutHdrsForBlock.Lock()
hfb.hdrHashAndInfo[hash] = &hdrInfo{
hdr: info.Hdr,
usedInBlock: info.UsedInBlock,
}
hfb.mutHdrsForBlock.Unlock()
}

// GetHdrHashMap -
func (hfb *hdrForBlock) GetHdrHashMap() map[string]data.HeaderHandler {
m := make(map[string]data.HeaderHandler)

hfb.mutHdrsForBlock.RLock()
for hash, hi := range hfb.hdrHashAndInfo {
m[hash] = hi.hdr
}
hfb.mutHdrsForBlock.RUnlock()

return m
}

// GetHighestHdrNonce -
func (hfb *hdrForBlock) GetHighestHdrNonce() map[uint32]uint64 {
m := make(map[uint32]uint64)

hfb.mutHdrsForBlock.RLock()
for shardId, nonce := range hfb.highestHdrNonce {
m[shardId] = nonce
}
hfb.mutHdrsForBlock.RUnlock()

return m
}

// GetMissingHdrs -
func (hfb *hdrForBlock) GetMissingHdrs() uint32 {
hfb.mutHdrsForBlock.RLock()
defer hfb.mutHdrsForBlock.RUnlock()

return hfb.missingHdrs
}

// GetMissingFinalityAttestingHdrs -
func (hfb *hdrForBlock) GetMissingFinalityAttestingHdrs() uint32 {
hfb.mutHdrsForBlock.RLock()
defer hfb.mutHdrsForBlock.RUnlock()

return hfb.missingFinalityAttestingHdrs
}

// GetHdrHashAndInfo -
func (hfb *hdrForBlock) GetHdrHashAndInfo() map[string]*HdrInfo {
hfb.mutHdrsForBlock.RLock()
defer hfb.mutHdrsForBlock.RUnlock()

m := make(map[string]*HdrInfo)
for hash, hi := range hfb.hdrHashAndInfo {
m[hash] = &HdrInfo{
UsedInBlock: hi.usedInBlock,
Hdr: hi.hdr,
}
}

return m
}
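
Note on the new exports above: they give white-box tests in the block package mutex-guarded access to hdrsForCurrBlock, so request-handling behaviour can be asserted without reaching into private fields. Below is only a minimal sketch of how such a test could exercise them; createTestMetaProcessor is a hypothetical fixture (the commit's real tests build the processor through their own helpers), and the placement in a _test.go file inside package block is assumed:

```go
package block

import (
	"testing"

	"github.com/multiversx/mx-chain-core-go/data/block"
	"github.com/stretchr/testify/require"
)

// Sketch only: createTestMetaProcessor is a hypothetical fixture that would wire a
// metaProcessor with stub dependencies; it is not part of this commit.
func TestHdrForBlock_exportedAccessors(t *testing.T) {
	mp := createTestMetaProcessor(t)

	hfb := mp.GetHdrForBlock()
	hfb.InitMaps() // resets the tracking maps and the missing-headers counters

	// pretend two shard headers are still missing and the highest known nonce on shard 0 is 37
	hfb.SetNumMissingHdrs(2)
	hfb.SetHighestHdrNonce(0, 37)
	require.Equal(t, uint32(2), hfb.GetMissingHdrs())
	require.Equal(t, uint64(37), hfb.GetHighestHdrNonce()[0])

	// record a header as tracked for the current block and read it back through the copied maps
	hfb.SetHdrHashAndInfo("hash_0", &HdrInfo{
		UsedInBlock: true,
		Hdr:         &block.Header{Nonce: 37},
	})
	require.NotNil(t, hfb.GetHdrHashMap()["hash_0"])
	require.True(t, hfb.GetHdrHashAndInfo()["hash_0"].UsedInBlock)
}
```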