diff --git a/txcache/README.md b/txcache/README.md new file mode 100644 index 0000000000..cb6a564ce2 --- /dev/null +++ b/txcache/README.md @@ -0,0 +1,210 @@ +## Mempool + +### Glossary + +1. **selection session:** an ephemeral session during which the mempool selects transactions for a proposer. A session starts when a proposer asks the mempool for transactions and ends when the mempool returns the transactions. The most important part of a session is the _selection loop_. +2. **transaction PPU:** the price per unit of computation, for a transaction. It's computed as `initiallyPaidFee / gasLimit`. +3. **initially paid transaction fee:** the fee for processing a transaction, as known before its actual processing. That is, without knowing the _refund_ component. + +### Configuration + +1. **SelectTransactions::gasRequested:** `10_000_000_000`, the maximum total gas limit of the transactions to be returned to a proposer (one _selection session_). This value is provided by the Protocol. +2. **SelectTransactions::maxNum:** `30_000`, the maximum number of transactions to be returned to a proposer (one _selection session_). This value is provided by the Protocol. 
+ +### Transactions selection + +### Paragraph 1 + +When a proposer asks the mempool for transactions, it provides the following parameters: + + - `gasRequested`: the maximum total gas limit of the transactions to be returned + - `maxNum`: the maximum number of transactions to be returned + +### Paragraph 2 + +The PPU (price per gas unit) of a transaction, is computed (once it enters the mempool) as follows: + +``` +ppu = initiallyPaidFee / gasLimit +``` + +In the formula above, + +``` +initiallyPaidFee = + dataCost * gasPrice + + executionCost * gasPrice * network.gasPriceModifier + +dataCost = network.minGasLimit + len(data) * network.gasPerDataByte + +executionCost = gasLimit - dataCost +``` + +Network parameters (as of November of 2024): + +``` +gasPriceModifier = 0.01 +minGasLimit = 50_000 +gasPerDataByte = 1_500 +``` + +#### Examples + +**(a)** A simple native transfer with `gasLimit = 50_000` and `gasPrice = 1_000_000_000`: + +``` +initiallyPaidFee = 50_000_000_000_000 atoms +ppu = 1_000_000_000 atoms +``` + +**(b)** A simple native transfer with `gasLimit = 50_000` and `gasPrice = 1_500_000_000`: + +``` +initiallyPaidFee = gasLimit * gasPrice = 75_000_000_000_000 atoms +ppu = 75_000_000_000_000 / 50_000 = 1_500_000_000 atoms +``` + +**(c)** A simple native transfer with a data payload of 7 bytes, with `gasLimit = 50_000 + 7 * 1500` and `gasPrice = 1_000_000_000`: + +``` +initiallyPaidFee = 60_500_000_000_000 atoms +ppu = 60_500_000_000_000 / 60_500 = 1_000_000_000 atoms +``` + +That is, for simple native transfers (whether they hold a data payload or not), the PPU is equal to the gas price. 
+ +**(d)** A contract call with `gasLimit = 75_000_000` and `gasPrice = 1_000_000_000`, with a data payload of `42` bytes: + +``` +initiallyPaidFee = 861_870_000_000_000 atoms +ppu = 11_491_600 atoms +``` + +**(e)** Similar to **(d)**, but with `gasPrice = 2_000_000_000`: + +``` +initiallyPaidFee = 1_723_740_000_000_000 atoms +ppu = 22_983_200 atoms +``` + +That is, for contract calls, the PPU is not equal to the gas price, but much lower, due to the contract call _cost subsidy_. **A higher gas price will result in a higher PPU.** + +### Paragraph 3 + +Transaction **A** is considered **more valuable (for the Network)** than transaction **B** if **it has a higher PPU**. + +If two transactions have the same PPU, they are ordered by gas limit (higher is better, promoting less "execution fragmentation"). In the end, they are ordered using an arbitrary, but deterministic rule: the transaction with the "lower" transaction hash "wins" the comparison. + +Pseudo-code: + +``` +func isTransactionMoreValuableForNetwork(A, B): + if A.ppu > B.ppu: + return true + if A.ppu < B.ppu: + return false + + if A.gasLimit > B.gasLimit: + return true + if A.gasLimit < B.gasLimit: + return false + + return A.hash < B.hash +``` + +### Paragraph 4 + +The mempool selects transactions as follows (pseudo-code): + +``` +func selectTransactions(gasRequested, maxNum): + // Setup phase + senders := list of all current senders in the mempool, in an arbitrary order + bunchesOfTransactions := sourced from senders, nicely sorted by nonce + + // Holds selected transactions + selectedTransactions := empty + + // Holds not-yet-selected transactions, ordered by PPU + competitionHeap := empty + + for each bunch in bunchesOfTransactions: + competitionHeap.push(next available transaction from bunch) + + // Selection loop + while competitionHeap is not empty: + mostValuableTransaction := competitionHeap.pop() + + // Check if adding the next transaction exceeds limits + if selectedTransactions.totalGasLimit + 
mostValuableTransaction.gasLimit > gasRequested: + break + if selectedTransactions.length + 1 > maxNum: + break + + selectedTransactions.append(mostValuableTransaction) + + nextTransaction := next available transaction from the bunch of mostValuableTransaction + if nextTransaction exists: + competitionHeap.push(nextTransaction) + + return selectedTransactions +``` + +Thus, the mempool selects transactions using an efficient and value-driven algorithm that ensures the most valuable transactions (in terms of PPU) are prioritized while maintaining correct nonce sequencing per sender. The selection process is as follows: + +**Setup phase:** + + - **Snapshot of senders:** + - Before starting the selection loop, obtain a snapshot of all current senders in the mempool in an arbitrary order. + + - **Organize transactions into bunches:** + - For each sender, collect all their pending transactions and organize them into a "bunch." + - Each bunch is: + - **Sorted by nonce:** Transactions are ordered in ascending order based on their nonce values. + + - **Prepare the heap:** + - Extract the first transaction (lowest nonce) from each sender's bunch. + - Place these transactions onto a max heap, which is ordered based on the transaction's PPU. + +**Selection loop:** + + - **Iterative selection:** + - Continue the loop until either the total gas of selected transactions meets or exceeds `gasRequested`, or the number of selected transactions reaches `maxNum`. + - In each iteration: + - **Select the most valuable transaction:** + - Pop the transaction with the highest PPU from the heap. + - Append this transaction to the list of `selectedTransactions`. + - **Update the sender's bunch:** + - If the sender of the selected transaction has more transactions in their bunch: + - Take the next transaction (next higher nonce) from the bunch. + - Push this transaction onto the heap to compete in subsequent iterations. 
+ - This process ensures that at each step, the most valuable transaction across all senders is selected while maintaining proper nonce order for each sender. + + - **Early termination:** + - The selection loop can terminate early if either of the following conditions is satisfied before all transactions are processed: + - The accumulated gas of selected transactions meets or exceeds `gasRequested`. + - The number of selected transactions reaches `maxNum`. + +**Additional notes:** + - Within the selection loop, the current nonce of the sender is queried from the blockchain, lazily (when needed). + - If an initial nonce gap is detected, the sender is (completely) skipped in the current selection session. + - If a middle nonce gap is detected, the sender is skipped (from now on) in the current selection session. + - Transactions with nonces lower than the current nonce of the sender are skipped. + - Transactions having the same nonce as a previously selected one (in the scope of a sender) are skipped. Also see paragraph 5. + - Incorrectly guarded transactions are skipped. + - Once the accumulated fees of selected transactions of a given sender exceed the sender's balance, the sender is skipped (from now on). + + +### Paragraph 5 + +On the node's side, the selected transactions are shuffled using a deterministic algorithm. This shuffling ensures that the transaction order remains unpredictable to the proposer, effectively preventing _front-running attacks_. Therefore, being selected first by the mempool does not guarantee that a transaction will be included first in the block. Additionally, selection by the mempool does not ensure inclusion in the very next block, as the proposer has the final authority on which transactions to include, based on **the remaining space available** in the block. 
+ +### Order of transactions of the same sender + +Transactions from the same sender are organized based on specific rules to ensure proper sequencing for the selection flow: + +1. **Nonce ascending**: transactions are primarily sorted by their nonce values in ascending order. This sequence ensures that the transactions are processed in the order intended by the sender, as the nonce represents the transaction number in the sender's sequence. + +2. **Gas price descending (same nonce)**: if multiple transactions share the same nonce, they are sorted by their gas prices in descending order - transactions offering higher gas prices are prioritized. This mechanism allows one to easily override a pending transaction with a higher gas price. + +3. **Hash ascending (same nonce and gas price)**: for transactions that have identical nonce and gas price, the tie is broken by sorting them based on their transaction hash in ascending order. This provides a consistent and deterministic ordering when other factors are equal. While this ordering isn't a critical aspect of the mempool's operation, it ensures logical consistency. 
diff --git a/txcache/config.go b/txcache/config.go new file mode 100644 index 0000000000..1e371d41c0 --- /dev/null +++ b/txcache/config.go @@ -0,0 +1,118 @@ +package txcache + +import ( + "encoding/json" + "fmt" + + "github.com/multiversx/mx-chain-storage-go/common" +) + +const numChunksLowerBound = 1 +const numChunksUpperBound = 128 +const maxNumItemsLowerBound = 4 +const maxNumBytesLowerBound = maxNumItemsLowerBound * 1 +const maxNumBytesUpperBound = 1_073_741_824 // one GB +const maxNumItemsPerSenderLowerBound = 1 +const maxNumBytesPerSenderLowerBound = maxNumItemsPerSenderLowerBound * 1 +const maxNumBytesPerSenderUpperBound = 33_554_432 // 32 MB +const numItemsToPreemptivelyEvictLowerBound = uint32(1) + +// ConfigSourceMe holds cache configuration +type ConfigSourceMe struct { + Name string + NumChunks uint32 + EvictionEnabled bool + NumBytesThreshold uint32 + NumBytesPerSenderThreshold uint32 + CountThreshold uint32 + CountPerSenderThreshold uint32 + NumItemsToPreemptivelyEvict uint32 +} + +type senderConstraints struct { + maxNumTxs uint32 + maxNumBytes uint32 +} + +func (config *ConfigSourceMe) verify() error { + if len(config.Name) == 0 { + return fmt.Errorf("%w: config.Name is invalid", common.ErrInvalidConfig) + } + if config.NumChunks < numChunksLowerBound || config.NumChunks > numChunksUpperBound { + return fmt.Errorf("%w: config.NumChunks is invalid", common.ErrInvalidConfig) + } + if config.NumBytesPerSenderThreshold < maxNumBytesPerSenderLowerBound || config.NumBytesPerSenderThreshold > maxNumBytesPerSenderUpperBound { + return fmt.Errorf("%w: config.NumBytesPerSenderThreshold is invalid", common.ErrInvalidConfig) + } + if config.CountPerSenderThreshold < maxNumItemsPerSenderLowerBound { + return fmt.Errorf("%w: config.CountPerSenderThreshold is invalid", common.ErrInvalidConfig) + } + + if config.NumBytesThreshold < maxNumBytesLowerBound || config.NumBytesThreshold > maxNumBytesUpperBound { + return fmt.Errorf("%w: config.NumBytesThreshold is 
invalid", common.ErrInvalidConfig) + } + if config.CountThreshold < maxNumItemsLowerBound { + return fmt.Errorf("%w: config.CountThreshold is invalid", common.ErrInvalidConfig) + } + if config.NumItemsToPreemptivelyEvict < numItemsToPreemptivelyEvictLowerBound { + return fmt.Errorf("%w: config.NumItemsToPreemptivelyEvict is invalid", common.ErrInvalidConfig) + } + + return nil +} + +func (config *ConfigSourceMe) getSenderConstraints() senderConstraints { + return senderConstraints{ + maxNumBytes: config.NumBytesPerSenderThreshold, + maxNumTxs: config.CountPerSenderThreshold, + } +} + +// String returns a readable representation of the object +func (config *ConfigSourceMe) String() string { + bytes, err := json.Marshal(config) + if err != nil { + log.Error("ConfigSourceMe.String", "err", err) + } + + return string(bytes) +} + +// ConfigDestinationMe holds cache configuration +type ConfigDestinationMe struct { + Name string + NumChunks uint32 + MaxNumItems uint32 + MaxNumBytes uint32 + NumItemsToPreemptivelyEvict uint32 +} + +func (config *ConfigDestinationMe) verify() error { + if len(config.Name) == 0 { + return fmt.Errorf("%w: config.Name is invalid", common.ErrInvalidConfig) + } + if config.NumChunks < numChunksLowerBound || config.NumChunks > numChunksUpperBound { + return fmt.Errorf("%w: config.NumChunks is invalid", common.ErrInvalidConfig) + } + if config.MaxNumItems < maxNumItemsLowerBound { + return fmt.Errorf("%w: config.MaxNumItems is invalid", common.ErrInvalidConfig) + } + if config.MaxNumBytes < maxNumBytesLowerBound || config.MaxNumBytes > maxNumBytesUpperBound { + return fmt.Errorf("%w: config.MaxNumBytes is invalid", common.ErrInvalidConfig) + } + if config.NumItemsToPreemptivelyEvict < numItemsToPreemptivelyEvictLowerBound { + return fmt.Errorf("%w: config.NumItemsToPreemptivelyEvict is invalid", common.ErrInvalidConfig) + } + + return nil +} + +// String returns a readable representation of the object +func (config *ConfigDestinationMe) String() 
string { + bytes, err := json.Marshal(config) + if err != nil { + log.Error("ConfigDestinationMe.String", "err", err) + } + + return string(bytes) +} diff --git a/txcache/constants.go b/txcache/constants.go new file mode 100644 index 0000000000..fe5f19939f --- /dev/null +++ b/txcache/constants.go @@ -0,0 +1,5 @@ +package txcache + +const diagnosisMaxTransactionsToDisplay = 10000 +const initialCapacityOfSelectionSlice = 30000 +const selectionLoopDurationCheckInterval = 10 diff --git a/txcache/crossTxCache.go b/txcache/crossTxCache.go new file mode 100644 index 0000000000..1a64e77b9a --- /dev/null +++ b/txcache/crossTxCache.go @@ -0,0 +1,123 @@ +package txcache + +import ( + "github.com/multiversx/mx-chain-storage-go/immunitycache" + "github.com/multiversx/mx-chain-storage-go/types" +) + +var _ types.Cacher = (*CrossTxCache)(nil) + +// CrossTxCache holds cross-shard transactions (where destination == me) +type CrossTxCache struct { + *immunitycache.ImmunityCache + config ConfigDestinationMe +} + +// NewCrossTxCache creates a new transactions cache +func NewCrossTxCache(config ConfigDestinationMe) (*CrossTxCache, error) { + log.Debug("NewCrossTxCache", "config", config.String()) + + err := config.verify() + if err != nil { + return nil, err + } + + immunityCacheConfig := immunitycache.CacheConfig{ + Name: config.Name, + NumChunks: config.NumChunks, + MaxNumBytes: config.MaxNumBytes, + MaxNumItems: config.MaxNumItems, + NumItemsToPreemptivelyEvict: config.NumItemsToPreemptivelyEvict, + } + + immunityCache, err := immunitycache.NewImmunityCache(immunityCacheConfig) + if err != nil { + return nil, err + } + + cache := CrossTxCache{ + ImmunityCache: immunityCache, + config: config, + } + + return &cache, nil +} + +// ImmunizeTxsAgainstEviction marks items as non-evictable +func (cache *CrossTxCache) ImmunizeTxsAgainstEviction(keys [][]byte) { + numNow, numFuture := cache.ImmunityCache.ImmunizeKeys(keys) + log.Trace("CrossTxCache.ImmunizeTxsAgainstEviction", + "name", 
cache.config.Name, + "len(keys)", len(keys), + "numNow", numNow, + "numFuture", numFuture, + ) + cache.Diagnose(false) +} + +// AddTx adds a transaction in the cache +func (cache *CrossTxCache) AddTx(tx *WrappedTransaction) (has, added bool) { + log.Trace("CrossTxCache.AddTx", "name", cache.config.Name, "txHash", tx.TxHash) + return cache.HasOrAdd(tx.TxHash, tx, int(tx.Size)) +} + +// GetByTxHash gets the transaction by hash +func (cache *CrossTxCache) GetByTxHash(txHash []byte) (*WrappedTransaction, bool) { + item, ok := cache.ImmunityCache.Get(txHash) + if !ok { + return nil, false + } + tx, ok := item.(*WrappedTransaction) + if !ok { + return nil, false + } + + return tx, true +} + +// Get returns the unwrapped payload of a TransactionWrapper +// Implemented for compatibility reasons (see txPoolsCleaner.go). +func (cache *CrossTxCache) Get(key []byte) (value interface{}, ok bool) { + wrapped, ok := cache.GetByTxHash(key) + if !ok { + return nil, false + } + + return wrapped.Tx, true +} + +// Peek returns the unwrapped payload of a TransactionWrapper +// Implemented for compatibility reasons (see transactions.go, common.go). 
+func (cache *CrossTxCache) Peek(key []byte) (value interface{}, ok bool) { + return cache.Get(key) +} + +// RemoveTxByHash removes tx by hash +func (cache *CrossTxCache) RemoveTxByHash(txHash []byte) bool { + log.Trace("CrossTxCache.RemoveTxByHash", "name", cache.config.Name, "txHash", txHash) + return cache.RemoveWithResult(txHash) +} + +// ForEachTransaction iterates over the transactions in the cache +func (cache *CrossTxCache) ForEachTransaction(function ForEachTransaction) { + cache.ForEachItem(func(key []byte, item interface{}) { + tx, ok := item.(*WrappedTransaction) + if !ok { + return + } + + function(key, tx) + }) +} + +// GetTransactionsPoolForSender returns an empty slice, only to respect the interface +// CrossTxCache does not support transaction selection (not applicable, since transactions are already half-executed), +// thus does not handle nonces, nonce gaps etc. +func (cache *CrossTxCache) GetTransactionsPoolForSender(_ string) []*WrappedTransaction { + return make([]*WrappedTransaction, 0) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (cache *CrossTxCache) IsInterfaceNil() bool { + return cache == nil +} diff --git a/txcache/crossTxCache_test.go b/txcache/crossTxCache_test.go new file mode 100644 index 0000000000..eca4a64b71 --- /dev/null +++ b/txcache/crossTxCache_test.go @@ -0,0 +1,83 @@ +package txcache + +import ( + "fmt" + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCrossTxCache_DoImmunizeTxsAgainstEviction(t *testing.T) { + cache := newCrossTxCacheToTest(1, 8, math.MaxUint16) + + cache.addTestTxs("a", "b", "c", "d") + numNow, numFuture := cache.ImmunizeKeys(hashesAsBytes([]string{"a", "b", "e", "f"})) + require.Equal(t, 2, numNow) + require.Equal(t, 2, numFuture) + require.Equal(t, 4, cache.Len()) + + cache.addTestTxs("e", "f", "g", "h") + require.ElementsMatch(t, []string{"a", "b", "c", "d", "e", "f", "g", "h"}, hashesAsStrings(cache.Keys())) + + cache.addTestTxs("i", 
"j", "k", "l") + require.ElementsMatch(t, []string{"a", "b", "e", "f", "i", "j", "k", "l"}, hashesAsStrings(cache.Keys())) +} + +func TestCrossTxCache_Get(t *testing.T) { + cache := newCrossTxCacheToTest(1, 8, math.MaxUint16) + + cache.addTestTxs("a", "b", "c", "d") + a, ok := cache.GetByTxHash([]byte("a")) + require.True(t, ok) + require.NotNil(t, a) + + x, ok := cache.GetByTxHash([]byte("x")) + require.False(t, ok) + require.Nil(t, x) + + aTx, ok := cache.Get([]byte("a")) + require.True(t, ok) + require.NotNil(t, aTx) + require.Equal(t, a.Tx, aTx) + + xTx, ok := cache.Get([]byte("x")) + require.False(t, ok) + require.Nil(t, xTx) + + aTx, ok = cache.Peek([]byte("a")) + require.True(t, ok) + require.NotNil(t, aTx) + require.Equal(t, a.Tx, aTx) + + xTx, ok = cache.Peek([]byte("x")) + require.False(t, ok) + require.Nil(t, xTx) + + require.Equal(t, make([]*WrappedTransaction, 0), cache.GetTransactionsPoolForSender("")) +} + +func newCrossTxCacheToTest(numChunks uint32, maxNumItems uint32, numMaxBytes uint32) *CrossTxCache { + cache, err := NewCrossTxCache(ConfigDestinationMe{ + Name: "test", + NumChunks: numChunks, + MaxNumItems: maxNumItems, + MaxNumBytes: numMaxBytes, + NumItemsToPreemptivelyEvict: numChunks * 1, + }) + if err != nil { + panic(fmt.Sprintf("newCrossTxCacheToTest(): %s", err)) + } + + return cache +} + +func (cache *CrossTxCache) addTestTxs(hashes ...string) { + for _, hash := range hashes { + _, _ = cache.addTestTx(hash) + } +} + +func (cache *CrossTxCache) addTestTx(hash string) (ok, added bool) { + return cache.AddTx(createTx([]byte(hash), ".", uint64(42))) +} diff --git a/txcache/diagnosis.go b/txcache/diagnosis.go new file mode 100644 index 0000000000..df2a99fe61 --- /dev/null +++ b/txcache/diagnosis.go @@ -0,0 +1,120 @@ +package txcache + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + logger "github.com/multiversx/mx-chain-logger-go" +) + +type printedTransaction struct { + 
Hash string `json:"hash"` + PPU uint64 `json:"ppu"` + Nonce uint64 `json:"nonce"` + Sender string `json:"sender"` + GasPrice uint64 `json:"gasPrice"` + GasLimit uint64 `json:"gasLimit"` + Receiver string `json:"receiver"` + DataLength int `json:"dataLength"` +} + +// Diagnose checks the state of the cache for inconsistencies and displays a summary, senders and transactions. +func (cache *TxCache) Diagnose(_ bool) { + cache.diagnoseCounters() + cache.diagnoseTransactions() +} + +func (cache *TxCache) diagnoseCounters() { + if log.GetLevel() > logger.LogDebug { + return + } + + sizeInBytes := cache.NumBytes() + numTxsEstimate := int(cache.CountTx()) + numTxsInChunks := cache.txByHash.backingMap.Count() + txsKeys := cache.txByHash.backingMap.Keys() + numSendersEstimate := int(cache.CountSenders()) + numSendersInChunks := cache.txListBySender.backingMap.Count() + sendersKeys := cache.txListBySender.backingMap.Keys() + + fine := numSendersEstimate == numSendersInChunks + fine = fine && (int(numSendersEstimate) == len(sendersKeys)) + fine = fine && (numTxsEstimate == numTxsInChunks && numTxsEstimate == len(txsKeys)) + + log.Debug("diagnoseCounters", + "fine", fine, + "numTxsEstimate", numTxsEstimate, + "numTxsInChunks", numTxsInChunks, + "len(txsKeys)", len(txsKeys), + "sizeInBytes", sizeInBytes, + "numBytesThreshold", cache.config.NumBytesThreshold, + "numSendersEstimate", numSendersEstimate, + "numSendersInChunks", numSendersInChunks, + "len(sendersKeys)", len(sendersKeys), + ) +} + +func (cache *TxCache) diagnoseTransactions() { + if logDiagnoseTransactions.GetLevel() > logger.LogTrace { + return + } + + transactions := cache.getAllTransactions() + if len(transactions) == 0 { + return + } + + numToDisplay := core.MinInt(diagnosisMaxTransactionsToDisplay, len(transactions)) + logDiagnoseTransactions.Trace("diagnoseTransactions", "numTransactions", len(transactions), "numToDisplay", numToDisplay) + 
logDiagnoseTransactions.Trace(marshalTransactionsToNewlineDelimitedJSON(transactions[:numToDisplay], "diagnoseTransactions")) +} + +// marshalTransactionsToNewlineDelimitedJSON converts a list of transactions to a newline-delimited JSON string. +// Note: each line is indexed, to improve readability. The index is easily removable if separate analysis is needed. +func marshalTransactionsToNewlineDelimitedJSON(transactions []*WrappedTransaction, linePrefix string) string { + builder := strings.Builder{} + builder.WriteString("\n") + + for i, wrappedTx := range transactions { + printedTx := convertWrappedTransactionToPrintedTransaction(wrappedTx) + printedTxJSON, _ := json.Marshal(printedTx) + + builder.WriteString(fmt.Sprintf("%s#%d: ", linePrefix, i)) + builder.WriteString(string(printedTxJSON)) + builder.WriteString("\n") + } + + builder.WriteString("\n") + return builder.String() +} + +func convertWrappedTransactionToPrintedTransaction(wrappedTx *WrappedTransaction) *printedTransaction { + transaction := wrappedTx.Tx + + return &printedTransaction{ + Hash: hex.EncodeToString(wrappedTx.TxHash), + Nonce: transaction.GetNonce(), + Receiver: hex.EncodeToString(transaction.GetRcvAddr()), + Sender: hex.EncodeToString(transaction.GetSndAddr()), + GasPrice: transaction.GetGasPrice(), + GasLimit: transaction.GetGasLimit(), + DataLength: len(transaction.GetData()), + PPU: wrappedTx.PricePerUnit, + } +} + +func displaySelectionOutcome(contextualLogger logger.Logger, linePrefix string, transactions []*WrappedTransaction) { + if contextualLogger.GetLevel() > logger.LogTrace { + return + } + + if len(transactions) > 0 { + contextualLogger.Trace("displaySelectionOutcome - transactions (as newline-separated JSON):") + contextualLogger.Trace(marshalTransactionsToNewlineDelimitedJSON(transactions, linePrefix)) + } else { + contextualLogger.Trace("displaySelectionOutcome - transactions: none") + } +} diff --git a/txcache/disabledCache.go b/txcache/disabledCache.go new file mode 
100644 index 0000000000..d448ba5996 --- /dev/null +++ b/txcache/disabledCache.go @@ -0,0 +1,129 @@ +package txcache + +import ( + "github.com/multiversx/mx-chain-storage-go/types" +) + +var _ types.Cacher = (*DisabledCache)(nil) + +// DisabledCache represents a disabled cache +type DisabledCache struct { +} + +// NewDisabledCache creates a new disabled cache +func NewDisabledCache() *DisabledCache { + return &DisabledCache{} +} + +// AddTx does nothing +func (cache *DisabledCache) AddTx(_ *WrappedTransaction) (ok bool, added bool) { + return false, false +} + +// GetByTxHash returns no transaction +func (cache *DisabledCache) GetByTxHash(_ []byte) (*WrappedTransaction, bool) { + return nil, false +} + +// SelectTransactions returns an empty slice +func (cache *DisabledCache) SelectTransactions(uint64, int) ([]*WrappedTransaction, uint64) { + return make([]*WrappedTransaction, 0), 0 +} + +// RemoveTxByHash does nothing +func (cache *DisabledCache) RemoveTxByHash(_ []byte) bool { + return false +} + +// Len returns zero +func (cache *DisabledCache) Len() int { + return 0 +} + +// SizeInBytesContained returns 0 +func (cache *DisabledCache) SizeInBytesContained() uint64 { + return 0 +} + +// NumBytes returns zero +func (cache *DisabledCache) NumBytes() int { + return 0 +} + +// ForEachTransaction does nothing +func (cache *DisabledCache) ForEachTransaction(_ ForEachTransaction) { +} + +// Clear does nothing +func (cache *DisabledCache) Clear() { +} + +// Put does nothing +func (cache *DisabledCache) Put(_ []byte, _ interface{}, _ int) (evicted bool) { + return false +} + +// Get returns no transaction +func (cache *DisabledCache) Get(_ []byte) (value interface{}, ok bool) { + return nil, false +} + +// Has returns false +func (cache *DisabledCache) Has(_ []byte) bool { + return false +} + +// Peek returns no transaction +func (cache *DisabledCache) Peek(_ []byte) (value interface{}, ok bool) { + return nil, false +} + +// HasOrAdd returns false, does nothing +func 
(cache *DisabledCache) HasOrAdd(_ []byte, _ interface{}, _ int) (has, added bool) { + return false, false +} + +// Remove does nothing +func (cache *DisabledCache) Remove(_ []byte) { +} + +// Keys returns an empty slice +func (cache *DisabledCache) Keys() [][]byte { + return make([][]byte, 0) +} + +// MaxSize returns zero +func (cache *DisabledCache) MaxSize() int { + return 0 +} + +// RegisterHandler does nothing +func (cache *DisabledCache) RegisterHandler(func(key []byte, value interface{}), string) { +} + +// UnRegisterHandler does nothing +func (cache *DisabledCache) UnRegisterHandler(string) { +} + +// ImmunizeTxsAgainstEviction does nothing +func (cache *DisabledCache) ImmunizeTxsAgainstEviction(_ [][]byte) { +} + +// Diagnose does nothing +func (cache *DisabledCache) Diagnose(_ bool) { +} + +// GetTransactionsPoolForSender returns an empty slice +func (cache *DisabledCache) GetTransactionsPoolForSender(_ string) []*WrappedTransaction { + return make([]*WrappedTransaction, 0) +} + +// Close does nothing +func (cache *DisabledCache) Close() error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (cache *DisabledCache) IsInterfaceNil() bool { + return cache == nil +} diff --git a/txcache/disabledCache_test.go b/txcache/disabledCache_test.go new file mode 100644 index 0000000000..9725a01eab --- /dev/null +++ b/txcache/disabledCache_test.go @@ -0,0 +1,68 @@ +package txcache + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDisabledCache_DoesNothing(t *testing.T) { + cache := NewDisabledCache() + + ok, added := cache.AddTx(nil) + require.False(t, ok) + require.False(t, added) + + tx, ok := cache.GetByTxHash([]byte{}) + require.Nil(t, tx) + require.False(t, ok) + + selection, accumulatedGas := cache.SelectTransactions(42, 42) + require.Equal(t, 0, len(selection)) + require.Equal(t, uint64(0), accumulatedGas) + + removed := cache.RemoveTxByHash([]byte{}) + require.False(t, removed) + + 
length := cache.Len() + require.Equal(t, 0, length) + + require.NotPanics(t, func() { cache.ForEachTransaction(func(_ []byte, _ *WrappedTransaction) {}) }) + + txs := cache.GetTransactionsPoolForSender("") + require.Equal(t, make([]*WrappedTransaction, 0), txs) + + cache.Clear() + + evicted := cache.Put(nil, nil, 0) + require.False(t, evicted) + + value, ok := cache.Get([]byte{}) + require.Nil(t, value) + require.False(t, ok) + + value, ok = cache.Peek([]byte{}) + require.Nil(t, value) + require.False(t, ok) + + has := cache.Has([]byte{}) + require.False(t, has) + + has, added = cache.HasOrAdd([]byte{}, nil, 0) + require.False(t, has) + require.False(t, added) + + cache.Remove([]byte{}) + + keys := cache.Keys() + require.Equal(t, 0, len(keys)) + + maxSize := cache.MaxSize() + require.Equal(t, 0, maxSize) + + require.NotPanics(t, func() { cache.RegisterHandler(func(_ []byte, _ interface{}) {}, "") }) + require.False(t, cache.IsInterfaceNil()) + + err := cache.Close() + require.Nil(t, err) +} diff --git a/txcache/errors.go b/txcache/errors.go new file mode 100644 index 0000000000..71ee0169f7 --- /dev/null +++ b/txcache/errors.go @@ -0,0 +1,8 @@ +package txcache + +import "errors" + +var errNilMempoolHost = errors.New("nil mempool host") +var errNilSelectionSession = errors.New("nil selection session") +var errItemAlreadyInCache = errors.New("item already in cache") +var errEmptyBunchOfTransactions = errors.New("empty bunch of transactions") diff --git a/txcache/eviction.go b/txcache/eviction.go new file mode 100644 index 0000000000..61d09cfb20 --- /dev/null +++ b/txcache/eviction.go @@ -0,0 +1,169 @@ +package txcache + +import ( + "container/heap" + + "github.com/multiversx/mx-chain-core-go/core" +) + +// evictionJournal keeps a short journal about the eviction process +// This is useful for debugging and reasoning about the eviction +type evictionJournal struct { + numEvicted int + numEvictedByPass []int +} + +// doEviction does cache eviction. 
+// We do not allow more evictions to start concurrently. +func (cache *TxCache) doEviction() *evictionJournal { + if cache.isEvictionInProgress.IsSet() { + return nil + } + + if !cache.isCapacityExceeded() { + return nil + } + + cache.evictionMutex.Lock() + defer cache.evictionMutex.Unlock() + + _ = cache.isEvictionInProgress.SetReturningPrevious() + defer cache.isEvictionInProgress.Reset() + + if !cache.isCapacityExceeded() { + return nil + } + + logRemove.Debug("doEviction: before eviction", + "num bytes", cache.NumBytes(), + "num txs", cache.CountTx(), + "num senders", cache.CountSenders(), + ) + + stopWatch := core.NewStopWatch() + stopWatch.Start("eviction") + + evictionJournal := cache.evictLeastLikelyToSelectTransactions() + + stopWatch.Stop("eviction") + + logRemove.Debug( + "doEviction: after eviction", + "num bytes", cache.NumBytes(), + "num now", cache.CountTx(), + "num senders", cache.CountSenders(), + "duration", stopWatch.GetMeasurement("eviction"), + "evicted txs", evictionJournal.numEvicted, + ) + + return evictionJournal +} + +func (cache *TxCache) isCapacityExceeded() bool { + exceeded := cache.areThereTooManyBytes() || cache.areThereTooManySenders() || cache.areThereTooManyTxs() + return exceeded +} + +func (cache *TxCache) areThereTooManyBytes() bool { + numBytes := cache.NumBytes() + tooManyBytes := numBytes > int(cache.config.NumBytesThreshold) + return tooManyBytes +} + +func (cache *TxCache) areThereTooManySenders() bool { + numSenders := cache.CountSenders() + tooManySenders := numSenders > uint64(cache.config.CountThreshold) + return tooManySenders +} + +func (cache *TxCache) areThereTooManyTxs() bool { + numTxs := cache.CountTx() + tooManyTxs := numTxs > uint64(cache.config.CountThreshold) + return tooManyTxs +} + +// Eviction tolerates concurrent transaction additions / removals. 
+func (cache *TxCache) evictLeastLikelyToSelectTransactions() *evictionJournal { + senders := cache.getSenders() + bunches := make([]bunchOfTransactions, 0, len(senders)) + + for _, sender := range senders { + // Include transactions after gaps, as well (important), unlike when selecting transactions for processing. + // Reverse the order of transactions (will come in handy later, when creating the min-heap). + bunch := sender.getTxsReversed() + bunches = append(bunches, bunch) + } + + journal := &evictionJournal{} + + // Heap is reused among passes. + // Items popped from the heap are added to "transactionsToEvict" (slice is re-created in each pass). + transactionsHeap := newMinTransactionsHeap(len(bunches)) + heap.Init(transactionsHeap) + + // Initialize the heap with the first transaction of each bunch + for _, bunch := range bunches { + item, err := newTransactionsHeapItem(bunch) + if err != nil { + continue + } + + // Items will be reused (see below). Each sender gets one (and only one) item in the heap. + heap.Push(transactionsHeap, item) + } + + for pass := 0; cache.isCapacityExceeded(); pass++ { + transactionsToEvict := make(bunchOfTransactions, 0, cache.config.NumItemsToPreemptivelyEvict) + transactionsToEvictHashes := make([][]byte, 0, cache.config.NumItemsToPreemptivelyEvict) + + // Select transactions (sorted). + for transactionsHeap.Len() > 0 { + // Always pick the "worst" transaction. + item := heap.Pop(transactionsHeap).(*transactionsHeapItem) + + if len(transactionsToEvict) >= int(cache.config.NumItemsToPreemptivelyEvict) { + // We have enough transactions to evict in this pass. + break + } + + transactionsToEvict = append(transactionsToEvict, item.currentTransaction) + transactionsToEvictHashes = append(transactionsToEvictHashes, item.currentTransaction.TxHash) + + // If there are more transactions in the same bunch (same sender as the popped item), + // add the next one to the heap (to compete with the others in being "the worst"). 
+ // Item is reused (same originating sender), pushed back on the heap. + if item.gotoNextTransaction() { + heap.Push(transactionsHeap, item) + } + } + + if len(transactionsToEvict) == 0 { + // No more transactions to evict. + break + } + + // For each sender, find the "lowest" (in nonce) transaction to evict, + // so that we can remove all transactions with higher or equal nonces (of a sender) in one go (see below). + lowestToEvictBySender := make(map[string]uint64) + + for _, tx := range transactionsToEvict { + sender := string(tx.Tx.GetSndAddr()) + lowestToEvictBySender[sender] = tx.Tx.GetNonce() + } + + // Remove those transactions from "txListBySender". + for sender, nonce := range lowestToEvictBySender { + cache.txListBySender.removeTransactionsWithHigherOrEqualNonce([]byte(sender), nonce) + } + + // Remove those transactions from "txByHash". + _ = cache.txByHash.RemoveTxsBulk(transactionsToEvictHashes) + + journal.numEvictedByPass = append(journal.numEvictedByPass, len(transactionsToEvict)) + journal.numEvicted += len(transactionsToEvict) + + logRemove.Debug("evictLeastLikelyToSelectTransactions", "pass", pass, "num evicted", len(transactionsToEvict)) + } + + return journal +} diff --git a/txcache/eviction_test.go b/txcache/eviction_test.go new file mode 100644 index 0000000000..cbb911c0d9 --- /dev/null +++ b/txcache/eviction_test.go @@ -0,0 +1,227 @@ +package txcache + +import ( + "fmt" + "math" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" + "github.com/stretchr/testify/require" +) + +func TestTxCache_DoEviction_BecauseOfCount(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 4, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: true, + NumItemsToPreemptivelyEvict: 1, + } + + host := 
txcachemocks.NewMempoolHostMock()

	cache, err := NewTxCache(config, host)
	require.Nil(t, err)
	require.NotNil(t, cache)

	cache.AddTx(createTx([]byte("hash-alice"), "alice", 1).withGasPrice(1 * oneBillion))
	cache.AddTx(createTx([]byte("hash-bob"), "bob", 1).withGasPrice(2 * oneBillion))
	cache.AddTx(createTx([]byte("hash-carol"), "carol", 1).withGasPrice(3 * oneBillion))
	cache.AddTx(createTx([]byte("hash-eve"), "eve", 1).withGasPrice(4 * oneBillion))
	cache.AddTx(createTx([]byte("hash-dan"), "dan", 1).withGasPrice(5 * oneBillion))

	journal := cache.doEviction()
	require.Equal(t, 1, journal.numEvicted)
	require.Equal(t, []int{1}, journal.numEvictedByPass)

	// Only Alice is evicted (one item per pass; she has the lowest gas price, hence the worst score).
	// Carol is still there (better score).
	_, ok := cache.GetByTxHash([]byte("hash-carol"))
	require.True(t, ok)
	require.Equal(t, uint64(4), cache.CountSenders())
	require.Equal(t, uint64(4), cache.CountTx())
}

func TestTxCache_DoEviction_BecauseOfSize(t *testing.T) {
	config := ConfigSourceMe{
		Name:                        "untitled",
		NumChunks:                   16,
		NumBytesThreshold:           1000,
		NumBytesPerSenderThreshold:  maxNumBytesPerSenderUpperBound,
		CountThreshold:              math.MaxUint32,
		CountPerSenderThreshold:     math.MaxUint32,
		EvictionEnabled:             true,
		NumItemsToPreemptivelyEvict: 1,
	}

	host := txcachemocks.NewMempoolHostMock()

	cache, err := NewTxCache(config, host)
	require.Nil(t, err)
	require.NotNil(t, cache)

	cache.AddTx(createTx([]byte("hash-alice"), "alice", 1).withSize(256).withGasLimit(500000))
	cache.AddTx(createTx([]byte("hash-bob"), "bob", 1).withSize(256).withGasLimit(500000))
	cache.AddTx(createTx([]byte("hash-carol"), "carol", 1).withSize(256).withGasLimit(500000).withGasPrice(1.5 * oneBillion))
	cache.AddTx(createTx([]byte("hash-eve"), "eve", 1).withSize(256).withGasLimit(500000).withGasPrice(3 * oneBillion))

	journal := cache.doEviction()
	require.Equal(t, 1, journal.numEvicted)
	require.Equal(t, []int{1}, journal.numEvictedByPass)

	// One of Alice / Bob is evicted (they are tied for the lowest score; only one item is evicted per pass).
	// Carol and Eve are still there.
	_, ok := cache.GetByTxHash([]byte("hash-carol"))
	require.True(t, ok)
	_, ok = cache.GetByTxHash([]byte("hash-eve"))
	require.True(t, ok)
	require.Equal(t, uint64(3), cache.CountSenders())
	require.Equal(t, uint64(3), cache.CountTx())
}

func TestTxCache_DoEviction_DoesNothingWhenAlreadyInProgress(t *testing.T) {
	config := ConfigSourceMe{
		Name:                        "untitled",
		NumChunks:                   1,
		NumBytesThreshold:           maxNumBytesUpperBound,
		NumBytesPerSenderThreshold:  maxNumBytesPerSenderUpperBound,
		CountThreshold:              4,
		CountPerSenderThreshold:     math.MaxUint32,
		EvictionEnabled:             true,
		NumItemsToPreemptivelyEvict: 1,
	}

	host := txcachemocks.NewMempoolHostMock()

	cache, err := NewTxCache(config, host)
	require.Nil(t, err)
	require.NotNil(t, cache)

	_ = cache.isEvictionInProgress.SetReturningPrevious()

	cache.AddTx(createTx([]byte("hash-alice-1"), "alice", uint64(1)))
	cache.AddTx(createTx([]byte("hash-alice-2"), "alice", uint64(2)))
	cache.AddTx(createTx([]byte("hash-alice-3"), "alice", uint64(3)))
	cache.AddTx(createTx([]byte("hash-alice-4"), "alice", uint64(4)))
	cache.AddTx(createTx([]byte("hash-alice-5"), "alice", uint64(5)))

	// Nothing is evicted because eviction is already in progress.
	journal := cache.doEviction()
	require.Nil(t, journal)
	require.Equal(t, uint64(5), cache.CountTx())

	cache.isEvictionInProgress.Reset()

	// Now eviction can happen.
+ journal = cache.doEviction() + require.NotNil(t, journal) + require.Equal(t, 1, journal.numEvicted) + require.Equal(t, 4, int(cache.CountTx())) +} + +func TestBenchmarkTxCache_DoEviction(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: 1000000000, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 300000, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 50000, + } + + host := txcachemocks.NewMempoolHostMock() + + sw := core.NewStopWatch() + + t.Run("numSenders = 35000, numTransactions = 10", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + cache.config.EvictionEnabled = false + addManyTransactionsWithUniformDistribution(cache, 35000, 10) + cache.config.EvictionEnabled = true + + require.Equal(t, uint64(350000), cache.CountTx()) + + sw.Start(t.Name()) + journal := cache.doEviction() + sw.Stop(t.Name()) + + require.Equal(t, 50000, journal.numEvicted) + require.Equal(t, 1, len(journal.numEvictedByPass)) + }) + + t.Run("numSenders = 100000, numTransactions = 5", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + cache.config.EvictionEnabled = false + addManyTransactionsWithUniformDistribution(cache, 100000, 5) + cache.config.EvictionEnabled = true + + require.Equal(t, uint64(500000), cache.CountTx()) + + sw.Start(t.Name()) + journal := cache.doEviction() + sw.Stop(t.Name()) + + require.Equal(t, 200000, journal.numEvicted) + require.Equal(t, 4, len(journal.numEvictedByPass)) + }) + + t.Run("numSenders = 400000, numTransactions = 1", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + cache.config.EvictionEnabled = false + addManyTransactionsWithUniformDistribution(cache, 400000, 1) + cache.config.EvictionEnabled = true + + require.Equal(t, uint64(400000), cache.CountTx()) + + sw.Start(t.Name()) + journal := cache.doEviction() + sw.Stop(t.Name()) + + 
require.Equal(t, 100000, journal.numEvicted) + require.Equal(t, 2, len(journal.numEvictedByPass)) + }) + + t.Run("numSenders = 10000, numTransactions = 100", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + cache.config.EvictionEnabled = false + addManyTransactionsWithUniformDistribution(cache, 10000, 100) + cache.config.EvictionEnabled = true + + require.Equal(t, uint64(1000000), cache.CountTx()) + + sw.Start(t.Name()) + journal := cache.doEviction() + sw.Stop(t.Name()) + + require.Equal(t, 700000, journal.numEvicted) + require.Equal(t, 14, len(journal.numEvictedByPass)) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) + } + + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // 0.092625s (TestBenchmarkTxCache_DoEviction/numSenders_=_35000,_numTransactions_=_10) + // 0.426718s (TestBenchmarkTxCache_DoEviction/numSenders_=_100000,_numTransactions_=_5) + // 0.546757s (TestBenchmarkTxCache_DoEviction/numSenders_=_10000,_numTransactions_=_100) + // 0.542678s (TestBenchmarkTxCache_DoEviction/numSenders_=_400000,_numTransactions_=_1) +} diff --git a/txcache/interface.go b/txcache/interface.go new file mode 100644 index 0000000000..b6d0aee5d0 --- /dev/null +++ b/txcache/interface.go @@ -0,0 +1,25 @@ +package txcache + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-storage-go/types" +) + +// MempoolHost provides blockchain information for mempool operations +type MempoolHost interface { + ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int + GetTransferredValue(tx data.TransactionHandler) *big.Int + IsInterfaceNil() bool +} + +// SelectionSession provides blockchain information for transaction selection +type SelectionSession interface { + 
GetAccountState(accountKey []byte) (*types.AccountState, error) + IsIncorrectlyGuarded(tx data.TransactionHandler) bool + IsInterfaceNil() bool +} + +// ForEachTransaction is an iterator callback +type ForEachTransaction func(txHash []byte, value *WrappedTransaction) diff --git a/txcache/loggers.go b/txcache/loggers.go new file mode 100644 index 0000000000..ecedbfec7b --- /dev/null +++ b/txcache/loggers.go @@ -0,0 +1,9 @@ +package txcache + +import logger "github.com/multiversx/mx-chain-logger-go" + +var log = logger.GetOrCreate("txcache/main") +var logAdd = logger.GetOrCreate("txcache/add") +var logRemove = logger.GetOrCreate("txcache/remove") +var logSelect = logger.GetOrCreate("txcache/select") +var logDiagnoseTransactions = logger.GetOrCreate("txcache/diagnose/transactions") diff --git a/txcache/maps/concurrentMap.go b/txcache/maps/concurrentMap.go new file mode 100644 index 0000000000..cb24048b43 --- /dev/null +++ b/txcache/maps/concurrentMap.go @@ -0,0 +1,180 @@ +package maps + +import ( + "sync" +) + +// This implementation is a simplified version of: +// https://github.com/multiversx/concurrent-map, which is based on: +// https://github.com/orcaman/concurrent-map + +// ConcurrentMap is a thread safe map of type string:Anything. +// To avoid lock bottlenecks this map is divided to several map chunks. +type ConcurrentMap struct { + mutex sync.RWMutex + nChunks uint32 + chunks []*concurrentMapChunk +} + +// concurrentMapChunk is a thread safe string to anything map. +type concurrentMapChunk struct { + items map[string]interface{} + mutex sync.RWMutex +} + +// NewConcurrentMap creates a new concurrent map. 
+func NewConcurrentMap(nChunks uint32) *ConcurrentMap { + // We cannot have a map with no chunks + if nChunks == 0 { + nChunks = 1 + } + + m := ConcurrentMap{ + nChunks: nChunks, + } + + m.initializeChunks() + + return &m +} + +func (m *ConcurrentMap) initializeChunks() { + // Assignment is not an atomic operation, so we have to wrap this in a critical section + m.mutex.Lock() + defer m.mutex.Unlock() + + m.chunks = make([]*concurrentMapChunk, m.nChunks) + + for i := uint32(0); i < m.nChunks; i++ { + m.chunks[i] = &concurrentMapChunk{ + items: make(map[string]interface{}), + } + } +} + +// Set sets the given value under the specified key. +func (m *ConcurrentMap) Set(key string, value interface{}) { + chunk := m.getChunk(key) + chunk.mutex.Lock() + chunk.items[key] = value + chunk.mutex.Unlock() +} + +// SetIfAbsent sets the given value under the specified key if no value was associated with it. +func (m *ConcurrentMap) SetIfAbsent(key string, value interface{}) bool { + chunk := m.getChunk(key) + chunk.mutex.Lock() + _, ok := chunk.items[key] + if !ok { + chunk.items[key] = value + } + chunk.mutex.Unlock() + return !ok +} + +// Get retrieves an element from map under given key. +func (m *ConcurrentMap) Get(key string) (interface{}, bool) { + chunk := m.getChunk(key) + chunk.mutex.RLock() + val, ok := chunk.items[key] + chunk.mutex.RUnlock() + return val, ok +} + +// Has looks up an item under specified key. +func (m *ConcurrentMap) Has(key string) bool { + chunk := m.getChunk(key) + chunk.mutex.RLock() + _, ok := chunk.items[key] + chunk.mutex.RUnlock() + return ok +} + +// Remove removes an element from the map. 
+func (m *ConcurrentMap) Remove(key string) (interface{}, bool) { + chunk := m.getChunk(key) + chunk.mutex.Lock() + defer chunk.mutex.Unlock() + + item := chunk.items[key] + delete(chunk.items, key) + return item, item != nil +} + +func (m *ConcurrentMap) getChunk(key string) *concurrentMapChunk { + m.mutex.RLock() + defer m.mutex.RUnlock() + return m.chunks[fnv32(key)%m.nChunks] +} + +// fnv32 implements https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function for 32 bits +func fnv32(key string) uint32 { + hash := uint32(2166136261) + const prime32 = uint32(16777619) + for i := 0; i < len(key); i++ { + hash *= prime32 + hash ^= uint32(key[i]) + } + return hash +} + +// Clear clears the map +func (m *ConcurrentMap) Clear() { + // There is no need to explicitly remove each item for each chunk + // The garbage collector will remove the data from memory + m.initializeChunks() +} + +// Count returns the number of elements within the map +func (m *ConcurrentMap) Count() int { + count := 0 + chunks := m.getChunks() + + for _, chunk := range chunks { + chunk.mutex.RLock() + count += len(chunk.items) + chunk.mutex.RUnlock() + } + return count +} + +// Keys returns all keys as []string +func (m *ConcurrentMap) Keys() []string { + count := m.Count() + chunks := m.getChunks() + + // count is not exact anymore, since we are in a different lock than the one aquired by Count() (but is a good approximation) + keys := make([]string, 0, count) + + for _, chunk := range chunks { + chunk.mutex.RLock() + for key := range chunk.items { + keys = append(keys, key) + } + chunk.mutex.RUnlock() + } + + return keys +} + +// IterCb is an iterator callback +type IterCb func(key string, v interface{}) + +// IterCb iterates over the map (cheapest way to read all elements in a map) +func (m *ConcurrentMap) IterCb(fn IterCb) { + chunks := m.getChunks() + + for _, chunk := range chunks { + chunk.mutex.RLock() + for key, value := range chunk.items { + fn(key, value) + } + chunk.mutex.RUnlock() + } 
+} + +func (m *ConcurrentMap) getChunks() []*concurrentMapChunk { + m.mutex.RLock() + defer m.mutex.RUnlock() + return m.chunks +} diff --git a/txcache/maps/concurrentMap_test.go b/txcache/maps/concurrentMap_test.go new file mode 100644 index 0000000000..705b87791e --- /dev/null +++ b/txcache/maps/concurrentMap_test.go @@ -0,0 +1,160 @@ +package maps + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewConcurrentMap(t *testing.T) { + myMap := NewConcurrentMap(4) + require.Equal(t, uint32(4), myMap.nChunks) + require.Equal(t, 4, len(myMap.chunks)) + + // 1 is minimum number of chunks + myMap = NewConcurrentMap(0) + require.Equal(t, uint32(1), myMap.nChunks) + require.Equal(t, 1, len(myMap.chunks)) +} + +func TestConcurrentMap_Get(t *testing.T) { + myMap := NewConcurrentMap(4) + myMap.Set("a", "foo") + myMap.Set("b", 42) + + a, ok := myMap.Get("a") + require.True(t, ok) + require.Equal(t, "foo", a) + + b, ok := myMap.Get("b") + require.True(t, ok) + require.Equal(t, 42, b) +} + +func TestConcurrentMap_Count(t *testing.T) { + myMap := NewConcurrentMap(4) + myMap.Set("a", "a") + myMap.Set("b", "b") + myMap.Set("c", "c") + + require.Equal(t, 3, myMap.Count()) +} + +func TestConcurrentMap_Keys(t *testing.T) { + myMap := NewConcurrentMap(4) + myMap.Set("1", 0) + myMap.Set("2", 0) + myMap.Set("3", 0) + myMap.Set("4", 0) + + require.Equal(t, 4, len(myMap.Keys())) +} + +func TestConcurrentMap_Has(t *testing.T) { + myMap := NewConcurrentMap(4) + myMap.SetIfAbsent("a", "a") + myMap.SetIfAbsent("b", "b") + + require.True(t, myMap.Has("a")) + require.True(t, myMap.Has("b")) + require.False(t, myMap.Has("c")) +} + +func TestConcurrentMap_Remove(t *testing.T) { + myMap := NewConcurrentMap(4) + myMap.SetIfAbsent("a", "a") + myMap.SetIfAbsent("b", "b") + + _, ok := myMap.Remove("b") + require.True(t, ok) + _, ok = myMap.Remove("x") + require.False(t, ok) + + require.True(t, myMap.Has("a")) + require.False(t, myMap.Has("b")) +} + +func 
TestConcurrentMap_Clear(t *testing.T) { + myMap := NewConcurrentMap(4) + myMap.SetIfAbsent("a", "a") + myMap.SetIfAbsent("b", "b") + + myMap.Clear() + + require.Equal(t, 0, myMap.Count()) +} + +func TestConcurrentMap_ClearConcurrentWithRead(t *testing.T) { + myMap := NewConcurrentMap(4) + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + for j := 0; j < 1000; j++ { + myMap.Clear() + } + + wg.Done() + }() + + go func() { + for j := 0; j < 1000; j++ { + require.Equal(t, 0, myMap.Count()) + require.Len(t, myMap.Keys(), 0) + require.Equal(t, false, myMap.Has("foobar")) + item, ok := myMap.Get("foobar") + require.Nil(t, item) + require.False(t, ok) + myMap.IterCb(func(key string, item interface{}) { + }) + } + + wg.Done() + }() + + wg.Wait() +} + +func TestConcurrentMap_ClearConcurrentWithWrite(t *testing.T) { + myMap := NewConcurrentMap(4) + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + for j := 0; j < 10000; j++ { + myMap.Clear() + } + + wg.Done() + }() + + go func() { + for j := 0; j < 10000; j++ { + myMap.Set("foobar", "foobar") + myMap.SetIfAbsent("foobar", "foobar") + _, _ = myMap.Remove("foobar") + } + + wg.Done() + }() + + wg.Wait() +} + +func TestConcurrentMap_IterCb(t *testing.T) { + myMap := NewConcurrentMap(4) + + myMap.Set("a", "a") + myMap.Set("b", "b") + myMap.Set("c", "c") + + i := 0 + myMap.IterCb(func(key string, value interface{}) { + i++ + }) + + require.Equal(t, 3, i) +} diff --git a/txcache/selection.go b/txcache/selection.go new file mode 100644 index 0000000000..10e81bffde --- /dev/null +++ b/txcache/selection.go @@ -0,0 +1,125 @@ +package txcache + +import ( + "container/heap" + "time" +) + +func (cache *TxCache) doSelectTransactions(session SelectionSession, gasRequested uint64, maxNum int, selectionLoopMaximumDuration time.Duration) (bunchOfTransactions, uint64) { + bunches := cache.acquireBunchesOfTransactions() + + return selectTransactionsFromBunches(session, bunches, gasRequested, maxNum, selectionLoopMaximumDuration) +} + 
+func (cache *TxCache) acquireBunchesOfTransactions() []bunchOfTransactions { + senders := cache.getSenders() + bunches := make([]bunchOfTransactions, 0, len(senders)) + + for _, sender := range senders { + bunches = append(bunches, sender.getTxs()) + } + + return bunches +} + +// Selection tolerates concurrent transaction additions / removals. +func selectTransactionsFromBunches(session SelectionSession, bunches []bunchOfTransactions, gasRequested uint64, maxNum int, selectionLoopMaximumDuration time.Duration) (bunchOfTransactions, uint64) { + selectedTransactions := make(bunchOfTransactions, 0, initialCapacityOfSelectionSlice) + sessionWrapper := newSelectionSessionWrapper(session) + + // Items popped from the heap are added to "selectedTransactions". + transactionsHeap := newMaxTransactionsHeap(len(bunches)) + heap.Init(transactionsHeap) + + // Initialize the heap with the first transaction of each bunch + for _, bunch := range bunches { + item, err := newTransactionsHeapItem(bunch) + if err != nil { + continue + } + + // Items will be reused (see below). Each sender gets one (and only one) item in the heap. + heap.Push(transactionsHeap, item) + } + + accumulatedGas := uint64(0) + selectionLoopStartTime := time.Now() + + // Select transactions (sorted). + for transactionsHeap.Len() > 0 { + // Always pick the best transaction. 
+ item := heap.Pop(transactionsHeap).(*transactionsHeapItem) + gasLimit := item.currentTransaction.Tx.GetGasLimit() + + if accumulatedGas+gasLimit > gasRequested { + break + } + if len(selectedTransactions) >= maxNum { + break + } + if len(selectedTransactions)%selectionLoopDurationCheckInterval == 0 { + if time.Since(selectionLoopStartTime) > selectionLoopMaximumDuration { + logSelect.Debug("TxCache.selectTransactionsFromBunches, selection loop timeout", "duration", time.Since(selectionLoopStartTime)) + break + } + } + + shouldSkipSender := detectSkippableSender(sessionWrapper, item) + if shouldSkipSender { + // Item was popped from the heap, but not used downstream. + // Therefore, the sender is completely ignored (from now on) in the current selection session. + continue + } + + shouldSkipTransaction := detectSkippableTransaction(sessionWrapper, item) + if !shouldSkipTransaction { + accumulatedGas += gasLimit + selectedTransaction := item.selectCurrentTransaction() + selectedTransactions = append(selectedTransactions, selectedTransaction) + sessionWrapper.accumulateConsumedBalance(selectedTransaction) + } + + // If there are more transactions in the same bunch (same sender as the popped item), + // add the next one to the heap (to compete with the others). + // Heap item is reused (same originating sender), pushed back on the heap. + if item.gotoNextTransaction() { + heap.Push(transactionsHeap, item) + } + } + + return selectedTransactions, accumulatedGas +} + +// Note (future micro-optimization): we can merge "detectSkippableSender()" and "detectSkippableTransaction()" into a single function, +// any share the result of "sessionWrapper.getNonce()". 
+func detectSkippableSender(sessionWrapper *selectionSessionWrapper, item *transactionsHeapItem) bool { + nonce := sessionWrapper.getNonce(item.sender) + + if item.detectInitialGap(nonce) { + return true + } + if item.detectMiddleGap() { + return true + } + if sessionWrapper.detectWillFeeExceedBalance(item.currentTransaction) { + return true + } + + return false +} + +func detectSkippableTransaction(sessionWrapper *selectionSessionWrapper, item *transactionsHeapItem) bool { + nonce := sessionWrapper.getNonce(item.sender) + + if item.detectLowerNonce(nonce) { + return true + } + if item.detectIncorrectlyGuarded(sessionWrapper) { + return true + } + if item.detectNonceDuplicate() { + return true + } + + return false +} diff --git a/txcache/selectionSessionWrapper.go b/txcache/selectionSessionWrapper.go new file mode 100644 index 0000000000..d80cb1a2f6 --- /dev/null +++ b/txcache/selectionSessionWrapper.go @@ -0,0 +1,105 @@ +package txcache + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/data" +) + +// After moving "mx-chain-storage-go/txcache" into "mx-chain-go", maybe merge this component into "SelectionSession". 
+type selectionSessionWrapper struct { + session SelectionSession + recordsByAddress map[string]*accountRecord +} + +type accountRecord struct { + initialNonce uint64 + initialBalance *big.Int + consumedBalance *big.Int +} + +func newSelectionSessionWrapper(session SelectionSession) *selectionSessionWrapper { + return &selectionSessionWrapper{ + session: session, + recordsByAddress: make(map[string]*accountRecord), + } +} + +func (sessionWrapper *selectionSessionWrapper) getAccountRecord(address []byte) *accountRecord { + record, ok := sessionWrapper.recordsByAddress[string(address)] + if ok { + return record + } + + state, err := sessionWrapper.session.GetAccountState(address) + if err != nil { + logSelect.Debug("selectionSessionWrapper.getAccountRecord, could not retrieve account state", "address", address, "err", err) + + record = &accountRecord{ + initialNonce: 0, + initialBalance: big.NewInt(0), + consumedBalance: big.NewInt(0), + } + } else { + record = &accountRecord{ + initialNonce: state.Nonce, + initialBalance: state.Balance, + consumedBalance: big.NewInt(0), + } + } + + sessionWrapper.recordsByAddress[string(address)] = record + return record +} + +func (sessionWrapper *selectionSessionWrapper) getNonce(address []byte) uint64 { + return sessionWrapper.getAccountRecord(address).initialNonce +} + +func (sessionWrapper *selectionSessionWrapper) accumulateConsumedBalance(tx *WrappedTransaction) { + sender := tx.Tx.GetSndAddr() + feePayer := tx.FeePayer + + senderRecord := sessionWrapper.getAccountRecord(sender) + feePayerRecord := sessionWrapper.getAccountRecord(feePayer) + + transferredValue := tx.TransferredValue + if transferredValue != nil { + senderRecord.consumedBalance.Add(senderRecord.consumedBalance, transferredValue) + } + + fee := tx.Fee + if fee != nil { + feePayerRecord.consumedBalance.Add(feePayerRecord.consumedBalance, fee) + } +} + +func (sessionWrapper *selectionSessionWrapper) detectWillFeeExceedBalance(tx *WrappedTransaction) bool { + fee 
:= tx.Fee + if fee == nil { + return false + } + + // Here, we are not interested into an eventual transfer of value (we only check if there's enough balance to pay the transaction fee). + feePayer := tx.FeePayer + feePayerRecord := sessionWrapper.getAccountRecord(feePayer) + + futureConsumedBalance := new(big.Int).Add(feePayerRecord.consumedBalance, fee) + feePayerBalance := feePayerRecord.initialBalance + + willFeeExceedBalance := futureConsumedBalance.Cmp(feePayerBalance) > 0 + if willFeeExceedBalance { + logSelect.Trace("selectionSessionWrapper.detectWillFeeExceedBalance", + "tx", tx.TxHash, + "feePayer", feePayer, + "initialBalance", feePayerRecord.initialBalance, + "consumedBalance", feePayerRecord.consumedBalance, + ) + } + + return willFeeExceedBalance +} + +func (sessionWrapper *selectionSessionWrapper) isIncorrectlyGuarded(tx data.TransactionHandler) bool { + return sessionWrapper.session.IsIncorrectlyGuarded(tx) +} diff --git a/txcache/selectionSessionWrapper_test.go b/txcache/selectionSessionWrapper_test.go new file mode 100644 index 0000000000..73c1647232 --- /dev/null +++ b/txcache/selectionSessionWrapper_test.go @@ -0,0 +1,318 @@ +package txcache + +import ( + "fmt" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" + "github.com/stretchr/testify/require" +) + +func TestSelectionSessionWrapper_accumulateConsumedBalance(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + t.Run("when sender is fee payer", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + sessionWrapper := newSelectionSessionWrapper(session) + + a := createTx([]byte("a-7"), "a", 7) + b := createTx([]byte("a-8"), "a", 8).withValue(oneQuintillionBig) + + a.precomputeFields(host) + b.precomputeFields(host) + + sessionWrapper.accumulateConsumedBalance(a) + require.Equal(t, "50000000000000", 
sessionWrapper.getAccountRecord([]byte("a")).consumedBalance.String()) + + sessionWrapper.accumulateConsumedBalance(b) + require.Equal(t, "1000100000000000000", sessionWrapper.getAccountRecord([]byte("a")).consumedBalance.String()) + }) + + t.Run("when relayer is fee payer", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + sessionWrapper := newSelectionSessionWrapper(session) + + a := createTx([]byte("a-7"), "a", 7).withRelayer([]byte("b")).withGasLimit(100_000) + b := createTx([]byte("a-8"), "a", 8).withValue(oneQuintillionBig).withRelayer([]byte("b")).withGasLimit(100_000) + + a.precomputeFields(host) + b.precomputeFields(host) + + sessionWrapper.accumulateConsumedBalance(a) + require.Equal(t, "0", sessionWrapper.getAccountRecord([]byte("a")).consumedBalance.String()) + require.Equal(t, "100000000000000", sessionWrapper.getAccountRecord([]byte("b")).consumedBalance.String()) + + sessionWrapper.accumulateConsumedBalance(b) + require.Equal(t, "1000000000000000000", sessionWrapper.getAccountRecord([]byte("a")).consumedBalance.String()) + require.Equal(t, "200000000000000", sessionWrapper.getAccountRecord([]byte("b")).consumedBalance.String()) + }) +} + +func TestSelectionSessionWrapper_detectWillFeeExceedBalance(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + t.Run("unknown", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + sessionWrapper := newSelectionSessionWrapper(session) + + a := createTx([]byte("tx-1"), "alice", 42) + a.precomputeFields(host) + + require.False(t, sessionWrapper.detectWillFeeExceedBalance(a)) + }) + + t.Run("will not exceed for (a) and (b), but will exceed for (c)", func(t *testing.T) { + a := createTx([]byte("tx-1"), "alice", 42) + b := createTx([]byte("tx-2"), "alice", 43).withValue(oneQuintillionBig) + c := createTx([]byte("tx-3"), "alice", 44).withValue(oneQuintillionBig) + + a.precomputeFields(host) + b.precomputeFields(host) + c.precomputeFields(host) + + session := 
txcachemocks.NewSelectionSessionMock() + sessionWrapper := newSelectionSessionWrapper(session) + + session.SetBalance([]byte("alice"), big.NewInt(oneQuintillion+50000000000000+1)) + recordAlice := sessionWrapper.getAccountRecord([]byte("alice")) + require.Equal(t, "0", recordAlice.consumedBalance.String()) + require.Equal(t, "1000050000000000001", recordAlice.initialBalance.String()) + + require.False(t, sessionWrapper.detectWillFeeExceedBalance(a)) + + sessionWrapper.accumulateConsumedBalance(a) + + require.Equal(t, "50000000000000", recordAlice.consumedBalance.String()) + + // Even though, in reality, that will be an invalid (but executable) transaction (insufficient balance). + require.False(t, sessionWrapper.detectWillFeeExceedBalance(b)) + + sessionWrapper.accumulateConsumedBalance(b) + + require.Equal(t, "1000100000000000000", recordAlice.consumedBalance.String()) + require.True(t, sessionWrapper.detectWillFeeExceedBalance(c)) + }) + + t.Run("will not exceed for (a) and (b), but will exceed for (c) (with relayed)", func(t *testing.T) { + a := createTx([]byte("tx-1"), "alice", 42).withRelayer([]byte("carol")).withGasLimit(100_000) + b := createTx([]byte("tx-2"), "alice", 43).withValue(oneQuintillionBig).withRelayer([]byte("carol")).withGasLimit(100_000) + c := createTx([]byte("tx-3"), "alice", 44).withValue(oneQuintillionBig).withRelayer([]byte("carol")).withGasLimit(100_000) + + a.precomputeFields(host) + b.precomputeFields(host) + c.precomputeFields(host) + + session := txcachemocks.NewSelectionSessionMock() + sessionWrapper := newSelectionSessionWrapper(session) + + session.SetBalance([]byte("alice"), big.NewInt(oneQuintillion)) + session.SetBalance([]byte("carol"), big.NewInt(100000000000000+100000000000000+1)) + recordAlice := sessionWrapper.getAccountRecord([]byte("alice")) + recordCarol := sessionWrapper.getAccountRecord([]byte("carol")) + require.Equal(t, "0", recordAlice.consumedBalance.String()) + require.Equal(t, "1000000000000000000", 
recordAlice.initialBalance.String()) + require.Equal(t, "0", recordCarol.consumedBalance.String()) + require.Equal(t, "200000000000001", recordCarol.initialBalance.String()) + + require.False(t, sessionWrapper.detectWillFeeExceedBalance(a)) + + sessionWrapper.accumulateConsumedBalance(a) + + require.Equal(t, "0", recordAlice.consumedBalance.String()) + require.Equal(t, "100000000000000", recordCarol.consumedBalance.String()) + + require.False(t, sessionWrapper.detectWillFeeExceedBalance(b)) + + sessionWrapper.accumulateConsumedBalance(b) + + require.Equal(t, "1000000000000000000", recordAlice.consumedBalance.String()) + require.Equal(t, "200000000000000", recordCarol.consumedBalance.String()) + require.True(t, sessionWrapper.detectWillFeeExceedBalance(c)) + }) +} + +func TestBenchmarkSelectionSessionWrapper_getNonce(t *testing.T) { + sw := core.NewStopWatch() + + t.Run("numAccounts = 300, numTransactionsPerAccount = 100", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + sessionWrapper := newSelectionSessionWrapper(session) + + numAccounts := 300 + numTransactionsPerAccount := 100 + // See "detectSkippableSender()" and "detectSkippableTransaction()". 
+ numCallsGetNoncePerTransaction := 2 + numCallsGetNoncePerAccount := numTransactionsPerAccount * numCallsGetNoncePerTransaction + + for i := 0; i < numAccounts; i++ { + session.SetNonce(randomAddresses.getItem(i), uint64(i)) + } + + sw.Start(t.Name()) + + for i := 0; i < numAccounts; i++ { + for j := 0; j < numCallsGetNoncePerAccount; j++ { + _ = sessionWrapper.getNonce(randomAddresses.getItem(i)) + } + } + + sw.Stop(t.Name()) + + require.Equal(t, numAccounts, session.NumCallsGetAccountState) + }) + + t.Run("numAccounts = 10_000, numTransactionsPerAccount = 3", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + sessionWrapper := newSelectionSessionWrapper(session) + + numAccounts := 10_000 + numTransactionsPerAccount := 3 + // See "detectSkippableSender()" and "detectSkippableTransaction()". + numCallsGetNoncePerTransaction := 2 + numCallsGetNoncePerAccount := numTransactionsPerAccount * numCallsGetNoncePerTransaction + + for i := 0; i < numAccounts; i++ { + session.SetNonce(randomAddresses.getItem(i), uint64(i)) + } + + sw.Start(t.Name()) + + for i := 0; i < numAccounts; i++ { + for j := 0; j < numCallsGetNoncePerAccount; j++ { + _ = sessionWrapper.getNonce(randomAddresses.getItem(i)) + } + } + + sw.Stop(t.Name()) + + require.Equal(t, numAccounts, session.NumCallsGetAccountState) + }) + + t.Run("numAccounts = 30_000, numTransactionsPerAccount = 1", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + sessionWrapper := newSelectionSessionWrapper(session) + + numAccounts := 30_000 + numTransactionsPerAccount := 1 + // See "detectSkippableSender()" and "detectSkippableTransaction()". 
+ numCallsGetNoncePerTransaction := 2 + numCallsGetNoncePerAccount := numTransactionsPerAccount * numCallsGetNoncePerTransaction + + for i := 0; i < numAccounts; i++ { + session.SetNonce(randomAddresses.getItem(i), uint64(i)) + } + + sw.Start(t.Name()) + + for i := 0; i < numAccounts; i++ { + for j := 0; j < numCallsGetNoncePerAccount; j++ { + _ = sessionWrapper.getNonce(randomAddresses.getItem(i)) + } + } + + sw.Stop(t.Name()) + + require.Equal(t, numAccounts, session.NumCallsGetAccountState) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) + } + + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // Session wrapper operations should have a negligible (or small) impact on the performance! + // 0.000826s (TestBenchmarkSelectionSessionWrapper_getNonce/_numAccounts_=_300,_numTransactionsPerAccount=_100) + // 0.003263s (TestBenchmarkSelectionSessionWrapper_getNonce/_numAccounts_=_10_000,_numTransactionsPerAccount=_3) + // 0.010291s (TestBenchmarkSelectionSessionWrapper_getNonce/_numAccounts_=_30_000,_numTransactionsPerAccount=_1) +} + +func TestBenchmarkSelectionSessionWrapper_detectWillFeeExceedBalance(t *testing.T) { + sw := core.NewStopWatch() + + t.Run("numSenders = 300, numTransactionsPerSender = 100", func(t *testing.T) { + doTestBenchmarkSelectionSessionWrapper_detectWillFeeExceedBalance(t, sw, 300, 100) + }) + + t.Run("numSenders = 10_000, numTransactionsPerSender = 3", func(t *testing.T) { + doTestBenchmarkSelectionSessionWrapper_detectWillFeeExceedBalance(t, sw, 10_000, 3) + }) + + t.Run("numSenders = 30_000, numTransactionsPerSender = 1", func(t *testing.T) { + doTestBenchmarkSelectionSessionWrapper_detectWillFeeExceedBalance(t, sw, 30_000, 1) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, 
name) + } + + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // Session wrapper operations should have a negligible (or small) impact on the performance! + // 0.006629s (TestBenchmarkSelectionSessionWrapper_detectWillFeeExceedBalance/numSenders_=_300,_numTransactionsPerSender_=_100) + // 0.010478s (TestBenchmarkSelectionSessionWrapper_detectWillFeeExceedBalance/numSenders_=_10_000,_numTransactionsPerSender_=_3) + // 0.030631s (TestBenchmarkSelectionSessionWrapper_detectWillFeeExceedBalance/numSenders_=_30_000,_numTransactionsPerSender_=_1) +} + +func doTestBenchmarkSelectionSessionWrapper_detectWillFeeExceedBalance(t *testing.T, sw *core.StopWatch, numSenders int, numTransactionsPerSender int) { + fee := 100000000000000 + transferredValue := 42 + + session := txcachemocks.NewSelectionSessionMock() + sessionWrapper := newSelectionSessionWrapper(session) + + for i := 0; i < numSenders; i++ { + session.SetBalance(randomAddresses.getItem(i), oneQuintillionBig) + } + + transactions := make([]*WrappedTransaction, numSenders*numTransactionsPerSender) + + for i := 0; i < numTransactionsPerSender; i++ { + for j := 0; j < numSenders; j++ { + sender := randomAddresses.getItem(j) + feePayer := randomAddresses.getTailItem(j) + + transactions[j*numTransactionsPerSender+i] = &WrappedTransaction{ + Tx: &transaction.Transaction{SndAddr: sender}, + Fee: big.NewInt(int64(fee)), + TransferredValue: big.NewInt(int64(transferredValue)), + FeePayer: feePayer, + } + } + } + + sw.Start(t.Name()) + + for _, tx := range transactions { + if sessionWrapper.detectWillFeeExceedBalance(tx) { + require.Fail(t, "unexpected") + } + + sessionWrapper.accumulateConsumedBalance(tx) + } + + sw.Stop(t.Name()) + + for i := 0; i < numSenders; i++ { + senderRecord := sessionWrapper.getAccountRecord(randomAddresses.getItem(i)) + feePayerRecord := 
sessionWrapper.getAccountRecord(randomAddresses.getTailItem(i)) + + require.Equal(t, transferredValue*numTransactionsPerSender, int(senderRecord.consumedBalance.Uint64())) + require.Equal(t, fee*numTransactionsPerSender, int(feePayerRecord.consumedBalance.Uint64())) + } +} diff --git a/txcache/selection_test.go b/txcache/selection_test.go new file mode 100644 index 0000000000..18a052a782 --- /dev/null +++ b/txcache/selection_test.go @@ -0,0 +1,579 @@ +package txcache + +import ( + "bytes" + "fmt" + "math" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" + "github.com/stretchr/testify/require" +) + +func TestTxCache_SelectTransactions_Dummy(t *testing.T) { + t.Run("all having same PPU", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetNonce([]byte("bob"), 5) + session.SetNonce([]byte("carol"), 1) + + cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) + cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 6)) + cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5)) + cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1)) + + selected, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + require.Len(t, selected, 8) + require.Equal(t, 400000, int(accumulatedGas)) + + // Check order + require.Equal(t, "hash-alice-1", string(selected[0].TxHash)) + require.Equal(t, "hash-alice-2", string(selected[1].TxHash)) + require.Equal(t, "hash-alice-3", string(selected[2].TxHash)) + require.Equal(t, "hash-alice-4", 
string(selected[3].TxHash)) + require.Equal(t, "hash-bob-5", string(selected[4].TxHash)) + require.Equal(t, "hash-bob-6", string(selected[5].TxHash)) + require.Equal(t, "hash-bob-7", string(selected[6].TxHash)) + require.Equal(t, "hash-carol-1", string(selected[7].TxHash)) + }) + + t.Run("alice > carol > bob", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetNonce([]byte("bob"), 5) + session.SetNonce([]byte("carol"), 3) + + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1).withGasPrice(100)) + cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasPrice(50)) + cache.AddTx(createTx([]byte("hash-carol-3"), "carol", 3).withGasPrice(75)) + + selected, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + require.Len(t, selected, 3) + require.Equal(t, 150000, int(accumulatedGas)) + + // Check order + require.Equal(t, "hash-alice-1", string(selected[0].TxHash)) + require.Equal(t, "hash-carol-3", string(selected[1].TxHash)) + require.Equal(t, "hash-bob-5", string(selected[2].TxHash)) + }) +} + +func TestTxCache_SelectTransactionsWithBandwidth_Dummy(t *testing.T) { + t.Run("transactions with no data field", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetNonce([]byte("bob"), 5) + session.SetNonce([]byte("carol"), 1) + + cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2).withGasLimit(500000)) + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1).withGasLimit(200000)) + cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7).withGasLimit(400000)) + cache.AddTx(createTx([]byte("hash-bob-6"), "bob", 
6).withGasLimit(50000)) + cache.AddTx(createTx([]byte("hash-bob-5"), "bob", 5).withGasLimit(50000)) + cache.AddTx(createTx([]byte("hash-carol-1"), "carol", 1).withGasLimit(50000)) + + selected, accumulatedGas := cache.SelectTransactions(session, 760000, math.MaxInt, selectionLoopMaximumDuration) + require.Len(t, selected, 5) + require.Equal(t, 750000, int(accumulatedGas)) + + // Check order + require.Equal(t, "hash-bob-5", string(selected[0].TxHash)) + require.Equal(t, "hash-bob-6", string(selected[1].TxHash)) + require.Equal(t, "hash-carol-1", string(selected[2].TxHash)) + require.Equal(t, "hash-alice-1", string(selected[3].TxHash)) + require.Equal(t, "hash-bob-7", string(selected[4].TxHash)) + }) +} + +func TestTxCache_SelectTransactions_HandlesNotExecutableTransactions(t *testing.T) { + t.Run("with middle gaps", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetNonce([]byte("bob"), 42) + session.SetNonce([]byte("carol"), 7) + + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) + cache.AddTx(createTx([]byte("hash-alice-5"), "alice", 5)) // gap + cache.AddTx(createTx([]byte("hash-bob-42"), "bob", 42)) + cache.AddTx(createTx([]byte("hash-bob-44"), "bob", 44)) // gap + cache.AddTx(createTx([]byte("hash-bob-45"), "bob", 45)) + cache.AddTx(createTx([]byte("hash-carol-7"), "carol", 7)) + cache.AddTx(createTx([]byte("hash-carol-8"), "carol", 8)) + cache.AddTx(createTx([]byte("hash-carol-10"), "carol", 10)) // gap + cache.AddTx(createTx([]byte("hash-carol-11"), "carol", 11)) + + sorted, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + expectedNumSelected := 3 + 1 + 2 // 3 alice + 1 bob + 2 carol + require.Len(t, sorted, expectedNumSelected) + require.Equal(t, 300000, 
int(accumulatedGas)) + }) + + t.Run("with initial gaps", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetNonce([]byte("bob"), 42) + session.SetNonce([]byte("carol"), 7) + + // Good + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) + + // Initial gap + cache.AddTx(createTx([]byte("hash-bob-42"), "bob", 44)) + cache.AddTx(createTx([]byte("hash-bob-43"), "bob", 45)) + cache.AddTx(createTx([]byte("hash-bob-44"), "bob", 46)) + + // Good + cache.AddTx(createTx([]byte("hash-carol-7"), "carol", 7)) + cache.AddTx(createTx([]byte("hash-carol-8"), "carol", 8)) + + sorted, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + expectedNumSelected := 3 + 0 + 2 // 3 alice + 0 bob + 2 carol + require.Len(t, sorted, expectedNumSelected) + require.Equal(t, 250000, int(accumulatedGas)) + }) + + t.Run("with lower nonces", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetNonce([]byte("bob"), 42) + session.SetNonce([]byte("carol"), 7) + + // Good + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) + + // A few with lower nonce + cache.AddTx(createTx([]byte("hash-bob-42"), "bob", 40)) + cache.AddTx(createTx([]byte("hash-bob-43"), "bob", 41)) + cache.AddTx(createTx([]byte("hash-bob-44"), "bob", 42)) + + // Good + cache.AddTx(createTx([]byte("hash-carol-7"), "carol", 7)) + cache.AddTx(createTx([]byte("hash-carol-8"), "carol", 8)) + + sorted, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, 
selectionLoopMaximumDuration) + expectedNumSelected := 3 + 1 + 2 // 3 alice + 1 bob + 2 carol + require.Len(t, sorted, expectedNumSelected) + require.Equal(t, 300000, int(accumulatedGas)) + }) + + t.Run("with duplicated nonces", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-alice-3a"), "alice", 3)) + cache.AddTx(createTx([]byte("hash-alice-3b"), "alice", 3).withGasPrice(oneBillion * 2)) + cache.AddTx(createTx([]byte("hash-alice-3c"), "alice", 3)) + cache.AddTx(createTx([]byte("hash-alice-4"), "alice", 4)) + + sorted, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + require.Len(t, sorted, 4) + require.Equal(t, 200000, int(accumulatedGas)) + + require.Equal(t, "hash-alice-1", string(sorted[0].TxHash)) + require.Equal(t, "hash-alice-2", string(sorted[1].TxHash)) + require.Equal(t, "hash-alice-3b", string(sorted[2].TxHash)) + require.Equal(t, "hash-alice-4", string(sorted[3].TxHash)) + }) + + t.Run("with fee exceeding balance", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetBalance([]byte("alice"), big.NewInt(150000000000000)) + session.SetNonce([]byte("bob"), 42) + session.SetBalance([]byte("bob"), big.NewInt(70000000000000)) + + // Enough balance + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-alice-3"), "alice", 3)) + + // Not enough balance + cache.AddTx(createTx([]byte("hash-bob-42"), "bob", 40)) + cache.AddTx(createTx([]byte("hash-bob-43"), "bob", 41)) + cache.AddTx(createTx([]byte("hash-bob-44"), "bob", 42)) + + 
sorted, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + expectedNumSelected := 3 + 1 // 3 alice + 1 bob + require.Len(t, sorted, expectedNumSelected) + require.Equal(t, 200000, int(accumulatedGas)) + }) + + t.Run("with incorrectly guarded", func(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + session.SetNonce([]byte("alice"), 1) + session.SetNonce([]byte("bob"), 42) + + session.IsIncorrectlyGuardedCalled = func(tx data.TransactionHandler) bool { + return bytes.Equal(tx.GetData(), []byte("t")) + } + + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1).withData([]byte("x")).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-bob-42a"), "bob", 42).withData([]byte("y")).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-bob-43a"), "bob", 43).withData([]byte("z")).withGasLimit(100000)) + cache.AddTx(createTx([]byte("hash-bob-43b"), "bob", 43).withData([]byte("t")).withGasLimit(100000)) + + sorted, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + require.Len(t, sorted, 3) + require.Equal(t, 300000, int(accumulatedGas)) + + require.Equal(t, "hash-alice-1", string(sorted[0].TxHash)) + require.Equal(t, "hash-bob-42a", string(sorted[1].TxHash)) + require.Equal(t, "hash-bob-43a", string(sorted[2].TxHash)) + }) +} + +func TestTxCache_SelectTransactions_WhenTransactionsAddedInReversedNonceOrder(t *testing.T) { + cache := newUnconstrainedCacheToTest() + session := txcachemocks.NewSelectionSessionMock() + + // Add "nSenders" * "nTransactionsPerSender" transactions in the cache (in reversed nonce order) + nSenders := 1000 + nTransactionsPerSender := 100 + nTotalTransactions := nSenders * nTransactionsPerSender + + for senderTag := 0; senderTag < nSenders; senderTag++ { + sender := fmt.Sprintf("sender:%d", senderTag) + + for txNonce := nTransactionsPerSender - 1; txNonce >= 
0; txNonce-- { + txHash := fmt.Sprintf("hash:%d:%d", senderTag, txNonce) + tx := createTx([]byte(txHash), sender, uint64(txNonce)) + cache.AddTx(tx) + } + } + + require.Equal(t, uint64(nTotalTransactions), cache.CountTx()) + + sorted, accumulatedGas := cache.SelectTransactions(session, math.MaxUint64, math.MaxInt, selectionLoopMaximumDuration) + require.Len(t, sorted, nTotalTransactions) + require.Equal(t, 5_000_000_000, int(accumulatedGas)) + + // Check order + nonces := make(map[string]uint64, nSenders) + + for _, tx := range sorted { + nonce := tx.Tx.GetNonce() + sender := string(tx.Tx.GetSndAddr()) + previousNonce := nonces[sender] + + require.LessOrEqual(t, previousNonce, nonce) + nonces[sender] = nonce + } +} + +func TestTxCache_selectTransactionsFromBunches(t *testing.T) { + t.Run("empty cache", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + selected, accumulatedGas := selectTransactionsFromBunches(session, []bunchOfTransactions{}, 10_000_000_000, math.MaxInt, selectionLoopMaximumDuration) + + require.Equal(t, 0, len(selected)) + require.Equal(t, uint64(0), accumulatedGas) + }) +} + +func TestBenchmarkTxCache_acquireBunchesOfTransactions(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: 1000000000, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 300001, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: false, + NumItemsToPreemptivelyEvict: 1, + } + + host := txcachemocks.NewMempoolHostMock() + + sw := core.NewStopWatch() + + t.Run("numSenders = 10000, numTransactions = 100", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 10000, 100) + + require.Equal(t, 1000000, int(cache.CountTx())) + + sw.Start(t.Name()) + bunches := cache.acquireBunchesOfTransactions() + sw.Stop(t.Name()) + + require.Len(t, bunches, 10000) + require.Len(t, bunches[0], 100) + 
require.Len(t, bunches[len(bunches)-1], 100) + }) + + t.Run("numSenders = 50000, numTransactions = 2", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 50000, 2) + + require.Equal(t, 100000, int(cache.CountTx())) + + sw.Start(t.Name()) + bunches := cache.acquireBunchesOfTransactions() + sw.Stop(t.Name()) + + require.Len(t, bunches, 50000) + require.Len(t, bunches[0], 2) + require.Len(t, bunches[len(bunches)-1], 2) + }) + + t.Run("numSenders = 100000, numTransactions = 1", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 100000, 1) + + require.Equal(t, 100000, int(cache.CountTx())) + + sw.Start(t.Name()) + bunches := cache.acquireBunchesOfTransactions() + sw.Stop(t.Name()) + + require.Len(t, bunches, 100000) + require.Len(t, bunches[0], 1) + require.Len(t, bunches[len(bunches)-1], 1) + }) + + t.Run("numSenders = 300000, numTransactions = 1", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 300000, 1) + + require.Equal(t, 300000, int(cache.CountTx())) + + sw.Start(t.Name()) + bunches := cache.acquireBunchesOfTransactions() + sw.Stop(t.Name()) + + require.Len(t, bunches, 300000) + require.Len(t, bunches[0], 1) + require.Len(t, bunches[len(bunches)-1], 1) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) + } + + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // 0.014468s (TestBenchmarkTxCache_acquireBunchesOfTransactions/numSenders_=_10000,_numTransactions_=_100) + // 0.019183s (TestBenchmarkTxCache_acquireBunchesOfTransactions/numSenders_=_50000,_numTransactions_=_2) + // 0.013876s 
(TestBenchmarkTxCache_acquireBunchesOfTransactions/numSenders_=_100000,_numTransactions_=_1) + // 0.056631s (TestBenchmarkTxCache_acquireBunchesOfTransactions/numSenders_=_300000,_numTransactions_=_1) +} + +func TestBenchmarkTxCache_selectTransactionsFromBunches(t *testing.T) { + sw := core.NewStopWatch() + + t.Run("numSenders = 1000, numTransactions = 1000", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + bunches := createBunchesOfTransactionsWithUniformDistribution(1000, 1000) + + sw.Start(t.Name()) + selected, accumulatedGas := selectTransactionsFromBunches(session, bunches, 10_000_000_000, math.MaxInt, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 200000, len(selected)) + require.Equal(t, uint64(10_000_000_000), accumulatedGas) + }) + + t.Run("numSenders = 10000, numTransactions = 100", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + bunches := createBunchesOfTransactionsWithUniformDistribution(10000, 100) + + sw.Start(t.Name()) + selected, accumulatedGas := selectTransactionsFromBunches(session, bunches, 10_000_000_000, math.MaxInt, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 200000, len(selected)) + require.Equal(t, uint64(10_000_000_000), accumulatedGas) + }) + + t.Run("numSenders = 100000, numTransactions = 3", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + bunches := createBunchesOfTransactionsWithUniformDistribution(100000, 3) + + sw.Start(t.Name()) + selected, accumulatedGas := selectTransactionsFromBunches(session, bunches, 10_000_000_000, math.MaxInt, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 200000, len(selected)) + require.Equal(t, uint64(10_000_000_000), accumulatedGas) + }) + + t.Run("numSenders = 300000, numTransactions = 1", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + bunches := createBunchesOfTransactionsWithUniformDistribution(300000, 1) + + 
sw.Start(t.Name()) + selected, accumulatedGas := selectTransactionsFromBunches(session, bunches, 10_000_000_000, math.MaxInt, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 200000, len(selected)) + require.Equal(t, uint64(10_000_000_000), accumulatedGas) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) + } + + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // 0.074999s (TestBenchmarkTxCache_selectTransactionsFromBunches/numSenders_=_1000,_numTransactions_=_1000) + // 0.059256s (TestBenchmarkTxCache_selectTransactionsFromBunches/numSenders_=_10000,_numTransactions_=_100) + // 0.389317s (TestBenchmarkTxCache_selectTransactionsFromBunches/numSenders_=_100000,_numTransactions_=_3) + // 0.498457s (TestBenchmarkTxCache_selectTransactionsFromBunches/numSenders_=_300000,_numTransactions_=_1) +} + +func TestTxCache_selectTransactionsFromBunches_loopBreaks_whenTakesTooLong(t *testing.T) { + t.Run("numSenders = 300000, numTransactions = 1", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + bunches := createBunchesOfTransactionsWithUniformDistribution(300000, 1) + selected, accumulatedGas := selectTransactionsFromBunches(session, bunches, 10_000_000_000, 50_000, 1*time.Millisecond) + + require.Less(t, len(selected), 50_000) + require.Less(t, int(accumulatedGas), 10_000_000_000) + }) +} + +func TestBenchmarkTxCache_doSelectTransactions(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: 1000000000, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 300001, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: false, + NumItemsToPreemptivelyEvict: 1, + } + + host := txcachemocks.NewMempoolHostMock() + session := 
txcachemocks.NewSelectionSessionMock() + + sw := core.NewStopWatch() + + t.Run("numSenders = 10000, numTransactions = 100, maxNum = 30_000", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 10000, 100) + + require.Equal(t, 1000000, int(cache.CountTx())) + + sw.Start(t.Name()) + selected, accumulatedGas := cache.SelectTransactions(session, 10_000_000_000, 30_000, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 30_000, len(selected)) + require.Equal(t, uint64(1_500_000_000), accumulatedGas) + }) + + t.Run("numSenders = 50000, numTransactions = 2, maxNum = 30_000", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 50000, 2) + + require.Equal(t, 100000, int(cache.CountTx())) + + sw.Start(t.Name()) + selected, accumulatedGas := cache.SelectTransactions(session, 10_000_000_000, 30_000, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 30_000, len(selected)) + require.Equal(t, uint64(1_500_000_000), accumulatedGas) + }) + + t.Run("numSenders = 100000, numTransactions = 1, maxNum = 30_000", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 100000, 1) + + require.Equal(t, 100000, int(cache.CountTx())) + + sw.Start(t.Name()) + selected, accumulatedGas := cache.SelectTransactions(session, 10_000_000_000, 30_000, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 30_000, len(selected)) + require.Equal(t, uint64(1_500_000_000), accumulatedGas) + }) + + t.Run("numSenders = 300000, numTransactions = 1, maxNum = 30_000", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + addManyTransactionsWithUniformDistribution(cache, 300000, 1) + + require.Equal(t, 300000, int(cache.CountTx())) + + sw.Start(t.Name()) + selected, 
accumulatedGas := cache.SelectTransactions(session, 10_000_000_000, 30_000, selectionLoopMaximumDuration) + sw.Stop(t.Name()) + + require.Equal(t, 30_000, len(selected)) + require.Equal(t, uint64(1_500_000_000), accumulatedGas) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) + } + + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // 0.048709s (TestBenchmarkTxCache_doSelectTransactions/numSenders_=_10000,_numTransactions_=_100,_maxNum_=_30_000) + // 0.076177s (TestBenchmarkTxCache_doSelectTransactions/numSenders_=_50000,_numTransactions_=_2,_maxNum_=_30_000) + // 0.104399s (TestBenchmarkTxCache_doSelectTransactions/numSenders_=_100000,_numTransactions_=_1,_maxNum_=_30_000) + // 0.319060s (TestBenchmarkTxCache_doSelectTransactions/numSenders_=_300000,_numTransactions_=_1,_maxNum_=_30_000) +} diff --git a/txcache/testutils_test.go b/txcache/testutils_test.go new file mode 100644 index 0000000000..2f40cd41ef --- /dev/null +++ b/txcache/testutils_test.go @@ -0,0 +1,270 @@ +package txcache + +import ( + cryptoRand "crypto/rand" + "encoding/binary" + "math" + "math/big" + "math/rand" + "sync" + "time" + + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" +) + +const oneMilion = 1000000 +const oneBillion = oneMilion * 1000 +const oneQuintillion = 1_000_000_000_000_000_000 +const estimatedSizeOfBoundedTxFields = uint64(128) +const hashLength = 32 +const addressLength = 32 + +var oneQuintillionBig = big.NewInt(oneQuintillion) + +// The GitHub Actions runners are (extremely) slow. 
+const selectionLoopMaximumDuration = 30 * time.Second + +var randomHashes = newRandomData(math.MaxUint16, hashLength) +var randomAddresses = newRandomData(math.MaxUint16, addressLength) + +type randomData struct { + randomBytes []byte + numItems int + itemSize int +} + +func newRandomData(numItems int, itemSize int) *randomData { + randomBytes := make([]byte, numItems*itemSize) + + _, err := cryptoRand.Read(randomBytes) + if err != nil { + panic(err) + } + + return &randomData{ + randomBytes: randomBytes, + numItems: numItems, + itemSize: itemSize, + } +} + +func (data *randomData) getItem(index int) []byte { + start := index * data.itemSize + end := start + data.itemSize + return data.randomBytes[start:end] +} + +func (data *randomData) getTailItem(index int) []byte { + start := (data.numItems - 1 - index) * data.itemSize + end := start + data.itemSize + return data.randomBytes[start:end] +} + +func (cache *TxCache) areInternalMapsConsistent() bool { + internalMapByHash := cache.txByHash + internalMapBySender := cache.txListBySender + + senders := internalMapBySender.getSenders() + numInMapByHash := len(internalMapByHash.keys()) + numInMapBySender := 0 + numMissingInMapByHash := 0 + + for _, sender := range senders { + numInMapBySender += int(sender.countTx()) + + for _, hash := range sender.getTxsHashes() { + _, ok := internalMapByHash.getTx(string(hash)) + if !ok { + numMissingInMapByHash++ + } + } + } + + isFine := (numInMapByHash == numInMapBySender) && (numMissingInMapByHash == 0) + return isFine +} + +func (cache *TxCache) getHashesForSender(sender string) []string { + return cache.getListForSender(sender).getTxHashesAsStrings() +} + +func (cache *TxCache) getListForSender(sender string) *txListForSender { + return cache.txListBySender.testGetListForSender(sender) +} + +func (txMap *txListBySenderMap) testGetListForSender(sender string) *txListForSender { + list, ok := txMap.getListForSender(sender) + if !ok { + panic("sender not in cache") + } + + return 
list +} + +func (listForSender *txListForSender) getTxHashesAsStrings() []string { + hashes := listForSender.getTxsHashes() + return hashesAsStrings(hashes) +} + +func (listForSender *txListForSender) getTxsHashes() [][]byte { + listForSender.mutex.RLock() + defer listForSender.mutex.RUnlock() + + result := make([][]byte, 0, listForSender.countTx()) + + for element := listForSender.items.Front(); element != nil; element = element.Next() { + value := element.Value.(*WrappedTransaction) + result = append(result, value.TxHash) + } + + return result +} + +func hashesAsStrings(hashes [][]byte) []string { + result := make([]string, len(hashes)) + for i := 0; i < len(hashes); i++ { + result[i] = string(hashes[i]) + } + + return result +} + +func hashesAsBytes(hashes []string) [][]byte { + result := make([][]byte, len(hashes)) + for i := 0; i < len(hashes); i++ { + result[i] = []byte(hashes[i]) + } + + return result +} + +func addManyTransactionsWithUniformDistribution(cache *TxCache, nSenders int, nTransactionsPerSender int) { + for senderTag := 0; senderTag < nSenders; senderTag++ { + sender := createFakeSenderAddress(senderTag) + + for nonce := nTransactionsPerSender - 1; nonce >= 0; nonce-- { + transactionHash := createFakeTxHash(sender, nonce) + gasPrice := oneBillion + rand.Intn(3*oneBillion) + transaction := createTx(transactionHash, string(sender), uint64(nonce)).withGasPrice(uint64(gasPrice)) + + cache.AddTx(transaction) + } + } +} + +func createBunchesOfTransactionsWithUniformDistribution(nSenders int, nTransactionsPerSender int) []bunchOfTransactions { + bunches := make([]bunchOfTransactions, 0, nSenders) + host := txcachemocks.NewMempoolHostMock() + + for senderTag := 0; senderTag < nSenders; senderTag++ { + bunch := make(bunchOfTransactions, 0, nTransactionsPerSender) + sender := createFakeSenderAddress(senderTag) + + for nonce := 0; nonce < nTransactionsPerSender; nonce++ { + transactionHash := createFakeTxHash(sender, nonce) + gasPrice := oneBillion + 
rand.Intn(3*oneBillion) + transaction := createTx(transactionHash, string(sender), uint64(nonce)).withGasPrice(uint64(gasPrice)) + transaction.precomputeFields(host) + + bunch = append(bunch, transaction) + } + + bunches = append(bunches, bunch) + } + + return bunches +} + +func createTx(hash []byte, sender string, nonce uint64) *WrappedTransaction { + tx := &transaction.Transaction{ + SndAddr: []byte(sender), + Nonce: nonce, + GasLimit: 50000, + GasPrice: oneBillion, + } + + return &WrappedTransaction{ + Tx: tx, + TxHash: hash, + Size: int64(estimatedSizeOfBoundedTxFields), + } +} + +func (wrappedTx *WrappedTransaction) withSize(size uint64) *WrappedTransaction { + dataLength := size - estimatedSizeOfBoundedTxFields + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.Data = make([]byte, dataLength) + wrappedTx.Size = int64(size) + return wrappedTx +} + +func (wrappedTx *WrappedTransaction) withData(data []byte) *WrappedTransaction { + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.Data = data + wrappedTx.Size = int64(len(data)) + int64(estimatedSizeOfBoundedTxFields) + return wrappedTx +} + +func (wrappedTx *WrappedTransaction) withDataLength(dataLength int) *WrappedTransaction { + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.Data = make([]byte, dataLength) + wrappedTx.Size = int64(dataLength) + int64(estimatedSizeOfBoundedTxFields) + return wrappedTx +} + +func (wrappedTx *WrappedTransaction) withGasPrice(gasPrice uint64) *WrappedTransaction { + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.GasPrice = gasPrice + return wrappedTx +} + +func (wrappedTx *WrappedTransaction) withGasLimit(gasLimit uint64) *WrappedTransaction { + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.GasLimit = gasLimit + return wrappedTx +} + +func (wrappedTx *WrappedTransaction) withValue(value *big.Int) *WrappedTransaction { + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.Value = value + return wrappedTx +} + +func (wrappedTx *WrappedTransaction) withRelayer(relayer 
[]byte) *WrappedTransaction { + tx := wrappedTx.Tx.(*transaction.Transaction) + tx.RelayerAddr = relayer + return wrappedTx +} + +func createFakeSenderAddress(senderTag int) []byte { + bytes := make([]byte, 32) + binary.LittleEndian.PutUint64(bytes, uint64(senderTag)) + binary.LittleEndian.PutUint64(bytes[24:], uint64(senderTag)) + return bytes +} + +func createFakeTxHash(fakeSenderAddress []byte, nonce int) []byte { + bytes := make([]byte, 32) + copy(bytes, fakeSenderAddress) + binary.LittleEndian.PutUint64(bytes[8:], uint64(nonce)) + binary.LittleEndian.PutUint64(bytes[16:], uint64(nonce)) + return bytes +} + +// waitTimeout waits for the waitgroup for the specified max timeout. +// Returns true if waiting timed out. +// Reference: https://stackoverflow.com/a/32843750/1475331 +func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool { + c := make(chan struct{}) + go func() { + defer close(c) + wg.Wait() + }() + select { + case <-c: + return false // completed normally + case <-time.After(timeout): + return true // timed out + } +} diff --git a/txcache/transactionsHeap.go b/txcache/transactionsHeap.go new file mode 100644 index 0000000000..28b4e0724a --- /dev/null +++ b/txcache/transactionsHeap.go @@ -0,0 +1,59 @@ +package txcache + +type transactionsHeap struct { + items []*transactionsHeapItem + less func(i, j int) bool +} + +func newMinTransactionsHeap(capacity int) *transactionsHeap { + h := transactionsHeap{ + items: make([]*transactionsHeapItem, 0, capacity), + } + + h.less = func(i, j int) bool { + return h.items[j].isCurrentTransactionMoreValuableForNetwork(h.items[i]) + } + + return &h +} + +func newMaxTransactionsHeap(capacity int) *transactionsHeap { + h := transactionsHeap{ + items: make([]*transactionsHeapItem, 0, capacity), + } + + h.less = func(i, j int) bool { + return h.items[i].isCurrentTransactionMoreValuableForNetwork(h.items[j]) + } + + return &h +} + +// Len returns the number of elements in the heap. 
+func (h *transactionsHeap) Len() int { return len(h.items) } + +// Less reports whether the element with index i should sort before the element with index j. +func (h *transactionsHeap) Less(i, j int) bool { + return h.less(i, j) +} + +// Swap swaps the elements with indexes i and j. +func (h *transactionsHeap) Swap(i, j int) { + h.items[i], h.items[j] = h.items[j], h.items[i] +} + +// Push pushes the element x onto the heap. +func (h *transactionsHeap) Push(x interface{}) { + h.items = append(h.items, x.(*transactionsHeapItem)) +} + +// Pop removes and returns the minimum element (according to "h.less") from the heap. +func (h *transactionsHeap) Pop() interface{} { + // Standard code when storing the heap in a slice: + // https://pkg.go.dev/container/heap + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} diff --git a/txcache/transactionsHeapItem.go b/txcache/transactionsHeapItem.go new file mode 100644 index 0000000000..b98ec50877 --- /dev/null +++ b/txcache/transactionsHeapItem.go @@ -0,0 +1,132 @@ +package txcache + +type transactionsHeapItem struct { + sender []byte + bunch bunchOfTransactions + + currentTransactionIndex int + currentTransaction *WrappedTransaction + currentTransactionNonce uint64 + latestSelectedTransaction *WrappedTransaction + latestSelectedTransactionNonce uint64 +} + +func newTransactionsHeapItem(bunch bunchOfTransactions) (*transactionsHeapItem, error) { + if len(bunch) == 0 { + return nil, errEmptyBunchOfTransactions + } + + firstTransaction := bunch[0] + + return &transactionsHeapItem{ + sender: firstTransaction.Tx.GetSndAddr(), + bunch: bunch, + + currentTransactionIndex: 0, + currentTransaction: firstTransaction, + currentTransactionNonce: firstTransaction.Tx.GetNonce(), + latestSelectedTransaction: nil, + }, nil +} + +func (item *transactionsHeapItem) selectCurrentTransaction() *WrappedTransaction { + item.latestSelectedTransaction = item.currentTransaction + 
item.latestSelectedTransactionNonce = item.currentTransactionNonce + + return item.currentTransaction +} + +func (item *transactionsHeapItem) gotoNextTransaction() bool { + if item.currentTransactionIndex+1 >= len(item.bunch) { + return false + } + + item.currentTransactionIndex++ + item.currentTransaction = item.bunch[item.currentTransactionIndex] + item.currentTransactionNonce = item.currentTransaction.Tx.GetNonce() + return true +} + +func (item *transactionsHeapItem) detectInitialGap(senderNonce uint64) bool { + if item.latestSelectedTransaction != nil { + return false + } + + hasInitialGap := item.currentTransactionNonce > senderNonce + if hasInitialGap { + logSelect.Trace("transactionsHeapItem.detectInitialGap, initial gap", + "tx", item.currentTransaction.TxHash, + "nonce", item.currentTransactionNonce, + "sender", item.sender, + "senderNonce", senderNonce, + ) + } + + return hasInitialGap +} + +func (item *transactionsHeapItem) detectMiddleGap() bool { + if item.latestSelectedTransaction == nil { + return false + } + + // Detect middle gap. 
+ hasMiddleGap := item.currentTransactionNonce > item.latestSelectedTransactionNonce+1 + if hasMiddleGap { + logSelect.Trace("transactionsHeapItem.detectMiddleGap, middle gap", + "tx", item.currentTransaction.TxHash, + "nonce", item.currentTransactionNonce, + "sender", item.sender, + "previousSelectedNonce", item.latestSelectedTransactionNonce, + ) + } + + return hasMiddleGap +} + +func (item *transactionsHeapItem) detectLowerNonce(senderNonce uint64) bool { + isLowerNonce := item.currentTransactionNonce < senderNonce + if isLowerNonce { + logSelect.Trace("transactionsHeapItem.detectLowerNonce", + "tx", item.currentTransaction.TxHash, + "nonce", item.currentTransactionNonce, + "sender", item.sender, + "senderNonce", senderNonce, + ) + } + + return isLowerNonce +} + +func (item *transactionsHeapItem) detectIncorrectlyGuarded(sessionWrapper *selectionSessionWrapper) bool { + isIncorrectlyGuarded := sessionWrapper.isIncorrectlyGuarded(item.currentTransaction.Tx) + if isIncorrectlyGuarded { + logSelect.Trace("transactionsHeapItem.detectIncorrectlyGuarded", + "tx", item.currentTransaction.TxHash, + "sender", item.sender, + ) + } + + return isIncorrectlyGuarded +} + +func (item *transactionsHeapItem) detectNonceDuplicate() bool { + if item.latestSelectedTransaction == nil { + return false + } + + isDuplicate := item.currentTransactionNonce == item.latestSelectedTransactionNonce + if isDuplicate { + logSelect.Trace("transactionsHeapItem.detectNonceDuplicate", + "tx", item.currentTransaction.TxHash, + "sender", item.sender, + "nonce", item.currentTransactionNonce, + ) + } + + return isDuplicate +} + +func (item *transactionsHeapItem) isCurrentTransactionMoreValuableForNetwork(other *transactionsHeapItem) bool { + return item.currentTransaction.isTransactionMoreValuableForNetwork(other.currentTransaction) +} diff --git a/txcache/transactionsHeapItem_test.go b/txcache/transactionsHeapItem_test.go new file mode 100644 index 0000000000..e829013859 --- /dev/null +++ 
b/txcache/transactionsHeapItem_test.go @@ -0,0 +1,183 @@ +package txcache + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" + "github.com/stretchr/testify/require" +) + +func TestNewTransactionsHeapItem(t *testing.T) { + t.Run("empty bunch", func(t *testing.T) { + item, err := newTransactionsHeapItem(nil) + require.Nil(t, item) + require.Equal(t, errEmptyBunchOfTransactions, err) + }) + + t.Run("non-empty bunch", func(t *testing.T) { + bunch := bunchOfTransactions{ + createTx([]byte("tx-1"), "alice", 42), + } + + item, err := newTransactionsHeapItem(bunch) + require.NotNil(t, item) + require.Nil(t, err) + + require.Equal(t, []byte("alice"), item.sender) + require.Equal(t, bunch, item.bunch) + require.Equal(t, 0, item.currentTransactionIndex) + require.Equal(t, bunch[0], item.currentTransaction) + require.Equal(t, uint64(42), item.currentTransactionNonce) + require.Nil(t, item.latestSelectedTransaction) + }) +} + +func TestTransactionsHeapItem_selectTransaction(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + a := createTx([]byte("tx-1"), "alice", 42) + b := createTx([]byte("tx-2"), "alice", 43) + a.precomputeFields(host) + b.precomputeFields(host) + + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b}) + require.NoError(t, err) + + selected := item.selectCurrentTransaction() + require.Equal(t, a, selected) + require.Equal(t, a, item.latestSelectedTransaction) + require.Equal(t, 42, int(item.latestSelectedTransactionNonce)) + + ok := item.gotoNextTransaction() + require.True(t, ok) + + selected = item.selectCurrentTransaction() + require.Equal(t, b, selected) + require.Equal(t, b, item.latestSelectedTransaction) + require.Equal(t, 43, int(item.latestSelectedTransactionNonce)) + + ok = item.gotoNextTransaction() + require.False(t, ok) +} + +func TestTransactionsHeapItem_detectInitialGap(t *testing.T) { + a := createTx([]byte("tx-1"), "alice", 42) 
+ b := createTx([]byte("tx-2"), "alice", 43) + + t.Run("known, without gap", func(t *testing.T) { + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b}) + require.NoError(t, err) + require.False(t, item.detectInitialGap(42)) + }) + + t.Run("known, without gap", func(t *testing.T) { + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b}) + require.NoError(t, err) + require.True(t, item.detectInitialGap(41)) + }) +} + +func TestTransactionsHeapItem_detectMiddleGap(t *testing.T) { + a := createTx([]byte("tx-1"), "alice", 42) + b := createTx([]byte("tx-2"), "alice", 43) + c := createTx([]byte("tx-3"), "alice", 44) + + t.Run("known, without gap", func(t *testing.T) { + item := &transactionsHeapItem{} + item.latestSelectedTransaction = a + item.latestSelectedTransactionNonce = 42 + item.currentTransaction = b + item.currentTransactionNonce = 43 + + require.False(t, item.detectMiddleGap()) + }) + + t.Run("known, without gap", func(t *testing.T) { + item := &transactionsHeapItem{} + item.latestSelectedTransaction = a + item.latestSelectedTransactionNonce = 42 + item.currentTransaction = c + item.currentTransactionNonce = 44 + + require.True(t, item.detectMiddleGap()) + }) +} + +func TestTransactionsHeapItem_detectLowerNonce(t *testing.T) { + a := createTx([]byte("tx-1"), "alice", 42) + b := createTx([]byte("tx-2"), "alice", 43) + + t.Run("known, good", func(t *testing.T) { + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b}) + require.NoError(t, err) + require.False(t, item.detectLowerNonce(42)) + }) + + t.Run("known, lower", func(t *testing.T) { + item, err := newTransactionsHeapItem(bunchOfTransactions{a, b}) + require.NoError(t, err) + require.True(t, item.detectLowerNonce(44)) + }) +} + +func TestTransactionsHeapItem_detectNonceDuplicate(t *testing.T) { + a := createTx([]byte("tx-1"), "alice", 42) + b := createTx([]byte("tx-2"), "alice", 43) + c := createTx([]byte("tx-3"), "alice", 42) + + t.Run("unknown", func(t *testing.T) { + item 
:= &transactionsHeapItem{} + item.latestSelectedTransaction = nil + require.False(t, item.detectNonceDuplicate()) + }) + + t.Run("no duplicates", func(t *testing.T) { + item := &transactionsHeapItem{} + item.latestSelectedTransaction = a + item.latestSelectedTransactionNonce = 42 + item.currentTransaction = b + item.currentTransactionNonce = 43 + + require.False(t, item.detectNonceDuplicate()) + }) + + t.Run("duplicates", func(t *testing.T) { + item := &transactionsHeapItem{} + item.latestSelectedTransaction = a + item.latestSelectedTransactionNonce = 42 + item.currentTransaction = c + item.currentTransactionNonce = 42 + + require.True(t, item.detectNonceDuplicate()) + }) +} + +func TestTransactionsHeapItem_detectIncorrectlyGuarded(t *testing.T) { + t.Run("is correctly guarded", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + sessionWrapper := newSelectionSessionWrapper(session) + + session.IsIncorrectlyGuardedCalled = func(tx data.TransactionHandler) bool { + return false + } + + item, err := newTransactionsHeapItem(bunchOfTransactions{createTx([]byte("tx-1"), "alice", 42)}) + require.NoError(t, err) + + require.False(t, item.detectIncorrectlyGuarded(sessionWrapper)) + }) + + t.Run("is incorrectly guarded", func(t *testing.T) { + session := txcachemocks.NewSelectionSessionMock() + session.IsIncorrectlyGuardedCalled = func(tx data.TransactionHandler) bool { + return true + } + sessionWrapper := newSelectionSessionWrapper(session) + + item, err := newTransactionsHeapItem(bunchOfTransactions{createTx([]byte("tx-1"), "alice", 42)}) + require.NoError(t, err) + + require.True(t, item.detectIncorrectlyGuarded(sessionWrapper)) + }) +} diff --git a/txcache/txByHashMap.go b/txcache/txByHashMap.go new file mode 100644 index 0000000000..8290279722 --- /dev/null +++ b/txcache/txByHashMap.go @@ -0,0 +1,101 @@ +package txcache + +import ( + "github.com/multiversx/mx-chain-core-go/core/atomic" + "github.com/multiversx/mx-chain-storage-go/txcache/maps" 
+) + +// txByHashMap is a new map-like structure for holding and accessing transactions by txHash +type txByHashMap struct { + backingMap *maps.ConcurrentMap + counter atomic.Counter + numBytes atomic.Counter +} + +// newTxByHashMap creates a new TxByHashMap instance +func newTxByHashMap(nChunksHint uint32) *txByHashMap { + backingMap := maps.NewConcurrentMap(nChunksHint) + + return &txByHashMap{ + backingMap: backingMap, + } +} + +// addTx adds a transaction to the map +func (txMap *txByHashMap) addTx(tx *WrappedTransaction) bool { + added := txMap.backingMap.SetIfAbsent(string(tx.TxHash), tx) + if added { + txMap.counter.Increment() + txMap.numBytes.Add(tx.Size) + } + + return added +} + +// removeTx removes a transaction from the map +func (txMap *txByHashMap) removeTx(txHash string) (*WrappedTransaction, bool) { + item, removed := txMap.backingMap.Remove(txHash) + if !removed { + return nil, false + } + + tx, ok := item.(*WrappedTransaction) + if !ok { + return nil, false + } + + if removed { + txMap.counter.Decrement() + txMap.numBytes.Subtract(tx.Size) + } + + return tx, true +} + +// getTx gets a transaction from the map +func (txMap *txByHashMap) getTx(txHash string) (*WrappedTransaction, bool) { + txUntyped, ok := txMap.backingMap.Get(txHash) + if !ok { + return nil, false + } + + tx := txUntyped.(*WrappedTransaction) + return tx, true +} + +// RemoveTxsBulk removes transactions, in bulk +func (txMap *txByHashMap) RemoveTxsBulk(txHashes [][]byte) uint32 { + numRemoved := uint32(0) + + for _, txHash := range txHashes { + _, removed := txMap.removeTx(string(txHash)) + if removed { + numRemoved++ + } + } + + return numRemoved +} + +// forEach iterates over the senders +func (txMap *txByHashMap) forEach(function ForEachTransaction) { + txMap.backingMap.IterCb(func(key string, item interface{}) { + tx := item.(*WrappedTransaction) + function([]byte(key), tx) + }) +} + +func (txMap *txByHashMap) clear() { + txMap.backingMap.Clear() + txMap.counter.Set(0) +} + 
+func (txMap *txByHashMap) keys() [][]byte { + keys := txMap.backingMap.Keys() + keysAsBytes := make([][]byte, len(keys)) + for i := 0; i < len(keys); i++ { + keysAsBytes[i] = []byte(keys[i]) + } + + return keysAsBytes +} diff --git a/txcache/txCache.go b/txcache/txCache.go new file mode 100644 index 0000000000..df69c7ecd2 --- /dev/null +++ b/txcache/txCache.go @@ -0,0 +1,294 @@ +package txcache + +import ( + "sync" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-storage-go/monitoring" + "github.com/multiversx/mx-chain-storage-go/types" +) + +var _ types.Cacher = (*TxCache)(nil) + +// TxCache represents a cache-like structure (it has a fixed capacity and implements an eviction mechanism) for holding transactions +type TxCache struct { + name string + txListBySender *txListBySenderMap + txByHash *txByHashMap + config ConfigSourceMe + host MempoolHost + evictionMutex sync.Mutex + isEvictionInProgress atomic.Flag + mutTxOperation sync.Mutex +} + +// NewTxCache creates a new transaction cache +func NewTxCache(config ConfigSourceMe, host MempoolHost) (*TxCache, error) { + log.Debug("NewTxCache", "config", config.String()) + monitoring.MonitorNewCache(config.Name, uint64(config.NumBytesThreshold)) + + err := config.verify() + if err != nil { + return nil, err + } + if check.IfNil(host) { + return nil, errNilMempoolHost + } + + // Note: for simplicity, we use the same "numChunks" for both internal concurrent maps + numChunks := config.NumChunks + senderConstraintsObj := config.getSenderConstraints() + + txCache := &TxCache{ + name: config.Name, + txListBySender: newTxListBySenderMap(numChunks, senderConstraintsObj), + txByHash: newTxByHashMap(numChunks), + config: config, + host: host, + } + + return txCache, nil +} + +// AddTx adds a transaction in the cache +// Eviction happens if maximum capacity is reached +func 
(cache *TxCache) AddTx(tx *WrappedTransaction) (ok bool, added bool) {
	if tx == nil || check.IfNil(tx.Tx) {
		return false, false
	}

	logAdd.Trace("TxCache.AddTx", "tx", tx.TxHash, "nonce", tx.Tx.GetNonce(), "sender", tx.Tx.GetSndAddr())

	tx.precomputeFields(cache.host)

	if cache.config.EvictionEnabled {
		_ = cache.doEviction()
	}

	cache.mutTxOperation.Lock()
	addedInByHash := cache.txByHash.addTx(tx)
	addedInBySender, evicted := cache.txListBySender.addTxReturnEvicted(tx)
	cache.mutTxOperation.Unlock()
	if addedInByHash != addedInBySender {
		// This can happen when two go-routines concur to add the same transaction:
		// - A adds to "txByHash"
		// - B won't add to "txByHash" (duplicate)
		// - B adds to "txListBySender"
		// - A won't add to "txListBySender" (duplicate)
		logAdd.Debug("TxCache.AddTx: slight inconsistency detected:", "tx", tx.TxHash, "sender", tx.Tx.GetSndAddr(), "addedInByHash", addedInByHash, "addedInBySender", addedInBySender)
	}

	if len(evicted) > 0 {
		logRemove.Trace("TxCache.AddTx with eviction", "sender", tx.Tx.GetSndAddr(), "num evicted txs", len(evicted))
		cache.txByHash.RemoveTxsBulk(evicted)
	}

	// The return value "added" is true even if the transaction was added, but then removed due to limits by sender.
	// This is to ensure that the onAdded() notification is triggered.
	return true, addedInByHash || addedInBySender
}

// GetByTxHash gets the transaction by hash
func (cache *TxCache) GetByTxHash(txHash []byte) (*WrappedTransaction, bool) {
	tx, ok := cache.txByHash.getTx(string(txHash))
	return tx, ok
}

// SelectTransactions selects the best transactions to be included in the next miniblock.
// It returns up to "maxNum" transactions, with total gas <= "gasRequested".
+func (cache *TxCache) SelectTransactions(session SelectionSession, gasRequested uint64, maxNum int, selectionLoopMaximumDuration time.Duration) ([]*WrappedTransaction, uint64) { + if check.IfNil(session) { + log.Error("TxCache.SelectTransactions", "err", errNilSelectionSession) + return nil, 0 + } + + stopWatch := core.NewStopWatch() + stopWatch.Start("selection") + + logSelect.Debug( + "TxCache.SelectTransactions: begin", + "num bytes", cache.NumBytes(), + "num txs", cache.CountTx(), + "num senders", cache.CountSenders(), + ) + + transactions, accumulatedGas := cache.doSelectTransactions(session, gasRequested, maxNum, selectionLoopMaximumDuration) + + stopWatch.Stop("selection") + + logSelect.Debug( + "TxCache.SelectTransactions: end", + "duration", stopWatch.GetMeasurement("selection"), + "num txs selected", len(transactions), + "gas", accumulatedGas, + ) + + go cache.diagnoseCounters() + go displaySelectionOutcome(logSelect, "selection", transactions) + + return transactions, accumulatedGas +} + +func (cache *TxCache) getSenders() []*txListForSender { + return cache.txListBySender.getSenders() +} + +// RemoveTxByHash removes transactions with nonces lower or equal to the given transaction's nonce +func (cache *TxCache) RemoveTxByHash(txHash []byte) bool { + cache.mutTxOperation.Lock() + defer cache.mutTxOperation.Unlock() + + tx, foundInByHash := cache.txByHash.removeTx(string(txHash)) + if !foundInByHash { + // Transaction might have been removed in the meantime. 
+ return false + } + + evicted := cache.txListBySender.removeTransactionsWithLowerOrEqualNonceReturnHashes(tx) + if len(evicted) > 0 { + cache.txByHash.RemoveTxsBulk(evicted) + } + + logRemove.Trace("TxCache.RemoveTxByHash", "tx", txHash, "len(evicted)", len(evicted)) + return true +} + +// NumBytes gets the approximate number of bytes stored in the cache +func (cache *TxCache) NumBytes() int { + return int(cache.txByHash.numBytes.GetUint64()) +} + +// CountTx gets the number of transactions in the cache +func (cache *TxCache) CountTx() uint64 { + return cache.txByHash.counter.GetUint64() +} + +// Len is an alias for CountTx +func (cache *TxCache) Len() int { + return int(cache.CountTx()) +} + +// SizeInBytesContained returns 0 +func (cache *TxCache) SizeInBytesContained() uint64 { + return 0 +} + +// CountSenders gets the number of senders in the cache +func (cache *TxCache) CountSenders() uint64 { + return cache.txListBySender.counter.GetUint64() +} + +// ForEachTransaction iterates over the transactions in the cache +func (cache *TxCache) ForEachTransaction(function ForEachTransaction) { + cache.txByHash.forEach(function) +} + +// getAllTransactions returns all transactions in the cache +func (cache *TxCache) getAllTransactions() []*WrappedTransaction { + transactions := make([]*WrappedTransaction, 0, cache.Len()) + + cache.ForEachTransaction(func(_ []byte, tx *WrappedTransaction) { + transactions = append(transactions, tx) + }) + + return transactions +} + +// GetTransactionsPoolForSender returns the list of transaction hashes for the sender +func (cache *TxCache) GetTransactionsPoolForSender(sender string) []*WrappedTransaction { + listForSender, ok := cache.txListBySender.getListForSender(sender) + if !ok { + return nil + } + + return listForSender.getTxs() +} + +// Clear clears the cache +func (cache *TxCache) Clear() { + cache.mutTxOperation.Lock() + cache.txListBySender.clear() + cache.txByHash.clear() + cache.mutTxOperation.Unlock() +} + +// Put is not 
implemented +func (cache *TxCache) Put(_ []byte, _ interface{}, _ int) (evicted bool) { + log.Error("TxCache.Put is not implemented") + return false +} + +// Get gets a transaction (unwrapped) by hash +// Implemented for compatibility reasons (see txPoolsCleaner.go). +func (cache *TxCache) Get(key []byte) (value interface{}, ok bool) { + tx, ok := cache.GetByTxHash(key) + if ok { + return tx.Tx, true + } + return nil, false +} + +// Has checks if a transaction exists +func (cache *TxCache) Has(key []byte) bool { + _, ok := cache.GetByTxHash(key) + return ok +} + +// Peek gets a transaction (unwrapped) by hash +// Implemented for compatibility reasons (see transactions.go, common.go). +func (cache *TxCache) Peek(key []byte) (value interface{}, ok bool) { + tx, ok := cache.GetByTxHash(key) + if ok { + return tx.Tx, true + } + return nil, false +} + +// HasOrAdd is not implemented +func (cache *TxCache) HasOrAdd(_ []byte, _ interface{}, _ int) (has, added bool) { + log.Error("TxCache.HasOrAdd is not implemented") + return false, false +} + +// Remove removes tx by hash +func (cache *TxCache) Remove(key []byte) { + _ = cache.RemoveTxByHash(key) +} + +// Keys returns the tx hashes in the cache +func (cache *TxCache) Keys() [][]byte { + return cache.txByHash.keys() +} + +// MaxSize returns the maximum number of transactions that can be stored in the cache. 
+// See: https://github.com/multiversx/mx-chain-go/blob/v1.8.4/dataRetriever/txpool/shardedTxPool.go#L55 +func (cache *TxCache) MaxSize() int { + return int(cache.config.CountThreshold) +} + +// RegisterHandler is not implemented +func (cache *TxCache) RegisterHandler(func(key []byte, value interface{}), string) { + log.Error("TxCache.RegisterHandler is not implemented") +} + +// UnRegisterHandler is not implemented +func (cache *TxCache) UnRegisterHandler(string) { + log.Error("TxCache.UnRegisterHandler is not implemented") +} + +// ImmunizeTxsAgainstEviction does nothing for this type of cache +func (cache *TxCache) ImmunizeTxsAgainstEviction(_ [][]byte) { +} + +// Close does nothing for this cacher implementation +func (cache *TxCache) Close() error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (cache *TxCache) IsInterfaceNil() bool { + return cache == nil +} diff --git a/txcache/txCache_test.go b/txcache/txCache_test.go new file mode 100644 index 0000000000..c96f70546a --- /dev/null +++ b/txcache/txCache_test.go @@ -0,0 +1,639 @@ +package txcache + +import ( + "errors" + "fmt" + "math" + "sort" + "sync" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-storage-go/common" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" + "github.com/multiversx/mx-chain-storage-go/types" + "github.com/stretchr/testify/require" +) + +func Test_NewTxCache(t *testing.T) { + config := ConfigSourceMe{ + Name: "test", + NumChunks: 16, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: true, + NumItemsToPreemptivelyEvict: 1, + } + + host := txcachemocks.NewMempoolHostMock() + + cache, err := NewTxCache(config, host) + require.Nil(t, err) + require.NotNil(t, cache) + 
+ badConfig := config + badConfig.Name = "" + requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.Name", host) + + badConfig = config + badConfig.NumChunks = 0 + requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.NumChunks", host) + + badConfig = config + badConfig.NumBytesPerSenderThreshold = 0 + requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.NumBytesPerSenderThreshold", host) + + badConfig = config + badConfig.CountPerSenderThreshold = 0 + requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.CountPerSenderThreshold", host) + + badConfig = config + cache, err = NewTxCache(config, nil) + require.Nil(t, cache) + require.Equal(t, errNilMempoolHost, err) + + badConfig = config + badConfig.NumBytesThreshold = 0 + requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.NumBytesThreshold", host) + + badConfig = config + badConfig.CountThreshold = 0 + requireErrorOnNewTxCache(t, badConfig, common.ErrInvalidConfig, "config.CountThreshold", host) +} + +func requireErrorOnNewTxCache(t *testing.T, config ConfigSourceMe, errExpected error, errPartialMessage string, host MempoolHost) { + cache, errReceived := NewTxCache(config, host) + require.Nil(t, cache) + require.True(t, errors.Is(errReceived, errExpected)) + require.Contains(t, errReceived.Error(), errPartialMessage) +} + +func Test_AddTx(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + tx := createTx([]byte("hash-1"), "alice", 1) + + ok, added := cache.AddTx(tx) + require.True(t, ok) + require.True(t, added) + require.True(t, cache.Has([]byte("hash-1"))) + + // Add it again (no-operation) + ok, added = cache.AddTx(tx) + require.True(t, ok) + require.False(t, added) + require.True(t, cache.Has([]byte("hash-1"))) + + foundTx, ok := cache.GetByTxHash([]byte("hash-1")) + require.True(t, ok) + require.Equal(t, tx, foundTx) +} + +func Test_AddNilTx_DoesNothing(t *testing.T) { + cache := newUnconstrainedCacheToTest() 
+ + txHash := []byte("hash-1") + + ok, added := cache.AddTx(&WrappedTransaction{Tx: nil, TxHash: txHash}) + require.False(t, ok) + require.False(t, added) + + foundTx, ok := cache.GetByTxHash(txHash) + require.False(t, ok) + require.Nil(t, foundTx) +} + +func Test_AddTx_AppliesSizeConstraintsPerSenderForNumTransactions(t *testing.T) { + cache := newCacheToTest(maxNumBytesPerSenderUpperBound, 3) + + cache.AddTx(createTx([]byte("tx-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("tx-alice-2"), "alice", 2)) + cache.AddTx(createTx([]byte("tx-alice-4"), "alice", 4)) + cache.AddTx(createTx([]byte("tx-bob-1"), "bob", 1)) + cache.AddTx(createTx([]byte("tx-bob-2"), "bob", 2)) + require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-4"}, cache.getHashesForSender("alice")) + require.Equal(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) + require.True(t, cache.areInternalMapsConsistent()) + + cache.AddTx(createTx([]byte("tx-alice-3"), "alice", 3)) + require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-3"}, cache.getHashesForSender("alice")) + require.Equal(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) + require.True(t, cache.areInternalMapsConsistent()) +} + +func Test_AddTx_AppliesSizeConstraintsPerSenderForNumBytes(t *testing.T) { + cache := newCacheToTest(1024, math.MaxUint32) + + cache.AddTx(createTx([]byte("tx-alice-1"), "alice", 1).withSize(128).withGasLimit(50000)) + cache.AddTx(createTx([]byte("tx-alice-2"), "alice", 2).withSize(512).withGasLimit(1500000)) + cache.AddTx(createTx([]byte("tx-alice-4"), "alice", 3).withSize(256).withGasLimit(1500000)) + cache.AddTx(createTx([]byte("tx-bob-1"), "bob", 1).withSize(512).withGasLimit(1500000)) + cache.AddTx(createTx([]byte("tx-bob-2"), "bob", 2).withSize(513).withGasLimit(1500000)) + + require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-4"}, cache.getHashesForSender("alice")) + require.Equal(t, []string{"tx-bob-1"}, 
cache.getHashesForSender("bob")) + require.True(t, cache.areInternalMapsConsistent()) + + cache.AddTx(createTx([]byte("tx-alice-3"), "alice", 3).withSize(256).withGasLimit(1500000)) + cache.AddTx(createTx([]byte("tx-bob-2"), "bob", 3).withSize(512).withGasLimit(1500000)) + require.Equal(t, []string{"tx-alice-1", "tx-alice-2", "tx-alice-3"}, cache.getHashesForSender("alice")) + require.Equal(t, []string{"tx-bob-1", "tx-bob-2"}, cache.getHashesForSender("bob")) + require.True(t, cache.areInternalMapsConsistent()) +} + +func Test_RemoveByTxHash(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + cache.AddTx(createTx([]byte("hash-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-2"), "alice", 2)) + + removed := cache.RemoveTxByHash([]byte("hash-1")) + require.True(t, removed) + + removed = cache.RemoveTxByHash([]byte("hash-2")) + require.True(t, removed) + + removed = cache.RemoveTxByHash([]byte("hash-3")) + require.False(t, removed) + + foundTx, ok := cache.GetByTxHash([]byte("hash-1")) + require.False(t, ok) + require.Nil(t, foundTx) + + foundTx, ok = cache.GetByTxHash([]byte("hash-2")) + require.False(t, ok) + require.Nil(t, foundTx) + + require.Equal(t, uint64(0), cache.CountTx()) +} + +func Test_CountTx_And_Len(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + cache.AddTx(createTx([]byte("hash-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-2"), "alice", 2)) + cache.AddTx(createTx([]byte("hash-3"), "alice", 3)) + + require.Equal(t, uint64(3), cache.CountTx()) + require.Equal(t, 3, cache.Len()) +} + +func Test_GetByTxHash_And_Peek_And_Get(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + txHash := []byte("hash-1") + tx := createTx(txHash, "alice", 1) + cache.AddTx(tx) + + foundTx, ok := cache.GetByTxHash(txHash) + require.True(t, ok) + require.Equal(t, tx, foundTx) + + foundTxPeek, okPeek := cache.Peek(txHash) + require.True(t, okPeek) + require.Equal(t, tx.Tx, foundTxPeek) + + foundTxPeek, okPeek = 
cache.Peek([]byte("missing")) + require.False(t, okPeek) + require.Nil(t, foundTxPeek) + + foundTxGet, okGet := cache.Get(txHash) + require.True(t, okGet) + require.Equal(t, tx.Tx, foundTxGet) + + foundTxGet, okGet = cache.Get([]byte("missing")) + require.False(t, okGet) + require.Nil(t, foundTxGet) +} + +func Test_RemoveByTxHash_WhenMissing(t *testing.T) { + cache := newUnconstrainedCacheToTest() + removed := cache.RemoveTxByHash([]byte("missing")) + require.False(t, removed) +} + +func Test_RemoveByTxHash_RemovesFromByHash_WhenMapsInconsistency(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + txHash := []byte("hash-1") + tx := createTx(txHash, "alice", 1) + cache.AddTx(tx) + + // Cause an inconsistency between the two internal maps (theoretically possible in case of misbehaving eviction) + _ = cache.txListBySender.removeTransactionsWithLowerOrEqualNonceReturnHashes(tx) + + _ = cache.RemoveTxByHash(txHash) + require.Equal(t, 0, cache.txByHash.backingMap.Count()) +} + +func Test_Clear(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) + cache.AddTx(createTx([]byte("hash-alice-42"), "alice", 42)) + require.Equal(t, uint64(3), cache.CountTx()) + + cache.Clear() + require.Equal(t, uint64(0), cache.CountTx()) +} + +func Test_ForEachTransaction(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + cache.AddTx(createTx([]byte("hash-alice-1"), "alice", 1)) + cache.AddTx(createTx([]byte("hash-bob-7"), "bob", 7)) + + counter := 0 + cache.ForEachTransaction(func(txHash []byte, value *WrappedTransaction) { + counter++ + }) + require.Equal(t, 2, counter) +} + +func Test_GetTransactionsPoolForSender(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + txHashes1 := [][]byte{[]byte("hash-1"), []byte("hash-2")} + txSender1 := "alice" + wrappedTxs1 := []*WrappedTransaction{ + createTx(txHashes1[1], txSender1, 2), + 
createTx(txHashes1[0], txSender1, 1), + } + txHashes2 := [][]byte{[]byte("hash-3"), []byte("hash-4"), []byte("hash-5")} + txSender2 := "bob" + wrappedTxs2 := []*WrappedTransaction{ + createTx(txHashes2[1], txSender2, 4), + createTx(txHashes2[0], txSender2, 3), + createTx(txHashes2[2], txSender2, 5), + } + cache.AddTx(wrappedTxs1[0]) + cache.AddTx(wrappedTxs1[1]) + cache.AddTx(wrappedTxs2[0]) + cache.AddTx(wrappedTxs2[1]) + cache.AddTx(wrappedTxs2[2]) + + sort.Slice(wrappedTxs1, func(i, j int) bool { + return wrappedTxs1[i].Tx.GetNonce() < wrappedTxs1[j].Tx.GetNonce() + }) + txs := cache.GetTransactionsPoolForSender(txSender1) + require.Equal(t, wrappedTxs1, txs) + + sort.Slice(wrappedTxs2, func(i, j int) bool { + return wrappedTxs2[i].Tx.GetNonce() < wrappedTxs2[j].Tx.GetNonce() + }) + txs = cache.GetTransactionsPoolForSender(txSender2) + require.Equal(t, wrappedTxs2, txs) + + _ = cache.RemoveTxByHash(txHashes2[0]) + expectedTxs := wrappedTxs2[1:] + txs = cache.GetTransactionsPoolForSender(txSender2) + require.Equal(t, expectedTxs, txs) +} + +func Test_Keys(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) + cache.AddTx(createTx([]byte("alice-y"), "alice", 43)) + cache.AddTx(createTx([]byte("bob-x"), "bob", 42)) + cache.AddTx(createTx([]byte("bob-y"), "bob", 43)) + + keys := cache.Keys() + require.Equal(t, 4, len(keys)) + require.Contains(t, keys, []byte("alice-x")) + require.Contains(t, keys, []byte("alice-y")) + require.Contains(t, keys, []byte("bob-x")) + require.Contains(t, keys, []byte("bob-y")) +} + +func Test_AddWithEviction_UniformDistributionOfTxsPerSender(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + t.Run("numSenders = 11, numTransactions = 10, countThreshold = 100, numItemsToPreemptivelyEvict = 1", func(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + 
NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 100, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 1, + } + + cache, err := NewTxCache(config, host) + require.Nil(t, err) + require.NotNil(t, cache) + + addManyTransactionsWithUniformDistribution(cache, 11, 10) + + // Eviction happens if the cache capacity is already exceeded, + // but not if the capacity will be exceeded after the addition. + // Thus, for the given value of "NumItemsToPreemptivelyEvict", there will be "countThreshold" + 1 transactions in the cache. + require.Equal(t, 101, int(cache.CountTx())) + }) + + t.Run("numSenders = 3, numTransactions = 5, countThreshold = 4, numItemsToPreemptivelyEvict = 3", func(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 4, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 3, + } + + cache, err := NewTxCache(config, host) + require.Nil(t, err) + require.NotNil(t, cache) + + addManyTransactionsWithUniformDistribution(cache, 3, 5) + require.Equal(t, 3, int(cache.CountTx())) + }) + + t.Run("numSenders = 11, numTransactions = 10, countThreshold = 100, numItemsToPreemptivelyEvict = 2", func(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 100, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 2, + } + + cache, err := NewTxCache(config, host) + require.Nil(t, err) + require.NotNil(t, cache) + + addManyTransactionsWithUniformDistribution(cache, 11, 10) + require.Equal(t, 100, int(cache.CountTx())) + }) + + t.Run("numSenders = 100, numTransactions = 1000, countThreshold = 250000 (no eviction)", func(t *testing.T) { + 
config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 250000, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 1, + } + + cache, err := NewTxCache(config, host) + require.Nil(t, err) + require.NotNil(t, cache) + + addManyTransactionsWithUniformDistribution(cache, 100, 1000) + require.Equal(t, 100000, int(cache.CountTx())) + }) + + t.Run("numSenders = 1000, numTransactions = 500, countThreshold = 250000, NumItemsToPreemptivelyEvict = 50000", func(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + EvictionEnabled: true, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: 250000, + CountPerSenderThreshold: math.MaxUint32, + NumItemsToPreemptivelyEvict: 10000, + } + + cache, err := NewTxCache(config, host) + require.Nil(t, err) + require.NotNil(t, cache) + + addManyTransactionsWithUniformDistribution(cache, 1000, 500) + require.Equal(t, 250000, int(cache.CountTx())) + }) +} + +func Test_NotImplementedFunctions(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + evicted := cache.Put(nil, nil, 0) + require.False(t, evicted) + + has, added := cache.HasOrAdd(nil, nil, 0) + require.False(t, has) + require.False(t, added) + + require.NotPanics(t, func() { cache.RegisterHandler(nil, "") }) + + err := cache.Close() + require.Nil(t, err) +} + +func Test_IsInterfaceNil(t *testing.T) { + cache := newUnconstrainedCacheToTest() + require.False(t, check.IfNil(cache)) + + makeNil := func() types.Cacher { + return nil + } + + thisIsNil := makeNil() + require.True(t, check.IfNil(thisIsNil)) +} + +func TestTxCache_TransactionIsAdded_EvenWhenInternalMapsAreInconsistent(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + // Setup inconsistency: transaction already exists in map by hash, 
but not in map by sender + cache.txByHash.addTx(createTx([]byte("alice-x"), "alice", 42)) + + require.Equal(t, 1, cache.txByHash.backingMap.Count()) + require.True(t, cache.Has([]byte("alice-x"))) + ok, added := cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) + require.True(t, ok) + require.True(t, added) + require.Equal(t, uint64(1), cache.CountSenders()) + require.Equal(t, []string{"alice-x"}, cache.getHashesForSender("alice")) + cache.Clear() + + // Setup inconsistency: transaction already exists in map by sender, but not in map by hash + cache.txListBySender.addTxReturnEvicted(createTx([]byte("alice-x"), "alice", 42)) + + require.False(t, cache.Has([]byte("alice-x"))) + ok, added = cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) + require.True(t, ok) + require.True(t, added) + require.Equal(t, uint64(1), cache.CountSenders()) + require.Equal(t, []string{"alice-x"}, cache.getHashesForSender("alice")) + cache.Clear() +} + +func TestTxCache_NoCriticalInconsistency_WhenConcurrentAdditionsAndRemovals(t *testing.T) { + cache := newUnconstrainedCacheToTest() + + // A lot of routines concur to add & remove a transaction + for try := 0; try < 100; try++ { + var wg sync.WaitGroup + + for i := 0; i < 50; i++ { + wg.Add(1) + go func() { + cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) + _ = cache.RemoveTxByHash([]byte("alice-x")) + wg.Done() + }() + } + + wg.Wait() + // In this case, there is the slight chance that: + // go A: add to map by hash + // go B: won't add in map by hash, already there + // go A: add to map by sender + // go A: remove from map by hash + // go A: remove from map by sender and delete empty sender + // go B: add to map by sender + // go B: can't remove from map by hash, not found + // go B: won't remove from map by sender (sender unknown) + + // Therefore, the number of senders could be 0 or 1 + require.Equal(t, 0, cache.txByHash.backingMap.Count()) + expectedCountConsistent := 0 + expectedCountSlightlyInconsistent := 1 + 
actualCount := int(cache.txListBySender.backingMap.Count()) + require.True(t, actualCount == expectedCountConsistent || actualCount == expectedCountSlightlyInconsistent) + + // A further addition works: + cache.AddTx(createTx([]byte("alice-x"), "alice", 42)) + require.True(t, cache.Has([]byte("alice-x"))) + require.Equal(t, []string{"alice-x"}, cache.getHashesForSender("alice")) + } +} + +func TestBenchmarkTxCache_addManyTransactionsWithSameNonce(t *testing.T) { + config := ConfigSourceMe{ + Name: "untitled", + NumChunks: 16, + NumBytesThreshold: 419_430_400, + NumBytesPerSenderThreshold: 12_288_000, + CountThreshold: 300_000, + CountPerSenderThreshold: 5_000, + EvictionEnabled: true, + NumItemsToPreemptivelyEvict: 50_000, + } + + host := txcachemocks.NewMempoolHostMock() + + sw := core.NewStopWatch() + + t.Run("numTransactions = 100 (worst case)", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + numTransactions := 100 + + sw.Start(t.Name()) + + for i := 0; i < numTransactions; i++ { + cache.AddTx(createTx(randomHashes.getItem(i), "alice", 42).withGasPrice(oneBillion + uint64(i))) + } + + sw.Stop(t.Name()) + + require.Equal(t, numTransactions, int(cache.CountTx())) + }) + + t.Run("numTransactions = 1000 (worst case)", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + numTransactions := 1000 + + sw.Start(t.Name()) + + for i := 0; i < numTransactions; i++ { + cache.AddTx(createTx(randomHashes.getItem(i), "alice", 42).withGasPrice(oneBillion + uint64(i))) + } + + sw.Stop(t.Name()) + + require.Equal(t, numTransactions, int(cache.CountTx())) + }) + + t.Run("numTransactions = 5_000 (worst case)", func(t *testing.T) { + cache, err := NewTxCache(config, host) + require.Nil(t, err) + + numTransactions := 5_000 + + sw.Start(t.Name()) + + for i := 0; i < numTransactions; i++ { + cache.AddTx(createTx(randomHashes.getItem(i), "alice", 42).withGasPrice(oneBillion + uint64(i))) + } + + sw.Stop(t.Name()) 
+ + require.Equal(t, numTransactions, int(cache.CountTx())) + }) + + for name, measurement := range sw.GetMeasurementsMap() { + fmt.Printf("%fs (%s)\n", measurement, name) + } + + // (1) + // Vendor ID: GenuineIntel + // Model name: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz + // CPU family: 6 + // Model: 140 + // Thread(s) per core: 2 + // Core(s) per socket: 4 + // + // 0.000120s (TestBenchmarkTxCache_addManyTransactionsWithSameNonce/numTransactions_=_100_(worst_case)) + // 0.002821s (TestBenchmarkTxCache_addManyTransactionsWithSameNonce/numTransactions_=_1000_(worst_case)) + // 0.062260s (TestBenchmarkTxCache_addManyTransactionsWithSameNonce/numTransactions_=_5_000_(worst_case)) +} + +func newUnconstrainedCacheToTest() *TxCache { + host := txcachemocks.NewMempoolHostMock() + + cache, err := NewTxCache(ConfigSourceMe{ + Name: "test", + NumChunks: 16, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: maxNumBytesPerSenderUpperBound, + CountThreshold: math.MaxUint32, + CountPerSenderThreshold: math.MaxUint32, + EvictionEnabled: false, + NumItemsToPreemptivelyEvict: 1, + }, host) + if err != nil { + panic(fmt.Sprintf("newUnconstrainedCacheToTest(): %s", err)) + } + + return cache +} + +func newCacheToTest(numBytesPerSenderThreshold uint32, countPerSenderThreshold uint32) *TxCache { + host := txcachemocks.NewMempoolHostMock() + + cache, err := NewTxCache(ConfigSourceMe{ + Name: "test", + NumChunks: 16, + NumBytesThreshold: maxNumBytesUpperBound, + NumBytesPerSenderThreshold: numBytesPerSenderThreshold, + CountThreshold: math.MaxUint32, + CountPerSenderThreshold: countPerSenderThreshold, + EvictionEnabled: false, + NumItemsToPreemptivelyEvict: 1, + }, host) + if err != nil { + panic(fmt.Sprintf("newCacheToTest(): %s", err)) + } + + return cache +} diff --git a/txcache/txListBySenderMap.go b/txcache/txListBySenderMap.go new file mode 100644 index 0000000000..50993268cb --- /dev/null +++ b/txcache/txListBySenderMap.go @@ -0,0 +1,153 @@ 
+package txcache + +import ( + "sync" + + "github.com/multiversx/mx-chain-core-go/core/atomic" + "github.com/multiversx/mx-chain-storage-go/txcache/maps" +) + +// txListBySenderMap is a map-like structure for holding and accessing transactions by sender +type txListBySenderMap struct { + backingMap *maps.ConcurrentMap + senderConstraints senderConstraints + counter atomic.Counter + mutex sync.Mutex +} + +// newTxListBySenderMap creates a new instance of TxListBySenderMap +func newTxListBySenderMap( + nChunksHint uint32, + senderConstraints senderConstraints, +) *txListBySenderMap { + backingMap := maps.NewConcurrentMap(nChunksHint) + + return &txListBySenderMap{ + backingMap: backingMap, + senderConstraints: senderConstraints, + } +} + +// addTxReturnEvicted adds a transaction in the map, in the corresponding list (selected by its sender). +// This function returns a boolean indicating whether the transaction was added, and a slice of evicted transaction hashes (upon applying sender-level constraints). 
+func (txMap *txListBySenderMap) addTxReturnEvicted(tx *WrappedTransaction) (bool, [][]byte) {
+ sender := string(tx.Tx.GetSndAddr())
+ listForSender := txMap.getOrAddListForSender(sender)
+
+ added, evictedHashes := listForSender.AddTx(tx)
+ return added, evictedHashes
+}
+
+// getOrAddListForSender gets or lazily creates a list (using double-checked locking pattern)
+func (txMap *txListBySenderMap) getOrAddListForSender(sender string) *txListForSender {
+ listForSender, ok := txMap.getListForSender(sender)
+ if ok {
+ return listForSender
+ }
+
+ txMap.mutex.Lock()
+ defer txMap.mutex.Unlock()
+
+ listForSender, ok = txMap.getListForSender(sender)
+ if ok {
+ return listForSender
+ }
+
+ return txMap.addSender(sender)
+}
+
+func (txMap *txListBySenderMap) getListForSender(sender string) (*txListForSender, bool) {
+ listForSenderUntyped, ok := txMap.backingMap.Get(sender)
+ if !ok {
+ return nil, false
+ }
+
+ listForSender := listForSenderUntyped.(*txListForSender)
+ return listForSender, true
+}
+
+func (txMap *txListBySenderMap) addSender(sender string) *txListForSender {
+ listForSender := newTxListForSender(sender, &txMap.senderConstraints)
+
+ txMap.backingMap.Set(sender, listForSender)
+ txMap.counter.Increment()
+
+ return listForSender
+}
+
+// removeTransactionsWithLowerOrEqualNonceReturnHashes removes transactions with nonces lower or equal to the given transaction's nonce.
+func (txMap *txListBySenderMap) removeTransactionsWithLowerOrEqualNonceReturnHashes(tx *WrappedTransaction) [][]byte {
+ sender := string(tx.Tx.GetSndAddr())
+
+ listForSender, ok := txMap.getListForSender(sender)
+ if !ok {
+ // This happens when a sender whose transactions were selected for processing is removed from cache in the meantime.
+ // When it comes to remove one of its transactions due to processing (committed / finalized block), they don't exist in cache anymore. 
+ log.Trace("txListBySenderMap.removeTxReturnEvicted detected slight inconsistency: sender of tx not in cache", "tx", tx.TxHash, "sender", []byte(sender)) + return nil + } + + evicted := listForSender.removeTransactionsWithLowerOrEqualNonceReturnHashes(tx.Tx.GetNonce()) + txMap.removeSenderIfEmpty(listForSender) + return evicted +} + +func (txMap *txListBySenderMap) removeSenderIfEmpty(listForSender *txListForSender) { + if listForSender.IsEmpty() { + txMap.removeSender(listForSender.sender) + } +} + +// Important note: this doesn't remove the transactions from txCache.txByHash. That is the responsibility of the caller (of this function). +func (txMap *txListBySenderMap) removeSender(sender string) bool { + logRemove.Trace("txListBySenderMap.removeSender", "sender", sender) + + _, removed := txMap.backingMap.Remove(sender) + if removed { + txMap.counter.Decrement() + } + + return removed +} + +// RemoveSendersBulk removes senders, in bulk +func (txMap *txListBySenderMap) RemoveSendersBulk(senders []string) uint32 { + numRemoved := uint32(0) + + for _, senderKey := range senders { + if txMap.removeSender(senderKey) { + numRemoved++ + } + } + + return numRemoved +} + +// removeTransactionsWithHigherOrEqualNonce removes transactions with nonces higher or equal to the given nonce. +// Useful for the eviction flow. 
+func (txMap *txListBySenderMap) removeTransactionsWithHigherOrEqualNonce(accountKey []byte, nonce uint64) { + sender := string(accountKey) + listForSender, ok := txMap.getListForSender(sender) + if !ok { + return + } + + listForSender.removeTransactionsWithHigherOrEqualNonce(nonce) + txMap.removeSenderIfEmpty(listForSender) +} + +func (txMap *txListBySenderMap) getSenders() []*txListForSender { + senders := make([]*txListForSender, 0, txMap.counter.Get()) + + txMap.backingMap.IterCb(func(key string, item interface{}) { + listForSender := item.(*txListForSender) + senders = append(senders, listForSender) + }) + + return senders +} + +func (txMap *txListBySenderMap) clear() { + txMap.backingMap.Clear() + txMap.counter.Set(0) +} diff --git a/txcache/txListBySenderMap_test.go b/txcache/txListBySenderMap_test.go new file mode 100644 index 0000000000..b7f8998d7c --- /dev/null +++ b/txcache/txListBySenderMap_test.go @@ -0,0 +1,104 @@ +package txcache + +import ( + "math" + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSendersMap_AddTx_IncrementsCounter(t *testing.T) { + myMap := newSendersMapToTest() + + myMap.addTxReturnEvicted(createTx([]byte("a"), "alice", 1)) + myMap.addTxReturnEvicted(createTx([]byte("aa"), "alice", 2)) + myMap.addTxReturnEvicted(createTx([]byte("b"), "bob", 1)) + + // There are 2 senders + require.Equal(t, int64(2), myMap.counter.Get()) +} + +func TestSendersMap_removeTransactionsWithLowerOrEqualNonceReturnHashes_alsoRemovesSenderWhenNoTransactionLeft(t *testing.T) { + myMap := newSendersMapToTest() + + txAlice1 := createTx([]byte("a1"), "alice", 1) + txAlice2 := createTx([]byte("a2"), "alice", 2) + txBob := createTx([]byte("b"), "bob", 1) + + myMap.addTxReturnEvicted(txAlice1) + myMap.addTxReturnEvicted(txAlice2) + myMap.addTxReturnEvicted(txBob) + require.Equal(t, int64(2), myMap.counter.Get()) + require.Equal(t, uint64(2), myMap.testGetListForSender("alice").countTx()) + require.Equal(t, uint64(1), 
myMap.testGetListForSender("bob").countTx()) + + _ = myMap.removeTransactionsWithLowerOrEqualNonceReturnHashes(txAlice1) + require.Equal(t, int64(2), myMap.counter.Get()) + require.Equal(t, uint64(1), myMap.testGetListForSender("alice").countTx()) + require.Equal(t, uint64(1), myMap.testGetListForSender("bob").countTx()) + + _ = myMap.removeTransactionsWithLowerOrEqualNonceReturnHashes(txAlice2) + // All alice's transactions have been removed now + require.Equal(t, int64(1), myMap.counter.Get()) + + _ = myMap.removeTransactionsWithLowerOrEqualNonceReturnHashes(txBob) + // Also Bob has no more transactions + require.Equal(t, int64(0), myMap.counter.Get()) +} + +func TestSendersMap_RemoveSender(t *testing.T) { + myMap := newSendersMapToTest() + + myMap.addTxReturnEvicted(createTx([]byte("a"), "alice", 1)) + require.Equal(t, int64(1), myMap.counter.Get()) + + // Bob is unknown + myMap.removeSender("bob") + require.Equal(t, int64(1), myMap.counter.Get()) + + myMap.removeSender("alice") + require.Equal(t, int64(0), myMap.counter.Get()) +} + +func TestSendersMap_RemoveSendersBulk_ConcurrentWithAddition(t *testing.T) { + myMap := newSendersMapToTest() + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + + for i := 0; i < 100; i++ { + numRemoved := myMap.RemoveSendersBulk([]string{"alice"}) + require.LessOrEqual(t, numRemoved, uint32(1)) + + numRemoved = myMap.RemoveSendersBulk([]string{"bob"}) + require.LessOrEqual(t, numRemoved, uint32(1)) + + numRemoved = myMap.RemoveSendersBulk([]string{"carol"}) + require.LessOrEqual(t, numRemoved, uint32(1)) + } + }() + + wg.Add(100) + for i := 0; i < 100; i++ { + go func(i int) { + myMap.addTxReturnEvicted(createTx([]byte("a"), "alice", uint64(i))) + myMap.addTxReturnEvicted(createTx([]byte("b"), "bob", uint64(i))) + myMap.addTxReturnEvicted(createTx([]byte("c"), "carol", uint64(i))) + + wg.Done() + }(i) + } + + wg.Wait() +} + +func newSendersMapToTest() *txListBySenderMap { + return newTxListBySenderMap(4, 
senderConstraints{ + maxNumBytes: math.MaxUint32, + maxNumTxs: math.MaxUint32, + }) +} diff --git a/txcache/txListForSender.go b/txcache/txListForSender.go new file mode 100644 index 0000000000..67e4e8b6f0 --- /dev/null +++ b/txcache/txListForSender.go @@ -0,0 +1,242 @@ +package txcache + +import ( + "bytes" + "container/list" + "sync" + + "github.com/multiversx/mx-chain-core-go/core/atomic" +) + +// txListForSender represents a sorted list of transactions of a particular sender +type txListForSender struct { + sender string + items *list.List + totalBytes atomic.Counter + constraints *senderConstraints + + mutex sync.RWMutex +} + +// newTxListForSender creates a new (sorted) list of transactions +func newTxListForSender(sender string, constraints *senderConstraints) *txListForSender { + return &txListForSender{ + items: list.New(), + sender: sender, + constraints: constraints, + } +} + +// AddTx adds a transaction in sender's list +// This is a "sorted" insert +func (listForSender *txListForSender) AddTx(tx *WrappedTransaction) (bool, [][]byte) { + // We don't allow concurrent interceptor goroutines to mutate a given sender's list + listForSender.mutex.Lock() + defer listForSender.mutex.Unlock() + + insertionPlace, err := listForSender.findInsertionPlace(tx) + if err != nil { + return false, nil + } + + if insertionPlace == nil { + listForSender.items.PushFront(tx) + } else { + listForSender.items.InsertAfter(tx, insertionPlace) + } + + listForSender.onAddedTransaction(tx) + + evicted := listForSender.applySizeConstraints() + return true, evicted +} + +// This function should only be used in critical section (listForSender.mutex) +func (listForSender *txListForSender) applySizeConstraints() [][]byte { + evictedTxHashes := make([][]byte, 0) + + // Iterate back to front + for element := listForSender.items.Back(); element != nil; element = element.Prev() { + if !listForSender.isCapacityExceeded() { + break + } + + listForSender.items.Remove(element) + 
listForSender.onRemovedListElement(element) + + // Keep track of removed transactions + value := element.Value.(*WrappedTransaction) + evictedTxHashes = append(evictedTxHashes, value.TxHash) + } + + return evictedTxHashes +} + +func (listForSender *txListForSender) isCapacityExceeded() bool { + maxBytes := int64(listForSender.constraints.maxNumBytes) + maxNumTxs := uint64(listForSender.constraints.maxNumTxs) + tooManyBytes := listForSender.totalBytes.Get() > maxBytes + tooManyTxs := listForSender.countTx() > maxNumTxs + + return tooManyBytes || tooManyTxs +} + +func (listForSender *txListForSender) onAddedTransaction(tx *WrappedTransaction) { + listForSender.totalBytes.Add(tx.Size) +} + +// This function should only be used in critical section (listForSender.mutex). +// When searching for the insertion place, we consider the following rules: +// - transactions are sorted by nonce in ascending order. +// - transactions with the same nonce are sorted by gas price in descending order. +// - transactions with the same nonce and gas price are sorted by hash in ascending order. +// - duplicates are not allowed. +// - "PPU" measurement is not relevant in this context. Competition among transactions of the same sender (and nonce) is based on gas price. +func (listForSender *txListForSender) findInsertionPlace(incomingTx *WrappedTransaction) (*list.Element, error) { + incomingNonce := incomingTx.Tx.GetNonce() + incomingGasPrice := incomingTx.Tx.GetGasPrice() + + // The loop iterates from the back to the front of the list. + // Starting from the back allows the function to quickly find the insertion point for transactions with higher nonces, which are more likely to be added. 
+ for element := listForSender.items.Back(); element != nil; element = element.Prev() { + currentTx := element.Value.(*WrappedTransaction) + currentTxNonce := currentTx.Tx.GetNonce() + currentTxGasPrice := currentTx.Tx.GetGasPrice() + + if currentTxNonce == incomingNonce { + if currentTxGasPrice > incomingGasPrice { + // The case of same nonce, lower gas price. + // We've found an insertion place: right after "element". + return element, nil + } + + if currentTxGasPrice == incomingGasPrice { + // The case of same nonce, same gas price. + + comparison := bytes.Compare(currentTx.TxHash, incomingTx.TxHash) + if comparison == 0 { + // The incoming transaction will be discarded, since it's already in the cache. + return nil, errItemAlreadyInCache + } + if comparison < 0 { + // We've found an insertion place: right after "element". + return element, nil + } + + // We allow the search loop to continue, since the incoming transaction has a "higher hash". + } + + // We allow the search loop to continue, since the incoming transaction has a higher gas price. + continue + } + + if currentTxNonce < incomingNonce { + // We've found the first transaction with a lower nonce than the incoming one, + // thus the incoming transaction will be placed right after this one. + return element, nil + } + + // We allow the search loop to continue, since the incoming transaction has a higher nonce. + } + + // The incoming transaction will be inserted at the head of the list. 
+ return nil, nil +} + +func (listForSender *txListForSender) onRemovedListElement(element *list.Element) { + tx := element.Value.(*WrappedTransaction) + listForSender.totalBytes.Subtract(tx.Size) +} + +// IsEmpty checks whether the list is empty +func (listForSender *txListForSender) IsEmpty() bool { + return listForSender.countTxWithLock() == 0 +} + +// getTxs returns the transactions of the sender +func (listForSender *txListForSender) getTxs() []*WrappedTransaction { + listForSender.mutex.RLock() + defer listForSender.mutex.RUnlock() + + result := make([]*WrappedTransaction, 0, listForSender.countTx()) + + for element := listForSender.items.Front(); element != nil; element = element.Next() { + value := element.Value.(*WrappedTransaction) + result = append(result, value) + } + + return result +} + +// getTxsReversed returns the transactions of the sender, in reverse nonce order +func (listForSender *txListForSender) getTxsReversed() []*WrappedTransaction { + listForSender.mutex.RLock() + defer listForSender.mutex.RUnlock() + + result := make([]*WrappedTransaction, 0, listForSender.countTx()) + + for element := listForSender.items.Back(); element != nil; element = element.Prev() { + value := element.Value.(*WrappedTransaction) + result = append(result, value) + } + + return result +} + +// This function should only be used in critical section (listForSender.mutex) +func (listForSender *txListForSender) countTx() uint64 { + return uint64(listForSender.items.Len()) +} + +func (listForSender *txListForSender) countTxWithLock() uint64 { + listForSender.mutex.RLock() + defer listForSender.mutex.RUnlock() + return uint64(listForSender.items.Len()) +} + +// removeTransactionsWithLowerOrEqualNonceReturnHashes removes transactions with nonces lower or equal to the given nonce +func (listForSender *txListForSender) removeTransactionsWithLowerOrEqualNonceReturnHashes(targetNonce uint64) [][]byte { + evictedTxHashes := make([][]byte, 0) + + // We don't allow concurrent 
goroutines to mutate a given sender's list + listForSender.mutex.Lock() + defer listForSender.mutex.Unlock() + + for element := listForSender.items.Front(); element != nil; { + tx := element.Value.(*WrappedTransaction) + txNonce := tx.Tx.GetNonce() + + if txNonce > targetNonce { + break + } + + nextElement := element.Next() + _ = listForSender.items.Remove(element) + listForSender.onRemovedListElement(element) + element = nextElement + + // Keep track of removed transactions + evictedTxHashes = append(evictedTxHashes, tx.TxHash) + } + + return evictedTxHashes +} + +func (listForSender *txListForSender) removeTransactionsWithHigherOrEqualNonce(givenNonce uint64) { + listForSender.mutex.Lock() + defer listForSender.mutex.Unlock() + + for element := listForSender.items.Back(); element != nil; { + tx := element.Value.(*WrappedTransaction) + txNonce := tx.Tx.GetNonce() + + if txNonce < givenNonce { + break + } + + prevElement := element.Prev() + _ = listForSender.items.Remove(element) + listForSender.onRemovedListElement(element) + element = prevElement + } +} diff --git a/txcache/txListForSender_test.go b/txcache/txListForSender_test.go new file mode 100644 index 0000000000..da4bbfadb9 --- /dev/null +++ b/txcache/txListForSender_test.go @@ -0,0 +1,197 @@ +package txcache + +import ( + "math" + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestListForSender_AddTx_Sorts(t *testing.T) { + list := newUnconstrainedListToTest() + + list.AddTx(createTx([]byte("a"), ".", 1)) + list.AddTx(createTx([]byte("c"), ".", 3)) + list.AddTx(createTx([]byte("d"), ".", 4)) + list.AddTx(createTx([]byte("b"), ".", 2)) + + require.Equal(t, []string{"a", "b", "c", "d"}, list.getTxHashesAsStrings()) +} + +func TestListForSender_AddTx_GivesPriorityToHigherGas(t *testing.T) { + list := newUnconstrainedListToTest() + + list.AddTx(createTx([]byte("a"), ".", 1)) + list.AddTx(createTx([]byte("b"), ".", 3).withGasPrice(1.2 * oneBillion)) + list.AddTx(createTx([]byte("c"), 
".", 3).withGasPrice(1.1 * oneBillion)) + list.AddTx(createTx([]byte("d"), ".", 2)) + list.AddTx(createTx([]byte("e"), ".", 3).withGasPrice(1.3 * oneBillion)) + + require.Equal(t, []string{"a", "d", "e", "b", "c"}, list.getTxHashesAsStrings()) +} + +func TestListForSender_AddTx_SortsCorrectlyWhenSameNonceSamePrice(t *testing.T) { + list := newUnconstrainedListToTest() + + list.AddTx(createTx([]byte("a"), ".", 1).withGasPrice(oneBillion)) + list.AddTx(createTx([]byte("b"), ".", 3).withGasPrice(3 * oneBillion)) + list.AddTx(createTx([]byte("c"), ".", 3).withGasPrice(3 * oneBillion)) + list.AddTx(createTx([]byte("d"), ".", 3).withGasPrice(2 * oneBillion)) + list.AddTx(createTx([]byte("e"), ".", 3).withGasPrice(3.5 * oneBillion)) + list.AddTx(createTx([]byte("f"), ".", 2).withGasPrice(oneBillion)) + list.AddTx(createTx([]byte("g"), ".", 3).withGasPrice(2.5 * oneBillion)) + + // In case of same-nonce, same-price transactions, the newer one has priority + require.Equal(t, []string{"a", "f", "e", "b", "c", "g", "d"}, list.getTxHashesAsStrings()) +} + +func TestListForSender_AddTx_IgnoresDuplicates(t *testing.T) { + list := newUnconstrainedListToTest() + + added, _ := list.AddTx(createTx([]byte("tx1"), ".", 1)) + require.True(t, added) + added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2)) + require.True(t, added) + added, _ = list.AddTx(createTx([]byte("tx3"), ".", 3)) + require.True(t, added) + added, _ = list.AddTx(createTx([]byte("tx2"), ".", 2)) + require.False(t, added) +} + +func TestListForSender_AddTx_AppliesSizeConstraintsForNumTransactions(t *testing.T) { + list := newListToTest(math.MaxUint32, 3) + + list.AddTx(createTx([]byte("tx1"), ".", 1)) + list.AddTx(createTx([]byte("tx5"), ".", 5)) + list.AddTx(createTx([]byte("tx4"), ".", 4)) + list.AddTx(createTx([]byte("tx2"), ".", 2)) + require.Equal(t, []string{"tx1", "tx2", "tx4"}, list.getTxHashesAsStrings()) + + _, evicted := list.AddTx(createTx([]byte("tx3"), ".", 3)) + require.Equal(t, []string{"tx1", "tx2", 
"tx3"}, list.getTxHashesAsStrings())
+	require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted))
+
+	// Gives priority to higher gas - though undesirable to some extent, "tx3" is evicted
+	_, evicted = list.AddTx(createTx([]byte("tx2++"), ".", 2).withGasPrice(1.5 * oneBillion))
+	require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings())
+	require.Equal(t, []string{"tx3"}, hashesAsStrings(evicted))
+
+	// Though undesirable to some extent, "tx3++" is added, then evicted
+	_, evicted = list.AddTx(createTx([]byte("tx3++"), ".", 3).withGasPrice(1.5 * oneBillion))
+	require.Equal(t, []string{"tx1", "tx2++", "tx2"}, list.getTxHashesAsStrings())
+	require.Equal(t, []string{"tx3++"}, hashesAsStrings(evicted))
+}
+
+func TestListForSender_AddTx_AppliesSizeConstraintsForNumBytes(t *testing.T) {
+	list := newListToTest(1024, math.MaxUint32)
+
+	list.AddTx(createTx([]byte("tx1"), ".", 1).withSize(128).withGasLimit(50000))
+	list.AddTx(createTx([]byte("tx2"), ".", 2).withSize(512).withGasLimit(1500000))
+	list.AddTx(createTx([]byte("tx3"), ".", 3).withSize(256).withGasLimit(1500000))
+	_, evicted := list.AddTx(createTx([]byte("tx5"), ".", 4).withSize(256).withGasLimit(1500000))
+	require.Equal(t, []string{"tx1", "tx2", "tx3"}, list.getTxHashesAsStrings())
+	require.Equal(t, []string{"tx5"}, hashesAsStrings(evicted))
+
+	_, evicted = list.AddTx(createTx([]byte("tx5--"), ".", 4).withSize(128).withGasLimit(50000))
+	require.Equal(t, []string{"tx1", "tx2", "tx3", "tx5--"}, list.getTxHashesAsStrings())
+	require.Equal(t, []string{}, hashesAsStrings(evicted))
+
+	_, evicted = list.AddTx(createTx([]byte("tx4"), ".", 4).withSize(128).withGasLimit(50000))
+	require.Equal(t, []string{"tx1", "tx2", "tx3", "tx4"}, list.getTxHashesAsStrings())
+	require.Equal(t, []string{"tx5--"}, hashesAsStrings(evicted))
+
+	// Gives priority to higher gas - though undesirable to some extent, "tx4" is evicted
+	_, evicted = list.AddTx(createTx([]byte("tx3++"), ".", 
3).withSize(256).withGasLimit(1500000).withGasPrice(1.5 * oneBillion)) + require.Equal(t, []string{"tx1", "tx2", "tx3++", "tx3"}, list.getTxHashesAsStrings()) + require.Equal(t, []string{"tx4"}, hashesAsStrings(evicted)) +} + +func TestListForSender_removeTransactionsWithLowerOrEqualNonceReturnHashes(t *testing.T) { + list := newUnconstrainedListToTest() + + list.AddTx(createTx([]byte("tx-42"), ".", 42)) + list.AddTx(createTx([]byte("tx-43"), ".", 43)) + list.AddTx(createTx([]byte("tx-44"), ".", 44)) + list.AddTx(createTx([]byte("tx-45"), ".", 45)) + + require.Equal(t, 4, list.items.Len()) + + _ = list.removeTransactionsWithLowerOrEqualNonceReturnHashes(43) + require.Equal(t, 2, list.items.Len()) + + _ = list.removeTransactionsWithLowerOrEqualNonceReturnHashes(44) + require.Equal(t, 1, list.items.Len()) + + _ = list.removeTransactionsWithLowerOrEqualNonceReturnHashes(99) + require.Equal(t, 0, list.items.Len()) +} + +func TestListForSender_getTxs(t *testing.T) { + t.Run("without transactions", func(t *testing.T) { + list := newUnconstrainedListToTest() + + require.Len(t, list.getTxs(), 0) + require.Len(t, list.getTxsReversed(), 0) + }) + + t.Run("with transactions", func(t *testing.T) { + list := newUnconstrainedListToTest() + + list.AddTx(createTx([]byte("tx-42"), ".", 42)) + require.Len(t, list.getTxs(), 1) + require.Len(t, list.getTxsReversed(), 1) + + list.AddTx(createTx([]byte("tx-44"), ".", 44)) + require.Len(t, list.getTxs(), 2) + require.Len(t, list.getTxsReversed(), 2) + + list.AddTx(createTx([]byte("tx-43"), ".", 43)) + require.Len(t, list.getTxs(), 3) + require.Len(t, list.getTxsReversed(), 3) + + require.Equal(t, []byte("tx-42"), list.getTxs()[0].TxHash) + require.Equal(t, []byte("tx-43"), list.getTxs()[1].TxHash) + require.Equal(t, []byte("tx-44"), list.getTxs()[2].TxHash) + require.Equal(t, []byte("tx-44"), list.getTxsReversed()[0].TxHash) + require.Equal(t, []byte("tx-43"), list.getTxsReversed()[1].TxHash) + require.Equal(t, []byte("tx-42"), 
list.getTxsReversed()[2].TxHash)
+	})
+}
+
+func TestListForSender_DetectRaceConditions(t *testing.T) {
+	list := newUnconstrainedListToTest()
+
+	wg := sync.WaitGroup{}
+
+	doOperations := func() {
+		// These might be called concurrently:
+		_ = list.IsEmpty()
+		_ = list.getTxs()
+		_ = list.getTxsReversed()
+		_ = list.countTxWithLock()
+		_, _ = list.AddTx(createTx([]byte("test"), ".", 42))
+
+		wg.Done()
+	}
+
+	for i := 0; i < 100; i++ {
+		wg.Add(1)
+		go doOperations()
+	}
+
+	wg.Wait()
+}
+
+func newUnconstrainedListToTest() *txListForSender {
+	return newListToTest(math.MaxUint32, math.MaxUint32)
+}
+
+func newListToTest(maxNumBytes uint32, maxNumTxs uint32) *txListForSender {
+	senderConstraints := &senderConstraints{
+		maxNumBytes: maxNumBytes,
+		maxNumTxs:   maxNumTxs,
+	}
+
+	return newTxListForSender(".", senderConstraints)
+}
diff --git a/txcache/wrappedTransaction.go b/txcache/wrappedTransaction.go
new file mode 100644
index 0000000000..d66ddb3963
--- /dev/null
+++ b/txcache/wrappedTransaction.go
@@ -0,0 +1,69 @@
+package txcache
+
+import (
+	"bytes"
+	"math/big"
+
+	"github.com/multiversx/mx-chain-core-go/data"
+)
+
+// bunchOfTransactions is a slice of WrappedTransaction pointers
+type bunchOfTransactions []*WrappedTransaction
+
+// WrappedTransaction contains a transaction, its hash and extra information
+type WrappedTransaction struct {
+	Tx              data.TransactionHandler
+	TxHash          []byte
+	SenderShardID   uint32
+	ReceiverShardID uint32
+	Size            int64
+
+	// These fields are only set within "precomputeFields".
+	// We don't need to protect them with a mutex, since "precomputeFields" is called only once for each transaction.
+	// Additional note: "WrappedTransaction" objects are created by the Node, in dataRetriever/txpool/shardedTxPool.go.
+	Fee              *big.Int
+	PricePerUnit     uint64
+	TransferredValue *big.Int
+	FeePayer         []byte
+}
+
+// precomputeFields computes (and caches) the derived fields: fee, (average) price per gas unit, transferred value and fee payer.
+func (wrappedTx *WrappedTransaction) precomputeFields(host MempoolHost) { + wrappedTx.Fee = host.ComputeTxFee(wrappedTx.Tx) + + gasLimit := wrappedTx.Tx.GetGasLimit() + if gasLimit != 0 { + wrappedTx.PricePerUnit = wrappedTx.Fee.Uint64() / gasLimit + } + + wrappedTx.TransferredValue = host.GetTransferredValue(wrappedTx.Tx) + wrappedTx.FeePayer = wrappedTx.decideFeePayer() +} + +func (wrappedTx *WrappedTransaction) decideFeePayer() []byte { + asRelayed, ok := wrappedTx.Tx.(data.RelayedTransactionHandler) + if ok && len(asRelayed.GetRelayerAddr()) > 0 { + return asRelayed.GetRelayerAddr() + } + + return wrappedTx.Tx.GetSndAddr() +} + +// Equality is out of scope (not possible in our case). +func (wrappedTx *WrappedTransaction) isTransactionMoreValuableForNetwork(otherTransaction *WrappedTransaction) bool { + // First, compare by PPU (higher PPU is better). + if wrappedTx.PricePerUnit != otherTransaction.PricePerUnit { + return wrappedTx.PricePerUnit > otherTransaction.PricePerUnit + } + + // If PPU is the same, compare by gas limit (higher gas limit is better, promoting less "execution fragmentation"). 
+ gasLimit := wrappedTx.Tx.GetGasLimit() + gasLimitOther := otherTransaction.Tx.GetGasLimit() + + if gasLimit != gasLimitOther { + return gasLimit > gasLimitOther + } + + // In the end, compare by transaction hash + return bytes.Compare(wrappedTx.TxHash, otherTransaction.TxHash) < 0 +} diff --git a/txcache/wrappedTransaction_test.go b/txcache/wrappedTransaction_test.go new file mode 100644 index 0000000000..49c0c3ed4c --- /dev/null +++ b/txcache/wrappedTransaction_test.go @@ -0,0 +1,142 @@ +package txcache + +import ( + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-storage-go/testscommon/txcachemocks" + "github.com/stretchr/testify/require" +) + +func TestWrappedTransaction_precomputeFields(t *testing.T) { + t.Run("only move balance gas limit", func(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + tx := createTx([]byte("a"), "a", 1).withValue(oneQuintillionBig).withDataLength(1).withGasLimit(51500).withGasPrice(oneBillion) + tx.precomputeFields(host) + + require.Equal(t, "51500000000000", tx.Fee.String()) + require.Equal(t, oneBillion, int(tx.PricePerUnit)) + require.Equal(t, "1000000000000000000", tx.TransferredValue.String()) + require.Equal(t, []byte("a"), tx.FeePayer) + }) + + t.Run("move balance gas limit and execution gas limit (a)", func(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + tx := createTx([]byte("b"), "b", 1).withDataLength(1).withGasLimit(51501).withGasPrice(oneBillion) + tx.precomputeFields(host) + + require.Equal(t, "51500010000000", tx.Fee.String()) + require.Equal(t, 999_980_777, int(tx.PricePerUnit)) + require.Equal(t, []byte("b"), tx.FeePayer) + }) + + t.Run("move balance gas limit and execution gas limit (b)", func(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + tx := createTx([]byte("c"), "c", 1).withDataLength(1).withGasLimit(oneMilion).withGasPrice(oneBillion) + tx.precomputeFields(host) + + actualFee := 51500*oneBillion + 
(oneMilion-51500)*oneBillion/100 + require.Equal(t, "60985000000000", tx.Fee.String()) + require.Equal(t, 60_985_000_000_000, actualFee) + require.Equal(t, actualFee/oneMilion, int(tx.PricePerUnit)) + require.Equal(t, []byte("c"), tx.FeePayer) + }) + + t.Run("with guardian", func(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + tx := createTx([]byte("a"), "a", 1).withValue(oneQuintillionBig) + tx.precomputeFields(host) + + require.Equal(t, "50000000000000", tx.Fee.String()) + require.Equal(t, oneBillion, int(tx.PricePerUnit)) + require.Equal(t, "1000000000000000000", tx.TransferredValue.String()) + require.Equal(t, []byte("a"), tx.FeePayer) + }) + + t.Run("with nil transferred value", func(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + tx := createTx([]byte("a"), "a", 1) + tx.precomputeFields(host) + + require.Nil(t, tx.TransferredValue) + require.Equal(t, []byte("a"), tx.FeePayer) + }) + + t.Run("queries host", func(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + host.ComputeTxFeeCalled = func(_ data.TransactionWithFeeHandler) *big.Int { + return big.NewInt(42) + } + host.GetTransferredValueCalled = func(_ data.TransactionHandler) *big.Int { + return big.NewInt(43) + } + + tx := createTx([]byte("a"), "a", 1).withGasLimit(50_000) + tx.precomputeFields(host) + + require.Equal(t, "42", tx.Fee.String()) + require.Equal(t, "43", tx.TransferredValue.String()) + }) +} + +func TestWrappedTransaction_decideFeePayer(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + t.Run("when sender is fee payer", func(t *testing.T) { + tx := createTx([]byte("a"), "a", 1) + tx.precomputeFields(host) + + require.Nil(t, tx.TransferredValue) + require.Equal(t, []byte("a"), tx.FeePayer) + }) + + t.Run("when relayer is fee payer", func(t *testing.T) { + tx := createTx([]byte("a"), "a", 1).withRelayer([]byte("b")).withGasLimit(100_000) + tx.precomputeFields(host) + + require.Nil(t, tx.TransferredValue) + require.Equal(t, []byte("b"), 
tx.FeePayer) + }) +} + +func TestWrappedTransaction_isTransactionMoreValuableForNetwork(t *testing.T) { + host := txcachemocks.NewMempoolHostMock() + + t.Run("decide by price per unit", func(t *testing.T) { + a := createTx([]byte("a-1"), "a", 1).withDataLength(1).withGasLimit(51500).withGasPrice(oneBillion) + a.precomputeFields(host) + + b := createTx([]byte("b-1"), "b", 1).withDataLength(1).withGasLimit(51501).withGasPrice(oneBillion) + b.precomputeFields(host) + + require.True(t, a.isTransactionMoreValuableForNetwork(b)) + }) + + t.Run("decide by gas limit (set them up to have the same PPU)", func(t *testing.T) { + a := createTx([]byte("a-7"), "a", 7).withDataLength(30).withGasLimit(95_000).withGasPrice(oneBillion) + a.precomputeFields(host) + + b := createTx([]byte("b-7"), "b", 7).withDataLength(60).withGasLimit(140_000).withGasPrice(oneBillion) + b.precomputeFields(host) + + require.Equal(t, a.PricePerUnit, b.PricePerUnit) + require.True(t, b.isTransactionMoreValuableForNetwork(a)) + }) + + t.Run("decide by transaction hash (set them up to have the same PPU and gas limit)", func(t *testing.T) { + a := createTx([]byte("a-7"), "a", 7) + a.precomputeFields(host) + + b := createTx([]byte("b-7"), "b", 7) + b.precomputeFields(host) + + require.Equal(t, a.PricePerUnit, b.PricePerUnit) + require.True(t, a.isTransactionMoreValuableForNetwork(b)) + }) +}