From 79c6f704b7303259debc16c97690db389cdfadbd Mon Sep 17 00:00:00 2001
From: nicholaspai <9457025+nicholaspai@users.noreply.github.com>
Date: Mon, 16 Dec 2024 15:21:09 -0500
Subject: [PATCH] fix(Dataworker): Account for total required netSendAmount
 when executing PoolRebalanceLeaves (#1933)

* fix(Dataworker): Update balanceAllocator properly when executing PoolRebalanceLeaves

## Context

The dataworker executor functionality is supposed to detect when to call `sync()` before executing L1 PoolRebalance and RelayerRefund leaves, based on the `liquidReserves` value of each L1 token at the time those leaves are executed. We pass around the `balanceAllocator` when simulating execution of the [PoolRebalanceLeaves](https://github.com/across-protocol/relayer/blob/3e9c1e4108568b39fe007f8fcd71721db4bbe090/src/dataworker/Dataworker.ts#L1491) and the [RelayerRefundLeaves](https://github.com/across-protocol/relayer/blob/3e9c1e4108568b39fe007f8fcd71721db4bbe090/src/dataworker/Dataworker.ts#L1512) in order to keep track of how many L1 tokens are withdrawn from and deposited to the HubPool following hub-chain leaf executions. This way, we can use the `balanceAllocator` in [this function](https://github.com/across-protocol/relayer/blob/3e9c1e4108568b39fe007f8fcd71721db4bbe090/src/dataworker/Dataworker.ts#L1531) to detect when we're not going to have enough funds in LP reserves to execute a leaf [here](https://github.com/across-protocol/relayer/blob/3e9c1e4108568b39fe007f8fcd71721db4bbe090/src/dataworker/Dataworker.ts#L1823).

## Problem

The problem is that when accounting for PoolRebalanceLeaf executions, we were ADDING, not subtracting, balance to the balanceAllocator's count of the HubPool's reserves. This means that if the current `liquidReserves` were sufficient to cover execution of the Ethereum PoolRebalanceLeaf [here](https://github.com/across-protocol/relayer/blob/3e9c1e4108568b39fe007f8fcd71721db4bbe090/src/dataworker/Dataworker.ts#L1484), then the balance allocator would accidentally inflate the HubPool's balance [here](https://github.com/across-protocol/relayer/blob/3e9c1e4108568b39fe007f8fcd71721db4bbe090/src/dataworker/Dataworker.ts#L1488). This function [here](https://github.com/across-protocol/relayer/blob/3e9c1e4108568b39fe007f8fcd71721db4bbe090/src/dataworker/Dataworker.ts#L1529C40-L1529C92) would then operate on that inflated balance: within it, if any individual PoolRebalanceLeaf's `netSendAmount` was less than the token's liquid reserves, a `sync` would be skipped [here](https://github.com/across-protocol/relayer/blob/3e9c1e4108568b39fe007f8fcd71721db4bbe090/src/dataworker/Dataworker.ts#L1802). This [line](https://github.com/across-protocol/relayer/blob/3e9c1e4108568b39fe007f8fcd71721db4bbe090/src/dataworker/Dataworker.ts#L1822) would then artificially inflate the hub pool's balance in the balance allocator, leading to a simulation error much further downstream, where the pool rebalance leaf execution fails for seemingly unknown reasons.

## Examples of problems

We saw a bundle of PoolRebalanceLeaves fail to execute today because three of the leaves, one Ethereum leaf and two non-Ethereum leaves, had a total `netSendAmount` greater than the HubPool's `liquidReserves`, even though each leaf individually had a `netSendAmount` < the `liquidReserves`.

For example, the three leaves had `netSendAmounts` of:

- 40
- 90
- 70

while the HubPool's `liquidReserves` was 180:

- 40 + 90 + 70 = 200 > 180
- 40 < 180
- 90 < 180
- 70 < 180

If you run these numbers through the `executePoolRebalanceLeaves` code above, you'll see how a PoolRebalanceLeaf execution could be submitted and then fail in simulation, because the leaf executions were not preceded by a `sync` transaction.
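Below is a minimal, self-contained sketch of that failure mode. It is illustrative only: `VirtualReserves`, `debit`, and `creditByMistake` are hypothetical names, plain `bigint`s stand in for `BigNumber`, and this is not the real `BalanceAllocator` API.

```ts
// Illustrative stand-ins only; not the actual BalanceAllocator implementation.
type Leaf = { chainId: number; netSendAmount: bigint };

class VirtualReserves {
  constructor(private liquidReserves: bigint) {}

  canCover(amount: bigint): boolean {
    return this.liquidReserves >= amount;
  }

  // Correct accounting: executing a leaf consumes HubPool reserves.
  debit(amount: bigint): void {
    this.liquidReserves -= amount;
  }

  // Buggy accounting: the pre-fix code effectively credited the HubPool's
  // virtual balance after the Ethereum leaf execution instead of debiting it.
  creditByMistake(amount: bigint): void {
    this.liquidReserves += amount;
  }
}

// Numbers from the example above (whole tokens for readability).
const leaves: Leaf[] = [
  { chainId: 1, netSendAmount: 40n }, // Ethereum leaf
  { chainId: 10, netSendAmount: 90n }, // non-Ethereum leaves
  { chainId: 42161, netSendAmount: 70n },
];
const startingLiquidReserves = 180n;

// Checking each leaf in isolation passes, so no sync() gets enqueued...
console.log(leaves.every((leaf) => leaf.netSendAmount <= startingLiquidReserves)); // true

// ...but the aggregate requirement exceeds the reserves, so execution reverts on-chain.
const totalRequired = leaves.reduce((acc, leaf) => acc + leaf.netSendAmount, 0n);
console.log(totalRequired > startingLiquidReserves); // true (200 > 180)

// Buggy path: crediting 40 after the Ethereum leaf leaves a virtual balance of 220, so
// the 90 and 70 leaves both look fundable, even though after actually sending 40 and 90
// only 50 remains for the 70 leaf.
const buggy = new VirtualReserves(startingLiquidReserves);
buggy.creditByMistake(40n); // should have been buggy.debit(40n)
console.log(buggy.canCover(90n) && buggy.canCover(70n)); // true, but fails in simulation

// Correct path: debiting as leaves are marked executable flags the shortfall up front.
const correct = new VirtualReserves(startingLiquidReserves);
for (const leaf of leaves) {
  if (!correct.canCover(leaf.netSendAmount)) {
    console.log(`Chain ${leaf.chainId} leaf needs a sync() first or cannot be executed`);
    break;
  }
  correct.debit(leaf.netSendAmount);
}
```

This is why the fix accounts for the total required `netSendAmount` across all leaves and debits the virtual reserves as each leaf is deemed executable, rather than comparing each leaf's `netSendAmount` to `liquidReserves` in isolation.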
* fix issue
* Update Dataworker.executePoolRebalances.ts
* Add more test cases
* Update Dataworker.executePoolRebalances.ts
* Update Monitor.ts
* comment on tests
* make test better
* Add orbit-fee handling, remove balance allocator
* Fix balanceAllocator call in _executePoolRebalanceLeaves
* throw error if can't fund the DonationBox or loadEthForL2Calls call
* add lifecycle test

Signed-off-by: nicholaspai

* Exit early if aggregate net send amount == 0
* Update Dataworker.executePoolRebalances.ts
* Update log in _updateExchangeRatesBeforeExecutingNonHubChainLeaves when skipping exchange rate update early
* Update Dataworker.ts
* Fund more AZERO whenever we're short
* remove hardcodes
* Improve logs about lookback window being too short
* Improve logs on funding orbit chain message
* Update Dataworker.customSpokePoolClients.ts
* Update index.ts
* Update index.ts
* Add invariant unit test
* Remove l1 tokens with 0 net send amounts from _updateOldExchangeRates
* Rename to amountWei
* Refactor blockRangesAreInvalid to internal helper func
* Squash feeData
* Update src/dataworker/Dataworker.ts

Co-authored-by: Paul <108695806+pxrl@users.noreply.github.com>

* Update src/dataworker/Dataworker.ts

Co-authored-by: Paul <108695806+pxrl@users.noreply.github.com>

* Update src/dataworker/Dataworker.ts

Co-authored-by: Paul <108695806+pxrl@users.noreply.github.com>

* result
* Add unit testing about exiting early if leaves are already executed
* Add ability for some nonHubChain leaves to be executed even if they all cannot
* Skip mainnet leaf execution if we cannot execute instead of throwing
* Skip sync in _updateExchangeRatesBeforeExecutingNonHubChainLeaves if liquid reserves won't increase
* refactor block range pretty printing
* update comments
* Add assert error messages
* Add _getSpokeBalanceForL2Tokens helper and add to logs
* Re-add balance allocator
* Update Dataworker.executeRelayerRefunds.ts
* Update Dataworker.ts
* Remove canExecute return value from _updateExchangeRatesBeforeExecutingHubChainLeaves
* Update Dataworker.executePoolRebalances.ts
* Update Dataworker.executePoolRebalances.ts
* Refactor error log
* Clean up logs
* Consider state of liquid reserves following eth pool rebalance leaf executions
* Improve tests
* Update name
* Add unit test, split executePoolRebalanceLeaf tests in two files to take advantage of parallel test runs in CI
* Remove SIMULATE_L1_EXECUTION
* Add test for amountToReturn
* add tests
* Add test about hub chain slow fill leaves
* Update BalanceAllocator.ts

Co-authored-by: Paul <108695806+pxrl@users.noreply.github.com>

* Update Dataworker.ts

Co-authored-by: Paul <108695806+pxrl@users.noreply.github.com>

* change blockRangesAreInvalidForSpokeClients to return list of chain ID's that are invalid; add DISABLED_CHAINS unit tests to BundleDataClient unit test files

Signed-off-by: nicholaspai

---------

Signed-off-by: nicholaspai
Co-authored-by: Paul <108695806+pxrl@users.noreply.github.com>
---
 src/clients/BalanceAllocator.ts | 12 +
 src/common/Constants.ts | 28 +
 src/dataworker/Dataworker.ts | 731 +++++++++------
 src/dataworker/DataworkerUtils.ts |
139 +-- src/dataworker/index.ts | 4 + test/BalanceAllocator.ts | 6 + test/Dataworker.blockRangeUtils.ts | 262 +++--- test/Dataworker.customSpokePoolClients.ts | 2 +- test/Dataworker.executePoolRebalanceUtils.ts | 901 ++++++++++++++++++ test/Dataworker.executePoolRebalances.ts | 933 +++++++++++-------- test/Dataworker.executeRelayerRefunds.ts | 34 +- test/Dataworker.loadData.fill.ts | 45 + test/Dataworker.loadData.slowFill.ts | 40 + test/Monitor.ts | 2 +- test/fixtures/Dataworker.Fixture.ts | 7 +- test/utils/utils.ts | 4 + 16 files changed, 2302 insertions(+), 848 deletions(-) create mode 100644 test/Dataworker.executePoolRebalanceUtils.ts diff --git a/src/clients/BalanceAllocator.ts b/src/clients/BalanceAllocator.ts index 666dd6f5e..c74bd7d01 100644 --- a/src/clients/BalanceAllocator.ts +++ b/src/clients/BalanceAllocator.ts @@ -97,6 +97,12 @@ export class BalanceAllocator { return this.requestBalanceAllocations([{ chainId, tokens, holder, amount }]); } + async getBalanceSubUsed(chainId: number, token: string, holder: string): Promise { + const balance = await this.getBalance(chainId, token, holder); + const used = this.getUsed(chainId, token, holder); + return balance.sub(used); + } + async getBalance(chainId: number, token: string, holder: string): Promise { if (!this.balances?.[chainId]?.[token]?.[holder]) { const balance = await this._queryBalance(chainId, token, holder); @@ -114,6 +120,12 @@ export class BalanceAllocator { return this.balances[chainId][token][holder]; } + testSetBalance(chainId: number, token: string, holder: string, balance: BigNumber): void { + this.balances[chainId] ??= {}; + this.balances[chainId][token] ??= {}; + this.balances[chainId][token][holder] = balance; + } + getUsed(chainId: number, token: string, holder: string): BigNumber { if (!this.used?.[chainId]?.[token]?.[holder]) { // Note: cannot use assign because it breaks the BigNumber object. diff --git a/src/common/Constants.ts b/src/common/Constants.ts index b6dc7d6c7..fb554b217 100644 --- a/src/common/Constants.ts +++ b/src/common/Constants.ts @@ -611,3 +611,31 @@ export const DEFAULT_GAS_MULTIPLIER: { [chainId: number]: number } = { }; export const CONSERVATIVE_BUNDLE_FREQUENCY_SECONDS = 3 * 60 * 60; // 3 hours is a safe assumption for the time + +export const ARBITRUM_ORBIT_L1L2_MESSAGE_FEE_DATA: { + [chainId: number]: { + // Amount of tokens required to send a single message to the L2 + amountWei: number; + // Multiple of the required amount above to send to the feePayer in case + // we are short funds. For example, if set to 10, then everytime we need to load more funds + // we'll send 10x the required amount. + amountMultipleToFund: number; + // Account that pays the fees on-chain that we will load more fee tokens into. + feePayer?: string; + // Token that the feePayer will pay the fees in. + feeToken?: string; + }; +} = { + // Leave feePayer undefined if feePayer is HubPool. + // Leave feeToken undefined if feeToken is ETH. 
+ [CHAIN_IDs.ARBITRUM]: { + amountWei: 0.02, + amountMultipleToFund: 1, + }, + [CHAIN_IDs.ALEPH_ZERO]: { + amountWei: 0.49, + amountMultipleToFund: 20, + feePayer: "0x0d57392895Db5aF3280e9223323e20F3951E81B1", // DonationBox + feeToken: TOKEN_SYMBOLS_MAP.AZERO.addresses[CHAIN_IDs.MAINNET], + }, +}; diff --git a/src/dataworker/Dataworker.ts b/src/dataworker/Dataworker.ts index 0aca04370..f3db13fdc 100644 --- a/src/dataworker/Dataworker.ts +++ b/src/dataworker/Dataworker.ts @@ -18,6 +18,7 @@ import { getWidestPossibleExpectedBlockRange, getEndBlockBuffers, _buildPoolRebalanceRoot, + ERC20, } from "../utils"; import { ProposedRootBundle, @@ -37,12 +38,13 @@ import { blockRangesAreInvalidForSpokeClients, getBlockRangeForChain, getImpliedBundleBlockRanges, + InvalidBlockRange, l2TokensToCountTowardsSpokePoolLeafExecutionCapital, persistDataToArweave, } from "../dataworker/DataworkerUtils"; import { _buildRelayerRefundRoot, _buildSlowRelayRoot } from "./DataworkerUtils"; import _ from "lodash"; -import { CONTRACT_ADDRESSES, spokePoolClientsToProviders } from "../common"; +import { ARBITRUM_ORBIT_L1L2_MESSAGE_FEE_DATA, CONTRACT_ADDRESSES, spokePoolClientsToProviders } from "../common"; import * as sdk from "@across-protocol/sdk"; import { BundleData, @@ -309,24 +311,19 @@ export class Dataworker { // Exit early if spoke pool clients don't have early enough event data to satisfy block ranges for the // potential proposal - if ( - Object.keys(earliestBlocksInSpokePoolClients).length > 0 && - (await blockRangesAreInvalidForSpokeClients( - spokePoolClients, - blockRangesForProposal, - chainIds, - earliestBlocksInSpokePoolClients, - this.isV3(mainnetBlockRange[0]) - )) - ) { + const invalidBlockRanges = await this._validateBlockRanges( + spokePoolClients, + blockRangesForProposal, + chainIds, + earliestBlocksInSpokePoolClients, + this.isV3(mainnetBlockRange[0]) + ); + if (invalidBlockRanges.length > 0) { this.logger.warn({ at: "Dataworke#propose", message: "Cannot propose bundle with insufficient event data. Set a larger DATAWORKER_FAST_LOOKBACK_COUNT", - rootBundleRanges: blockRangesForProposal, - earliestBlocksInSpokePoolClients, - spokeClientsEventSearchConfigs: Object.fromEntries( - Object.entries(spokePoolClients).map(([chainId, client]) => [chainId, client.eventSearchConfig]) - ), + invalidBlockRanges, + bundleBlockRanges: this._prettifyBundleBlockRanges(chainIds, blockRangesForProposal), }); return; } @@ -836,25 +833,19 @@ export class Dataworker { // Exit early if spoke pool clients don't have early enough event data to satisfy block ranges for the // pending proposal. Log an error loudly so that user knows that disputer needs to increase its lookback. - if ( - Object.keys(earliestBlocksInSpokePoolClients).length > 0 && - (await blockRangesAreInvalidForSpokeClients( - spokePoolClients, - blockRangesImpliedByBundleEndBlocks, - chainIds, - earliestBlocksInSpokePoolClients, - this.isV3(mainnetBlockRange[0]) - )) - ) { - this.logger.debug({ + const invalidBlockRanges = await this._validateBlockRanges( + spokePoolClients, + blockRangesImpliedByBundleEndBlocks, + chainIds, + earliestBlocksInSpokePoolClients, + this.isV3(mainnetBlockRange[0]) + ); + if (invalidBlockRanges.length > 0) { + this.logger.warn({ at: "Dataworke#validate", message: "Cannot validate bundle with insufficient event data. 
Set a larger DATAWORKER_FAST_LOOKBACK_COUNT", - rootBundleRanges: blockRangesImpliedByBundleEndBlocks, - availableSpokePoolClients: Object.keys(spokePoolClients), - earliestBlocksInSpokePoolClients, - spokeClientsEventSearchConfigs: Object.fromEntries( - Object.entries(spokePoolClients).map(([chainId, client]) => [chainId, client.eventSearchConfig]) - ), + invalidBlockRanges, + bundleBlockRanges: this._prettifyBundleBlockRanges(chainIds, blockRangesImpliedByBundleEndBlocks), }); return { valid: false, @@ -1074,27 +1065,20 @@ export class Dataworker { ); const mainnetBlockRange = blockNumberRanges[0]; const chainIds = this.clients.configStoreClient.getChainIdIndicesForBlock(mainnetBlockRange[0]); - if ( - Object.keys(earliestBlocksInSpokePoolClients).length > 0 && - (await blockRangesAreInvalidForSpokeClients( - spokePoolClients, - blockNumberRanges, - chainIds, - earliestBlocksInSpokePoolClients, - this.isV3(mainnetBlockRange[0]) - )) - ) { + const invalidBlockRanges = await this._validateBlockRanges( + spokePoolClients, + blockNumberRanges, + chainIds, + earliestBlocksInSpokePoolClients, + this.isV3(mainnetBlockRange[0]) + ); + if (invalidBlockRanges.length > 0) { this.logger.warn({ at: "Dataworke#executeSlowRelayLeaves", message: "Cannot validate bundle with insufficient event data. Set a larger DATAWORKER_FAST_LOOKBACK_COUNT", - chainId, - rootBundleRanges: blockNumberRanges, - availableSpokePoolClients: Object.keys(spokePoolClients), - earliestBlocksInSpokePoolClients, - spokeClientsEventSearchConfigs: Object.fromEntries( - Object.entries(spokePoolClients).map(([chainId, client]) => [chainId, client.eventSearchConfig]) - ), + invalidBlockRanges, + bundleTxn: matchingRootBundle.transactionHash, }); continue; } @@ -1279,6 +1263,12 @@ export class Dataworker { chainId: destinationChainId, token: outputToken, amount: outputAmount, + spokeBalance: await this._getSpokeBalanceForL2Tokens( + balanceAllocator, + destinationChainId, + outputToken, + client.spokePool.address + ), }); } @@ -1356,7 +1346,7 @@ export class Dataworker { submitExecution = true, earliestBlocksInSpokePoolClients: { [chainId: number]: number } = {} ): Promise { - let leafCount = 0; + const leafCount = 0; this.logger.debug({ at: "Dataworker#executePoolRebalanceLeaves", message: "Executing pool rebalance leaves", @@ -1463,6 +1453,32 @@ export class Dataworker { return leafCount; } + return this._executePoolLeavesAndSyncL1Tokens( + spokePoolClients, + balanceAllocator, + unexecutedLeaves, + expectedTrees.poolRebalanceTree.tree, + expectedTrees.relayerRefundTree.leaves, + expectedTrees.relayerRefundTree.tree, + expectedTrees.slowRelayTree.leaves, + expectedTrees.slowRelayTree.tree, + submitExecution + ); + } + + async _executePoolLeavesAndSyncL1Tokens( + spokePoolClients: { [chainId: number]: SpokePoolClient }, + balanceAllocator: BalanceAllocator, + poolLeaves: PoolRebalanceLeaf[], + poolRebalanceTree: MerkleTree, + relayerRefundLeaves: RelayerRefundLeaf[], + relayerRefundTree: MerkleTree, + slowFillLeaves: SlowFillLeaf[], + slowFillTree: MerkleTree, + submitExecution: boolean + ): Promise { + const hubPoolChainId = this.clients.hubPoolClient.chainId; + // There are three times that we should look to update the HubPool's liquid reserves: // 1. First, before we attempt to execute the HubChain PoolRebalance leaves and RelayerRefund leaves. 
// We should see if there are new liquid reserves we need to account for before sending out these @@ -1473,15 +1489,21 @@ export class Dataworker { // back from the Ethereum RelayerRefundLeaves. // 3. Third, we haven't updated the exchange rate for an L1 token on a PoolRebalanceLeaf in a while that // we're going to execute, so we should batch in an update. - let updatedLiquidReserves: Record = {}; + + // Keep track of the HubPool.pooledTokens.liquidReserves state value before entering into any possible + // LP token update. This way we can efficiently update LP liquid reserves values if and only if we need to do so + // to execute a pool leaf. + let latestLiquidReserves: Record = {}; + let leafCount = 0; // First, execute mainnet pool rebalance leaves. Then try to execute any relayer refund and slow leaves for the // expected relayed root hash, then proceed with remaining pool rebalance leaves. This is an optimization that // takes advantage of the fact that mainnet transfers between HubPool and SpokePool are atomic. - const mainnetLeaves = unexecutedLeaves.filter((leaf) => leaf.chainId === hubPoolChainId); + const mainnetLeaves = poolLeaves.filter((leaf) => leaf.chainId === hubPoolChainId); if (mainnetLeaves.length > 0) { - assert(mainnetLeaves.length === 1); - updatedLiquidReserves = await this._updateExchangeRatesBeforeExecutingHubChainLeaves( + assert(mainnetLeaves.length === 1, "There should only be one Ethereum PoolRebalanceLeaf"); + latestLiquidReserves = await this._updateExchangeRatesBeforeExecutingHubChainLeaves( + balanceAllocator, mainnetLeaves[0], submitExecution ); @@ -1489,7 +1511,7 @@ export class Dataworker { spokePoolClients, mainnetLeaves, balanceAllocator, - expectedTrees.poolRebalanceTree.tree, + poolRebalanceTree, submitExecution ); @@ -1497,21 +1519,21 @@ export class Dataworker { // will be relayed after executing the above pool rebalance root. const nextRootBundleIdForMainnet = spokePoolClients[hubPoolChainId].getLatestRootBundleId(); - // Now, execute refund and slow fill leaves for Mainnet using new funds. These methods will return early if there + // Now, execute refund and slow fill leaves for Mainnet using any new funds. These methods will return early if there // are no relevant leaves to execute. await this._executeSlowFillLeaf( - expectedTrees.slowRelayTree.leaves.filter((leaf) => leaf.chainId === hubPoolChainId), + slowFillLeaves.filter((leaf) => leaf.chainId === hubPoolChainId), balanceAllocator, spokePoolClients[hubPoolChainId], - expectedTrees.slowRelayTree.tree, + slowFillTree, submitExecution, nextRootBundleIdForMainnet ); await this._executeRelayerRefundLeaves( - expectedTrees.relayerRefundTree.leaves.filter((leaf) => leaf.chainId === hubPoolChainId), + relayerRefundLeaves.filter((leaf) => leaf.chainId === hubPoolChainId), balanceAllocator, spokePoolClients[hubPoolChainId], - expectedTrees.relayerRefundTree.tree, + relayerRefundTree, submitExecution, nextRootBundleIdForMainnet ); @@ -1519,30 +1541,29 @@ export class Dataworker { // Before executing the other pool rebalance leaves, see if we should update any exchange rates to account for // any tokens returned to the hub pool via the EthereumSpokePool that we'll need to use to execute - // any of the remaining pool rebalance leaves. This might include tokens we've already enqueued to update - // in the previous step, but this captures any tokens that are sent back from the Ethereum_SpokePool to the - // HubPool that we want to capture an increased liquidReserves for. 
- const nonHubChainPoolRebalanceLeaves = unexecutedLeaves.filter((leaf) => leaf.chainId !== hubPoolChainId); + // any of the remaining pool rebalance leaves. This is also important if we failed to execute + // the mainnet leaf and haven't enqueued a sync call that could be used to execute some of the other leaves. + const nonHubChainPoolRebalanceLeaves = poolLeaves.filter((leaf) => leaf.chainId !== hubPoolChainId); if (nonHubChainPoolRebalanceLeaves.length === 0) { return leafCount; } - const updatedL1Tokens = await this._updateExchangeRatesBeforeExecutingNonHubChainLeaves( - updatedLiquidReserves, + const syncedL1Tokens = await this._updateExchangeRatesBeforeExecutingNonHubChainLeaves( + latestLiquidReserves, balanceAllocator, nonHubChainPoolRebalanceLeaves, submitExecution ); - Object.keys(updatedLiquidReserves).forEach((token) => { - if (!updatedL1Tokens.has(token)) { - updatedL1Tokens.add(token); + Object.keys(latestLiquidReserves).forEach((token) => { + if (!syncedL1Tokens.has(token)) { + syncedL1Tokens.add(token); } }); // Save all L1 tokens that we haven't updated exchange rates for in a different step. - const l1TokensWithPotentiallyOlderUpdate = expectedTrees.poolRebalanceTree.leaves.reduce((l1TokenSet, leaf) => { + const l1TokensWithPotentiallyOlderUpdate = poolLeaves.reduce((l1TokenSet, leaf) => { const currLeafL1Tokens = leaf.l1Tokens; - currLeafL1Tokens.forEach((l1Token) => { - if (!l1TokenSet.includes(l1Token) && !updatedL1Tokens.has(l1Token)) { + currLeafL1Tokens.forEach((l1Token, i) => { + if (leaf.netSendAmounts[i].gt(0) && !l1TokenSet.includes(l1Token) && !syncedL1Tokens.has(l1Token)) { l1TokenSet.push(l1Token); } }); @@ -1550,112 +1571,166 @@ export class Dataworker { }, []); await this._updateOldExchangeRates(l1TokensWithPotentiallyOlderUpdate, submitExecution); - // Perform similar funding checks for remaining non-mainnet pool rebalance leaves. + // Figure out which non-mainnet pool rebalance leaves we can execute and execute them: leafCount += await this._executePoolRebalanceLeaves( spokePoolClients, nonHubChainPoolRebalanceLeaves, balanceAllocator, - expectedTrees.poolRebalanceTree.tree, + poolRebalanceTree, submitExecution ); return leafCount; } + async _getExecutablePoolRebalanceLeaves( + poolLeaves: PoolRebalanceLeaf[], + balanceAllocator: BalanceAllocator + ): Promise { + // We evaluate these leaves iteratively rather than in parallel so we can keep track + // of the used balances after "executing" each leaf. + const executableLeaves: PoolRebalanceLeaf[] = []; + for (const leaf of poolLeaves) { + // We can evaluate the l1 tokens within the leaf in parallel because we can assume + // that there are not duplicate L1 tokens within the leaf. 
+ const isExecutable = await sdkUtils.everyAsync(leaf.l1Tokens, async (l1Token, i) => { + const netSendAmountForLeaf = leaf.netSendAmounts[i]; + if (netSendAmountForLeaf.lte(0)) { + return true; + } + const hubChainId = this.clients.hubPoolClient.chainId; + const hubPoolAddress = this.clients.hubPoolClient.hubPool.address; + const success = await balanceAllocator.requestBalanceAllocation( + hubChainId, + [l1Token], + hubPoolAddress, + netSendAmountForLeaf + ); + return success; + }); + if (isExecutable) { + executableLeaves.push(leaf); + } else { + this.logger.error({ + at: "Dataworker#_getExecutablePoolRebalanceLeaves", + message: `Not enough funds to execute pool rebalance leaf for chain ${leaf.chainId}`, + l1Tokens: leaf.l1Tokens, + netSendAmounts: leaf.netSendAmounts, + }); + } + } + return executableLeaves; + } + async _executePoolRebalanceLeaves( spokePoolClients: { [chainId: number]: SpokePoolClient; }, - leaves: PoolRebalanceLeaf[], + allLeaves: PoolRebalanceLeaf[], balanceAllocator: BalanceAllocator, tree: MerkleTree, submitExecution: boolean ): Promise { const hubPoolChainId = this.clients.hubPoolClient.chainId; - const fundedLeaves = ( - await Promise.all( - leaves.map(async (leaf) => { - const requests = leaf.netSendAmounts.map((amount, i) => ({ - amount: amount.gt(bnZero) ? amount : bnZero, - tokens: [leaf.l1Tokens[i]], - holder: this.clients.hubPoolClient.hubPool.address, - chainId: hubPoolChainId, - })); - - if (sdkUtils.chainIsArbitrum(leaf.chainId)) { - const hubPoolBalance = await this.clients.hubPoolClient.hubPool.provider.getBalance( - this.clients.hubPoolClient.hubPool.address - ); - if (hubPoolBalance.lt(this._getRequiredEthForArbitrumPoolRebalanceLeaf(leaf))) { - requests.push({ - tokens: [ZERO_ADDRESS], - amount: this._getRequiredEthForArbitrumPoolRebalanceLeaf(leaf), - holder: await this.clients.hubPoolClient.hubPool.signer.getAddress(), - chainId: hubPoolChainId, - }); - } - } - - const success = await balanceAllocator.requestBalanceAllocations( - requests.filter((req) => req.amount.gt(bnZero)) - ); + const signer = this.clients.hubPoolClient.hubPool.signer; + + // Evaluate leaves iteratively because we will be modifying virtual balances and we want + // to make sure we are getting the virtual balance computations correct. + const fundedLeaves = await this._getExecutablePoolRebalanceLeaves(allLeaves, balanceAllocator); + const executableLeaves: PoolRebalanceLeaf[] = []; + for (const leaf of fundedLeaves) { + // For orbit leaves we need to check if we have enough gas tokens to pay for the L1 to L2 message. + if (!sdkUtils.chainIsArbitrum(leaf.chainId) && !sdkUtils.chainIsOrbit(leaf.chainId)) { + executableLeaves.push(leaf); + continue; + } - if (!success) { - // Note: this is an error because the HubPool should generally not run out of funds to put into - // netSendAmounts. This means that no new bundles can be proposed until this leaf is funded. + // Check if orbit leaf can be executed. 
+ const { + amount: requiredAmount, + token: feeToken, + holder, + } = await this._getRequiredEthForOrbitPoolRebalanceLeaf(leaf); + const feeData = { + tokens: [feeToken], + amount: requiredAmount, + chainId: hubPoolChainId, + }; + const success = await balanceAllocator.requestBalanceAllocations([{ ...feeData, holder }]); + if (!success) { + this.logger.debug({ + at: "Dataworker#_executePoolRebalanceLeaves", + message: `Loading more orbit gas token to pay for L1->L2 message submission fees to ${getNetworkName( + leaf.chainId + )} 📨!`, + leaf, + feeToken, + requiredAmount, + }); + if (submitExecution) { + const canFund = await balanceAllocator.requestBalanceAllocations([ + { ...feeData, holder: await signer.getAddress() }, + ]); + if (!canFund) { this.logger.error({ - at: "Dataworker#executePoolRebalanceLeaves", - message: "Not executing pool rebalance leaf on HubPool due to lack of funds to send.", - root: tree.getHexRoot(), - leafId: leaf.leafId, - rebalanceChain: leaf.chainId, - token: leaf.l1Tokens, - netSendAmounts: leaf.netSendAmounts, + at: "Dataworker#_executePoolRebalanceLeaves", + message: `Failed to fund ${requiredAmount.toString()} of orbit gas token ${feeToken} for message to ${getNetworkName( + leaf.chainId + )}!`, }); - } else { - // Add balances to spoke pool on mainnet since we know it will be sent atomically. - if (leaf.chainId === hubPoolChainId) { - await Promise.all( - leaf.netSendAmounts.map(async (amount, i) => { - if (amount.gt(bnZero)) { - await balanceAllocator.addUsed( - leaf.chainId, - leaf.l1Tokens[i], - spokePoolClients[leaf.chainId].spokePool.address, - amount.mul(-1) - ); - } - }) - ); - } + continue; } - return success ? leaf : undefined; - }) - ) - ).filter(isDefined); - - let hubPoolBalance; - if (fundedLeaves.some((leaf) => sdkUtils.chainIsArbitrum(leaf.chainId))) { - hubPoolBalance = await this.clients.hubPoolClient.hubPool.provider.getBalance( - this.clients.hubPoolClient.hubPool.address - ); - } - fundedLeaves.forEach((leaf) => { - const proof = tree.getHexProof(leaf); - const mrkdwn = `Root hash: ${tree.getHexRoot()}\nLeaf: ${leaf.leafId}\nChain: ${leaf.chainId}`; - if (submitExecution) { - if (sdkUtils.chainIsArbitrum(leaf.chainId)) { - if (hubPoolBalance.lt(this._getRequiredEthForArbitrumPoolRebalanceLeaf(leaf))) { + if (feeToken === ZERO_ADDRESS) { this.clients.multiCallerClient.enqueueTransaction({ contract: this.clients.hubPoolClient.hubPool, chainId: hubPoolChainId, method: "loadEthForL2Calls", args: [], message: `Loaded ETH for message to ${getNetworkName(leaf.chainId)} 📨!`, - mrkdwn, - value: this._getRequiredEthForArbitrumPoolRebalanceLeaf(leaf), + mrkdwn: `Root hash: ${tree.getHexRoot()}\nLeaf: ${leaf.leafId}\nChain: ${leaf.chainId}`, + value: requiredAmount, + }); + } else { + this.clients.multiCallerClient.enqueueTransaction({ + contract: new Contract(feeToken, ERC20.abi, signer), + chainId: hubPoolChainId, + method: "transfer", + args: [holder, requiredAmount], + message: `Loaded orbit gas token for message to ${getNetworkName(leaf.chainId)} 📨!`, + mrkdwn: `Root hash: ${tree.getHexRoot()}\nLeaf: ${leaf.leafId}\nChain: ${leaf.chainId}`, }); } } + } else { + this.logger.debug({ + at: "Dataworker#_executePoolRebalanceLeaves", + message: `feePayer ${holder} has sufficient orbit gas token to pay for L1->L2 message submission fees to ${getNetworkName( + leaf.chainId + )}`, + feeToken, + requiredAmount, + feePayerBalance: await balanceAllocator.getBalanceSubUsed(hubPoolChainId, feeToken, holder), + }); + } + executableLeaves.push(leaf); + } + + 
// Execute the leaves: + executableLeaves.forEach((leaf) => { + // Add balances to spoke pool on mainnet since we know it will be sent atomically. + if (leaf.chainId === hubPoolChainId) { + leaf.netSendAmounts.forEach((amount, i) => { + if (amount.gt(bnZero)) { + balanceAllocator.addUsed( + leaf.chainId, + leaf.l1Tokens[i], + spokePoolClients[leaf.chainId].spokePool.address, + amount.mul(-1) + ); + } + }); + } + const mrkdwn = `Root hash: ${tree.getHexRoot()}\nLeaf: ${leaf.leafId}\nChain: ${leaf.chainId}`; + if (submitExecution) { this.clients.multiCallerClient.enqueueTransaction({ contract: this.clients.hubPoolClient.hubPool, chainId: hubPoolChainId, @@ -1668,9 +1743,9 @@ export class Dataworker { leaf.runningBalances, leaf.leafId, leaf.l1Tokens, - proof, + tree.getHexProof(leaf), ], - message: "Executed PoolRebalanceLeaf 🌿!", + message: `Executed PoolRebalanceLeaf for chain ${leaf.chainId} 🌿!`, mrkdwn, unpermissioned: true, // If simulating execution of leaves for non-mainnet chains, can fail as it may require funds to be returned @@ -1678,22 +1753,27 @@ export class Dataworker { canFailInSimulation: leaf.chainId !== hubPoolChainId, }); } else { - this.logger.debug({ at: "Dataworker#executePoolRebalanceLeaves", message: mrkdwn }); + this.logger.debug({ at: "Dataworker#_executePoolRebalanceLeaves", message: mrkdwn }); } }); - return fundedLeaves.length; + + return executableLeaves.length; } async _updateExchangeRatesBeforeExecutingHubChainLeaves( + balanceAllocator: BalanceAllocator, poolRebalanceLeaf: Pick, submitExecution: boolean ): Promise> { const hubPool = this.clients.hubPoolClient.hubPool; const chainId = this.clients.hubPoolClient.chainId; - const updatedL1Tokens: Record = {}; + const updatedLiquidReserves: Record = {}; const { netSendAmounts, l1Tokens } = poolRebalanceLeaf; await sdk.utils.forEachAsync(l1Tokens, async (l1Token, idx) => { + const currentLiquidReserves = this.clients.hubPoolClient.getLpTokenInfoForL1Token(l1Token)?.liquidReserves; + updatedLiquidReserves[l1Token] = currentLiquidReserves; + assert(currentLiquidReserves !== undefined && currentLiquidReserves.gte(0), "Liquid reserves should be >= 0"); const tokenSymbol = this.clients.hubPoolClient.getTokenInfo(chainId, l1Token)?.symbol; // If netSendAmounts is negative, there is no need to update this exchange rate. @@ -1701,17 +1781,6 @@ export class Dataworker { return; } - const multicallInput = [ - hubPool.interface.encodeFunctionData("pooledTokens", [l1Token]), - hubPool.interface.encodeFunctionData("sync", [l1Token]), - hubPool.interface.encodeFunctionData("pooledTokens", [l1Token]), - ]; - const multicallOutput = await hubPool.callStatic.multicall(multicallInput); - const currentPooledTokens = hubPool.interface.decodeFunctionResult("pooledTokens", multicallOutput[0]); - const updatedPooledTokens = hubPool.interface.decodeFunctionResult("pooledTokens", multicallOutput[2]); - const currentLiquidReserves = currentPooledTokens.liquidReserves; - const updatedLiquidReserves = updatedPooledTokens.liquidReserves; - // If current liquid reserves can cover the netSendAmount, then there is no need to update the exchange rate. if (currentLiquidReserves.gte(netSendAmounts[idx])) { this.logger.debug({ @@ -1721,45 +1790,47 @@ export class Dataworker { netSendAmount: netSendAmounts[idx], l1Token, }); + updatedLiquidReserves[l1Token] = currentLiquidReserves.sub(netSendAmounts[idx]); return; } - // If updated liquid reserves are not enough to cover the payment, then send a warning that - // we're short on funds. 
- if (updatedLiquidReserves.lt(netSendAmounts[idx])) { - this.logger.error({ + // @dev: post-sync liquid reserves should be equal to ERC20 balanceOf the HubPool. + const postSyncLiquidReserves = await balanceAllocator.getBalanceSubUsed(chainId, l1Token, hubPool.address); + + // If updated liquid reserves are not enough to cover the payment, then send an error log that + // we're short on funds. Otherwise, enqueue a sync() call and then update the availableLiquidReserves. + if (postSyncLiquidReserves.lt(netSendAmounts[idx])) { + this.logger.warn({ at: "Dataworker#_updateExchangeRatesBeforeExecutingHubChainLeaves", - message: `Not enough funds to execute pool rebalance leaf on HubPool for token: ${tokenSymbol}`, - poolRebalanceLeaf, + message: `Not enough funds to execute Ethereum pool rebalance leaf on HubPool for token: ${tokenSymbol}`, netSendAmount: netSendAmounts[idx], - currentPooledTokens, - updatedPooledTokens, + currentLiquidReserves, + postSyncLiquidReserves, }); - return; - } - - this.logger.debug({ - at: "Dataworker#_updateExchangeRatesBeforeExecutingHubChainLeaves", - message: `Updating exchange rate update for ${tokenSymbol} because we need to update the liquid reserves of the contract to execute the hubChain poolRebalanceLeaf.`, - poolRebalanceLeaf, - netSendAmount: netSendAmounts[idx], - currentPooledTokens, - updatedPooledTokens, - }); - updatedL1Tokens[l1Token] = updatedPooledTokens.liquidReserves; - if (submitExecution) { - this.clients.multiCallerClient.enqueueTransaction({ - contract: hubPool, - chainId, - method: "exchangeRateCurrent", - args: [l1Token], - message: "Updated exchange rate ♻️!", - mrkdwn: `Updated exchange rate for l1 token: ${tokenSymbol}`, - unpermissioned: true, + } else { + // At this point, we can assume that the liquid reserves increased post-sync so we'll enqueue an update. + updatedLiquidReserves[l1Token] = postSyncLiquidReserves.sub(netSendAmounts[idx]); + this.logger.debug({ + at: "Dataworker#_updateExchangeRatesBeforeExecutingHubChainLeaves", + message: `Updating exchange rate for ${tokenSymbol} because we need to update the liquid reserves of the contract to execute the hubChain poolRebalanceLeaf.`, + netSendAmount: netSendAmounts[idx], + currentLiquidReserves, + postSyncLiquidReserves, }); + if (submitExecution) { + this.clients.multiCallerClient.enqueueTransaction({ + contract: hubPool, + chainId, + method: "exchangeRateCurrent", + args: [l1Token], + message: "Updated exchange rate ♻️!", + mrkdwn: `Updated exchange rate for l1 token: ${tokenSymbol}`, + unpermissioned: true, + }); + } } }); - return updatedL1Tokens; + return updatedLiquidReserves; } async _updateExchangeRatesBeforeExecutingNonHubChainLeaves( @@ -1772,81 +1843,103 @@ export class Dataworker { const hubPool = this.clients.hubPoolClient.hubPool; const hubPoolChainId = this.clients.hubPoolClient.chainId; + const aggregateNetSendAmounts: Record = {}; + await sdkUtils.forEachAsync(poolRebalanceLeaves, async (leaf) => { await sdkUtils.forEachAsync(leaf.l1Tokens, async (l1Token, idx) => { - const tokenSymbol = this.clients.hubPoolClient.getTokenInfo(hubPoolChainId, l1Token)?.symbol; + aggregateNetSendAmounts[l1Token] ??= bnZero; - if (updatedL1Tokens.has(l1Token)) { - return; - } // If leaf's netSendAmount is negative, then we don't need to updateExchangeRates since the Hub will not // have a liquidity constraint because it won't be sending any tokens. 
if (leaf.netSendAmounts[idx].lte(0)) { return; } - // The "used" balance kept in the BalanceAllocator should have adjusted for the netSendAmounts and relayer refund leaf - // executions above. Therefore, check if the current liquidReserves is less than the pool rebalance leaf's netSendAmount - // and the virtual hubPoolBalance would be enough to execute it. If so, then add an update exchange rate call to make sure that - // the HubPool becomes "aware" of its inflow following the relayre refund leaf execution. - let currHubPoolLiquidReserves = latestLiquidReserves[l1Token]; - if (!currHubPoolLiquidReserves) { - // @dev If there aren't liquid reserves for this token then set them to max value so we won't update them. - currHubPoolLiquidReserves = this.clients.hubPoolClient.getLpTokenInfoForL1Token(l1Token).liquidReserves; - } - assert(currHubPoolLiquidReserves !== undefined); - // We only need to update the exchange rate in the case where tokens are returned to the HubPool increasing - // its balance enough that it can execute a pool rebalance leaf it otherwise would not be able to. - // This would only happen if the starting hub pool balance is below the net send amount. If it started - // above, then the dataworker would not purposefully send tokens out of it to fulfill the Ethereum - // PoolRebalanceLeaf and then return tokens to it to execute another chain's PoolRebalanceLeaf. - if (currHubPoolLiquidReserves.gte(leaf.netSendAmounts[idx])) { - this.logger.debug({ - at: "Dataworker#_updateExchangeRatesBeforeExecutingNonHubChainLeaves", - message: `Skipping exchange rate update for ${tokenSymbol} because current liquid reserves > netSendAmount for chain ${leaf.chainId}`, - l2ChainId: leaf.chainId, - currHubPoolLiquidReserves, - netSendAmount: leaf.netSendAmounts[idx], - l1Token, - }); - return; - } + aggregateNetSendAmounts[l1Token] = aggregateNetSendAmounts[l1Token].add(leaf.netSendAmounts[idx]); + }); + }); - // @dev: Virtual balance = post-sync liquid reserves + any used balance. - const multicallInput = [ - hubPool.interface.encodeFunctionData("sync", [l1Token]), - hubPool.interface.encodeFunctionData("pooledTokens", [l1Token]), - ]; - const multicallOutput = await hubPool.callStatic.multicall(multicallInput); - const updatedPooledTokens = hubPool.interface.decodeFunctionResult("pooledTokens", multicallOutput[1]); - const updatedLiquidReserves = updatedPooledTokens.liquidReserves; - const virtualHubPoolBalance = updatedLiquidReserves.sub( - balanceAllocator.getUsed(hubPoolChainId, l1Token, hubPool.address) - ); + // Now, go through each L1 token and see if we need to update the exchange rate for it. + await sdkUtils.forEachAsync(Object.keys(aggregateNetSendAmounts), async (l1Token) => { + const currHubPoolLiquidReserves = + latestLiquidReserves[l1Token] ?? this.clients.hubPoolClient.getLpTokenInfoForL1Token(l1Token)?.liquidReserves; + assert( + currHubPoolLiquidReserves !== undefined && currHubPoolLiquidReserves.gte(0), + "Liquid reserves should be >= 0" + ); - // If the virtual balance is still too low to execute the pool leaf, then log an error that this will - // pool rebalance leaf execution will fail. 
- if (virtualHubPoolBalance.lt(leaf.netSendAmounts[idx])) { - this.logger.error({ - at: "Dataworker#executePoolRebalanceLeaves", - message: "Executing pool rebalance leaf on HubPool will fail due to lack of funds to send.", - leaf: leaf, - l1Token, - netSendAmount: leaf.netSendAmounts[idx], - updatedLiquidReserves, - virtualHubPoolBalance, - }); - return; - } + const requiredNetSendAmountForL1Token = aggregateNetSendAmounts[l1Token]; + // If netSendAmounts is 0, there is no need to update this exchange rate. + assert(requiredNetSendAmountForL1Token.gte(0), "Aggregate net send amount should be >= 0"); + if (requiredNetSendAmountForL1Token.eq(0)) { + return; + } + + const tokenSymbol = this.clients.hubPoolClient.getTokenInfo(hubPoolChainId, l1Token)?.symbol; + if (currHubPoolLiquidReserves.gte(requiredNetSendAmountForL1Token)) { this.logger.debug({ - at: "Dataworker#executePoolRebalanceLeaves", - message: `Relayer refund leaf will return enough funds to HubPool to execute PoolRebalanceLeaf, updating exchange rate for ${tokenSymbol}`, + at: "Dataworker#_updateExchangeRatesBeforeExecutingNonHubChainLeaves", + message: `Skipping exchange rate update for ${tokenSymbol} because current liquid reserves > required netSendAmount for non-hubChain pool leaves`, + leavesWithNetSendAmountRequirementsFromHubPoolLiquidReserves: Object.fromEntries( + poolRebalanceLeaves + .filter((leaf) => { + const l1TokenIndex = leaf.l1Tokens.indexOf(l1Token); + if (l1TokenIndex === -1) { + return false; + } + const netSendAmount = leaf.netSendAmounts[l1TokenIndex]; + return netSendAmount.gt(0); + }) + .map((leaf) => [leaf.chainId, leaf.netSendAmounts[leaf.l1Tokens.indexOf(l1Token)]]) + ), + currHubPoolLiquidReserves, + requiredNetSendAmountForL1Token, + l1Token, + }); + return; + } + + // Current liquid reserves are insufficient to execute aggregate net send amount for this token so + // look at the updated liquid reserves post-sync. This will be equal the ERC20 balanceOf the hub pool + // including any netSendAmounts used in a prior pool leaf execution. + const updatedLiquidReserves = await balanceAllocator.getBalanceSubUsed(hubPoolChainId, l1Token, hubPool.address); + + // If the post-sync balance is still too low to execute all the pool leaves, then log an error + if (updatedLiquidReserves.lt(requiredNetSendAmountForL1Token)) { + this.logger.warn({ + at: "Dataworker#_updateExchangeRatesBeforeExecutingNonHubChainLeaves", + message: `Not enough funds to execute ALL non-Ethereum pool rebalance leaf on HubPool for token: ${tokenSymbol}, updating exchange rate anyways to try to execute as many leaves as possible`, + l1Token, + requiredNetSendAmountForL1Token, + currHubPoolLiquidReserves, + updatedLiquidReserves, + }); + } else { + this.logger.debug({ + at: "Dataworker#_updateExchangeRatesBeforeExecutingNonHubChainLeaves", + message: `Post-sync liquid reserves are sufficient to execute PoolRebalanceLeaf, updating exchange rate for ${tokenSymbol}`, + l1Token, + requiredNetSendAmountForL1Token, + currHubPoolLiquidReserves, updatedLiquidReserves, - virtualHubPoolBalance, - netSendAmount: leaf.netSendAmounts[idx], - leaf, }); + } + + // We don't know yet which leaves we can execute so we'll update the exchange rate for this token even if + // some leaves might not be executable. + // TODO: Be more precise about whether updating this l1 token is worth it. 
For example, if we update this l1 + // token and its reserves increase, depending on which other tokens are contained in the pool rebalance leaf + // with this token, increasing this token's reserves might not help us execute those leaves. + if (updatedLiquidReserves.gt(currHubPoolLiquidReserves)) { updatedL1Tokens.add(l1Token); - }); + } else { + this.logger.debug({ + at: "Dataworker#_updateExchangeRatesBeforeExecutingNonHubChainLeaves", + message: `Skipping exchange rate update for ${tokenSymbol} because liquid reserves would not increase`, + currHubPoolLiquidReserves, + updatedLiquidReserves, + l1Token, + }); + } }); // Submit executions at the end since the above double loop runs in parallel and we don't want to submit @@ -2008,26 +2101,19 @@ export class Dataworker { const blockNumberRanges = getImpliedBundleBlockRanges(hubPoolClient, configStoreClient, matchingRootBundle); const mainnetBlockRanges = blockNumberRanges[0]; const chainIds = this.clients.configStoreClient.getChainIdIndicesForBlock(mainnetBlockRanges[0]); - if ( - Object.keys(earliestBlocksInSpokePoolClients).length > 0 && - (await blockRangesAreInvalidForSpokeClients( - spokePoolClients, - blockNumberRanges, - chainIds, - earliestBlocksInSpokePoolClients, - this.isV3(mainnetBlockRanges[0]) - )) - ) { + const invalidBlockRanges = await this._validateBlockRanges( + spokePoolClients, + blockNumberRanges, + chainIds, + earliestBlocksInSpokePoolClients, + this.isV3(mainnetBlockRanges[0]) + ); + if (invalidBlockRanges.length > 0) { this.logger.warn({ at: "Dataworke#executeRelayerRefundLeaves", message: "Cannot validate bundle with insufficient event data. Set a larger DATAWORKER_FAST_LOOKBACK_COUNT", - chainId, - rootBundleRanges: blockNumberRanges, - availableSpokePoolClients: Object.keys(spokePoolClients), - earliestBlocksInSpokePoolClients, - spokeClientsEventSearchConfigs: Object.fromEntries( - Object.entries(spokePoolClients).map(([chainId, client]) => [chainId, client.eventSearchConfig]) - ), + invalidBlockRanges, + bundleTxn: matchingRootBundle.transactionHash, }); continue; } @@ -2163,15 +2249,27 @@ export class Dataworker { const success = await balanceAllocator.requestBalanceAllocations(balanceRequestsToQuery); if (!success) { this.logger.warn({ - at: "Dataworker#executeRelayerRefundLeaves", - message: "Not executing relayer refund leaf on SpokePool due to lack of funds.", + at: "Dataworker#_executeRelayerRefundLeaves", + message: `Not executing relayer refund leaf on chain ${leaf.chainId} due to lack of spoke or msg.sender funds for token ${l1TokenInfo?.symbol}`, root: relayerRefundTree.getHexRoot(), bundle: rootBundleId, leafId: leaf.leafId, - token: l1TokenInfo?.symbol, - chainId: leaf.chainId, amountToReturn: leaf.amountToReturn, - refunds: leaf.refundAmounts, + totalRefundAmount: leaf.refundAmounts.reduce((acc, curr) => acc.add(curr), BigNumber.from(0)), + spokeBalance: await this._getSpokeBalanceForL2Tokens( + balanceAllocator, + leaf.chainId, + leaf.l2TokenAddress, + client.spokePool.address + ), + requiredEthValue: valueToPassViaPayable, + senderEthValue: + valueToPassViaPayable && + (await balanceAllocator.getBalanceSubUsed( + leaf.chainId, + ZERO_ADDRESS, + await client.spokePool.signer.getAddress() + )), }); } else { // If mainnet leaf, then allocate balance to the HubPool since it will be atomically transferred. 
@@ -2214,7 +2312,7 @@ export class Dataworker { canFailInSimulation: leaf.chainId === this.clients.hubPoolClient.chainId, }); } else { - this.logger.debug({ at: "Dataworker#executeRelayerRefundLeaves", message: mrkdwn }); + this.logger.debug({ at: "Dataworker#_executeRelayerRefundLeaves", message: mrkdwn }); } }); } @@ -2281,6 +2379,19 @@ export class Dataworker { } } + _getSpokeBalanceForL2Tokens( + balanceAllocator: BalanceAllocator, + chainId: number, + token: string, + holder: string + ): Promise { + return sdkUtils.reduceAsync( + l2TokensToCountTowardsSpokePoolLeafExecutionCapital(token, chainId), + async (acc, token) => acc.add(await balanceAllocator.getBalanceSubUsed(chainId, token, holder)), + bnZero + ); + } + _getPoolRebalanceRoot( blockRangesForChains: number[][], latestMainnetBlock: number, @@ -2323,21 +2434,53 @@ export class Dataworker { return _.cloneDeep(this.rootCache[key]); } - _getRequiredEthForArbitrumPoolRebalanceLeaf(leaf: PoolRebalanceLeaf): BigNumber { - // For arbitrum, the bot needs enough ETH to pay for each L1 -> L2 message. + async _getRequiredEthForOrbitPoolRebalanceLeaf(leaf: PoolRebalanceLeaf): Promise<{ + amount: BigNumber; + token: string; + holder: string; + }> { + // TODO: Make this code more dynamic in the future. For now, hard code custom gas token fees. + let relayMessageFee: BigNumber; + let token: string; + let holder: string; + if (leaf.chainId === CHAIN_IDs.ALEPH_ZERO) { + // Unlike when handling native ETH, the monitor bot does NOT support sending arbitrary ERC20 tokens to any other + // EOA, so if we're short a custom gas token like AZERO, then we're going to have to keep sending over token + // amounts to the DonationBox contract. Therefore, we'll multiply the final amount by 10 to ensure we don't incur + // a transfer() gas cost on every single pool rebalance leaf execution involving this arbitrum orbit chain. + const { amountWei, feePayer, feeToken, amountMultipleToFund } = + ARBITRUM_ORBIT_L1L2_MESSAGE_FEE_DATA[CHAIN_IDs.ALEPH_ZERO]; + relayMessageFee = toBNWei(amountWei).mul(amountMultipleToFund); + token = feeToken; + holder = feePayer; + } else { + // For now, assume arbitrum message fees are the same for all non-custom gas token chains. This obviously needs + // to be changed if we add support for an orbit chains where we pay message fees in ETH but they are different + // parameters than for Arbitrum mainnet. + const { amountWei, amountMultipleToFund } = ARBITRUM_ORBIT_L1L2_MESSAGE_FEE_DATA[CHAIN_IDs.ARBITRUM]; + relayMessageFee = toBNWei(amountWei).mul(amountMultipleToFund); + token = ZERO_ADDRESS; + holder = this.clients.hubPoolClient.hubPool.address; + } + + // For orbit chains, the bot needs enough ETH to pay for each L1 -> L2 message. // The following executions trigger an L1 -> L2 message: - // 1. The first arbitrum leaf for a particular set of roots. This means the roots must be sent and is + // 1. The first orbit leaf for a particular set of roots. This means the roots must be sent and is // signified by groupIndex === 0. // 2. Any netSendAmount > 0 triggers an L1 -> L2 token send, which costs 0.02 ETH. let requiredAmount = leaf.netSendAmounts.reduce( - (acc, curr) => (curr.gt(0) ? acc.add(toBNWei("0.02")) : acc), + (acc, curr) => (curr.gt(0) ? 
acc.add(relayMessageFee) : acc), BigNumber.from(0) ); if (leaf.groupIndex === 0) { - requiredAmount = requiredAmount.add(toBNWei("0.02")); + requiredAmount = requiredAmount.add(relayMessageFee); } - return requiredAmount; + return { + amount: requiredAmount, + token, + holder, + }; } /** @@ -2406,4 +2549,24 @@ export class Dataworker { this.clients.configStoreClient.getEnabledChains(mainnetBundleStartBlock) ); } + + async _validateBlockRanges( + spokePoolClients: SpokePoolClientsByChain, + blockRanges: number[][], + chainIds: number[], + earliestBlocksInSpokePoolClients: { [chainId: number]: number }, + isV3: boolean + ): Promise { + return await blockRangesAreInvalidForSpokeClients( + spokePoolClients, + blockRanges, + chainIds, + earliestBlocksInSpokePoolClients, + isV3 + ); + } + + _prettifyBundleBlockRanges(chainIds: number[], blockRanges: number[][]): Record { + return Object.fromEntries(chainIds.map((chainId, i) => [chainId, blockRanges[i]])); + } } diff --git a/src/dataworker/DataworkerUtils.ts b/src/dataworker/DataworkerUtils.ts index 3352c5a30..8a592a4e2 100644 --- a/src/dataworker/DataworkerUtils.ts +++ b/src/dataworker/DataworkerUtils.ts @@ -42,13 +42,14 @@ import { any } from "superstruct"; // TODO: Move to SDK since this implements UMIP logic about validating block ranges. // Return true if we won't be able to construct a root bundle for the bundle block ranges ("blockRanges") because // the bundle wants to look up data for events that weren't in the spoke pool client's search range. +export type InvalidBlockRange = { chainId: number; reason: string }; export async function blockRangesAreInvalidForSpokeClients( spokePoolClients: Record, blockRanges: number[][], chainIdListForBundleEvaluationBlockNumbers: number[], earliestValidBundleStartBlock: { [chainId: number]: number }, isV3 = false -): Promise { +): Promise { assert(blockRanges.length === chainIdListForBundleEvaluationBlockNumbers.length); let endBlockTimestamps: { [chainId: number]: number } | undefined; if (isV3) { @@ -60,68 +61,94 @@ export async function blockRangesAreInvalidForSpokeClients( // There should be a spoke pool client instantiated for every bundle timestamp. assert(!Object.keys(endBlockTimestamps).some((chainId) => !isDefined(spokePoolClients[chainId]))); } - return utils.someAsync(blockRanges, async ([start, end], index) => { - const chainId = chainIdListForBundleEvaluationBlockNumbers[index]; - // If block range is 0 then chain is disabled, we don't need to query events for this chain. - if (isNaN(end) || isNaN(start)) { - return true; - } - if (start === end) { - return false; - } - const spokePoolClient = spokePoolClients[chainId]; - - // If spoke pool client doesn't exist for enabled chain then we clearly cannot query events for this chain. - if (spokePoolClient === undefined) { - return true; - } + // Return an undefined object if the block ranges are valid + return ( + await utils.mapAsync(blockRanges, async ([start, end], index): Promise => { + const chainId = chainIdListForBundleEvaluationBlockNumbers[index]; + if (isNaN(end) || isNaN(start)) { + return { + reason: `block range contains undefined block for: [isNaN(start): ${isNaN(start)}, isNaN(end): ${isNaN( + end + )}]`, + chainId, + }; + } + if (start === end) { + // If block range is 0 then chain is disabled, we don't need to query events for this chain. 
+ return undefined; + } - const clientLastBlockQueried = spokePoolClient.latestBlockSearched; + const spokePoolClient = spokePoolClients[chainId]; - const earliestValidBundleStartBlockForChain = - earliestValidBundleStartBlock[chainId] ?? spokePoolClient.deploymentBlock; + // If spoke pool client doesn't exist for enabled chain then we clearly cannot query events for this chain. + if (spokePoolClient === undefined) { + return { + reason: "spoke pool client undefined", + chainId, + }; + } - // If range start block is less than the earliest spoke pool client we can validate or the range end block - // is greater than the latest client end block, then ranges are invalid. - // Note: Math.max the from block with the registration block of the spoke pool to handle the edge case for the first - // bundle that set its start blocks equal 0. - const bundleRangeFromBlock = Math.max(spokePoolClient.deploymentBlock, start); - if (bundleRangeFromBlock < earliestValidBundleStartBlockForChain || end > clientLastBlockQueried) { - return true; - } + const clientLastBlockQueried = spokePoolClient.latestBlockSearched; + + const earliestValidBundleStartBlockForChain = + earliestValidBundleStartBlock?.[chainId] ?? spokePoolClient.deploymentBlock; + + // If range start block is less than the earliest spoke pool client we can validate or the range end block + // is greater than the latest client end block, then ranges are invalid. + // Note: Math.max the from block with the registration block of the spoke pool to handle the edge case for the first + // bundle that set its start blocks equal 0. + const bundleRangeFromBlock = Math.max(spokePoolClient.deploymentBlock, start); + const bundleRangeFromBlockTooEarly = bundleRangeFromBlock < earliestValidBundleStartBlockForChain; + const endGreaterThanClientLastBlockQueried = end > clientLastBlockQueried; + if (bundleRangeFromBlockTooEarly || endGreaterThanClientLastBlockQueried) { + return { + reason: `${ + bundleRangeFromBlockTooEarly + ? `bundleRangeFromBlock ${bundleRangeFromBlock} < earliestValidBundleStartBlockForChain ${earliestValidBundleStartBlockForChain}` + : `end ${end} > clientLastBlockQueried ${clientLastBlockQueried}` + }`, + chainId, + }; + } - if (endBlockTimestamps !== undefined) { - const maxFillDeadlineBufferInBlockRange = await spokePoolClient.getMaxFillDeadlineInRange( - bundleRangeFromBlock, - end - ); - // Skip this check if the spokePoolClient.fromBlock is less than or equal to the spokePool deployment block. - // In this case, we have all the information for this SpokePool possible so there are no older deposits - // that might have expired that we might miss. - const conservativeBundleFrequencySeconds = Number( - process.env.CONSERVATIVE_BUNDLE_FREQUENCY_SECONDS ?? CONSERVATIVE_BUNDLE_FREQUENCY_SECONDS - ); - if ( - spokePoolClient.eventSearchConfig.fromBlock > spokePoolClient.deploymentBlock && - // @dev The maximum lookback window we need to evaluate expired deposits is the max fill deadline buffer, - // which captures all deposits that newly expired, plus the bundle time (e.g. 1 hour) to account for the - // maximum time it takes for a newly expired deposit to be included in a bundle. A conservative value for - // this bundle time is 3 hours. This `conservativeBundleFrequencySeconds` buffer also ensures that all deposits - // that are technically "expired", but have fills in the bundle, are also included. This can happen if a fill - // is sent pretty late into the deposit's expiry period. 
- endBlockTimestamps[chainId] - spokePoolClient.getOldestTime() < - maxFillDeadlineBufferInBlockRange + conservativeBundleFrequencySeconds - ) { - return true; + if (endBlockTimestamps !== undefined) { + const maxFillDeadlineBufferInBlockRange = await spokePoolClient.getMaxFillDeadlineInRange( + bundleRangeFromBlock, + end + ); + // Skip this check if the spokePoolClient.fromBlock is less than or equal to the spokePool deployment block. + // In this case, we have all the information for this SpokePool possible so there are no older deposits + // that might have expired that we might miss. + const conservativeBundleFrequencySeconds = Number( + process.env.CONSERVATIVE_BUNDLE_FREQUENCY_SECONDS ?? CONSERVATIVE_BUNDLE_FREQUENCY_SECONDS + ); + if ( + spokePoolClient.eventSearchConfig.fromBlock > spokePoolClient.deploymentBlock && + // @dev The maximum lookback window we need to evaluate expired deposits is the max fill deadline buffer, + // which captures all deposits that newly expired, plus the bundle time (e.g. 1 hour) to account for the + // maximum time it takes for a newly expired deposit to be included in a bundle. A conservative value for + // this bundle time is 3 hours. This `conservativeBundleFrequencySeconds` buffer also ensures that all deposits + // that are technically "expired", but have fills in the bundle, are also included. This can happen if a fill + // is sent pretty late into the deposit's expiry period. + endBlockTimestamps[chainId] - spokePoolClient.getOldestTime() < + maxFillDeadlineBufferInBlockRange + conservativeBundleFrequencySeconds + ) { + return { + reason: `cannot evaluate all possible expired deposits; endBlockTimestamp ${ + endBlockTimestamps[chainId] + } - spokePoolClient.getOldestTime ${spokePoolClient.getOldestTime()} < maxFillDeadlineBufferInBlockRange ${maxFillDeadlineBufferInBlockRange} + conservativeBundleFrequencySeconds ${conservativeBundleFrequencySeconds}`, + chainId, + }; + } } - } - // We must now assume that all newly expired deposits at the time of the bundle end blocks are contained within - // the spoke pool client's memory. + // We must now assume that all newly expired deposits at the time of the bundle end blocks are contained within + // the spoke pool client's memory. - // If we get to here, block ranges are valid, return false. - return false; - }); + // If we get to here, block ranges are valid, return false. 
+ return undefined; + }) + ).filter(isDefined); } export function _buildSlowRelayRoot(bundleSlowFillsV3: BundleSlowFills): { diff --git a/src/dataworker/index.ts b/src/dataworker/index.ts index 1c1bd646c..3a6f893ee 100644 --- a/src/dataworker/index.ts +++ b/src/dataworker/index.ts @@ -194,7 +194,11 @@ export async function runDataworker(_logger: winston.Logger, baseSigner: Signer) at: "Dataworker#index", message: "Exiting early due to dataworker function collision", proposalCollision, + proposedBundleDataDefined: isDefined(proposedBundleData), executorCollision, + poolRebalanceLeafExecutionCount, + unclaimedPoolRebalanceLeafCount: pendingProposal.unclaimedPoolRebalanceLeafCount, + challengePeriodNotPassed: pendingProposal.challengePeriodEndTimestamp > clients.hubPoolClient.currentTime, pendingProposal, }); } else { diff --git a/test/BalanceAllocator.ts b/test/BalanceAllocator.ts index 8645b1566..4383da472 100644 --- a/test/BalanceAllocator.ts +++ b/test/BalanceAllocator.ts @@ -54,6 +54,12 @@ describe("BalanceAllocator", async function () { expect(balanceAllocator.getUsed(1, testToken1, testAccount1)).to.equal(BigNumber.from(100)); }); + it("Returns balance sub used", async function () { + balanceAllocator.addUsed(1, testToken1, testAccount1, BigNumber.from(100)); + balanceAllocator.setMockBalances(1, testToken1, testAccount1, BigNumber.from(150)); + expect(await balanceAllocator.getBalanceSubUsed(1, testToken1, testAccount1)).to.equal(BigNumber.from(50)); + }); + it("Simple request", async function () { balanceAllocator.setMockBalances(1, testToken1, testAccount1, BigNumber.from(100)); expect(await balanceAllocator.requestBalanceAllocation(1, [testToken1], testAccount1, BigNumber.from(50))).to.be diff --git a/test/Dataworker.blockRangeUtils.ts b/test/Dataworker.blockRangeUtils.ts index 418828bc9..f97e199f3 100644 --- a/test/Dataworker.blockRangeUtils.ts +++ b/test/Dataworker.blockRangeUtils.ts @@ -5,7 +5,7 @@ import { setupDataworker } from "./fixtures/Dataworker.Fixture"; import { DataworkerClients } from "../src/dataworker/DataworkerClientHelper"; import { HubPoolClient, SpokePoolClient } from "../src/clients"; import { originChainId } from "./constants"; -import { blockRangesAreInvalidForSpokeClients } from "../src/dataworker/DataworkerUtils"; +import { blockRangesAreInvalidForSpokeClients, InvalidBlockRange } from "../src/dataworker/DataworkerUtils"; import { getDeployedBlockNumber } from "@across-protocol/contracts"; import { MockHubPoolClient, MockSpokePoolClient } from "./mocks"; import { getTimestampsForBundleEndBlocks } from "../src/utils/BlockUtils"; @@ -155,6 +155,32 @@ describe("Dataworker block range-related utility methods", async function () { // and getDeploymentBlockNumber should be changed to work in test environments. 
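A minimal sketch (illustrative only, inferred from the `InvalidBlockRange` import and the assertions in the tests below, not part of this patch): `blockRangesAreInvalidForSpokeClients` now resolves to an array of per-chain objects instead of a single boolean, where an empty array means every range can be validated.

// Assumed shape, based on the `result[0].chainId` / `result[0].reason` assertions below.
type InvalidBlockRangeSketch = { chainId: number; reason: string };

// Hypothetical caller: each entry names the offending chain and why its block range cannot be validated.
function describeInvalidRanges(invalidRanges: InvalidBlockRangeSketch[]): string[] {
  return invalidRanges.map(({ chainId, reason }) => `chain ${chainId}: ${reason}`);
}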
const _spokePoolClients = { [chainId]: spokePoolClients[chainId] }; let chainIds = [chainId]; + let result: InvalidBlockRange[]; + + // Block ranges are invalid if any spoke pool client for a chain is undefined + result = await blockRangesAreInvalidForSpokeClients( + {}, + [[0, spokePoolClients[chainId].latestBlockSearched]], + chainIds, + {} + ); + expect(result.length).to.equal(1); + expect(result[0].chainId).to.equal(chainId); + expect(result[0].reason).to.contain("spoke pool client undefined"); + + // Block ranges are valid if the range = 0 + result = await blockRangesAreInvalidForSpokeClients(_spokePoolClients, [[0, 0]], chainIds, {}); + expect(result.length).to.equal(0); + + // Block ranges are invalid if a from or to block is undefined + result = await blockRangesAreInvalidForSpokeClients(_spokePoolClients, [[0, undefined]], chainIds, {}); + expect(result.length).to.equal(1); + expect(result[0].chainId).to.equal(chainId); + expect(result[0].reason).to.contain("isNaN(end)"); + result = await blockRangesAreInvalidForSpokeClients(_spokePoolClients, [[undefined, 0]], chainIds, {}); + expect(result.length).to.equal(1); + expect(result[0].chainId).to.equal(chainId); + expect(result[0].reason).to.contain("isNaN(start)"); // Look if bundle range from block is before the latest invalid // bundle start block. If so, then the range is invalid. @@ -169,6 +195,15 @@ describe("Dataworker block range-related utility methods", async function () { throw new Error(`Chain ${originChainId} SpokePoolClient has not been updated`); } + // Does not error if earliest block range object is empty: + result = await blockRangesAreInvalidForSpokeClients( + _spokePoolClients, + [[0, spokePoolClients[chainId].latestBlockSearched]], + chainIds, + {} + ); + expect(result.length).to.equal(0); + // latestInvalidBundleStartBlock is only used if its greater than the spoke pool deployment block, so in the // following tests, set latestInvalidBundleStartBlock > deployment blocks. @@ -178,81 +213,88 @@ describe("Dataworker block range-related utility methods", async function () { // Bundle block range fromBlocks are greater than // latest invalid bundle start blocks below and toBlocks are >= client's last block queried, return false meaning // that block ranges can be validated by spoke pool clients. - expect( - await blockRangesAreInvalidForSpokeClients( - _spokePoolClients, - [[mainnetDeploymentBlock + 3, spokePoolClients[chainId].latestBlockSearched]], - chainIds, - { [chainId]: mainnetDeploymentBlock + 2 } - ) - ).to.equal(false); + result = await blockRangesAreInvalidForSpokeClients( + _spokePoolClients, + [[mainnetDeploymentBlock + 3, spokePoolClients[chainId].latestBlockSearched]], + chainIds, + { [chainId]: mainnetDeploymentBlock + 2 } + ); + expect(result.length).to.equal(0); // Set block range toBlock > client's last block queried. Clients can no longer validate this block range. 
- expect( - await blockRangesAreInvalidForSpokeClients( - _spokePoolClients, - [[mainnetDeploymentBlock + 3, spokePoolClients[chainId].latestBlockSearched + 3]], - chainIds, - { [chainId]: mainnetDeploymentBlock + 2 } - ) - ).to.equal(true); + result = await blockRangesAreInvalidForSpokeClients( + _spokePoolClients, + [[mainnetDeploymentBlock + 3, spokePoolClients[chainId].latestBlockSearched + 3]], + chainIds, + { [chainId]: mainnetDeploymentBlock + 2 } + ); + expect(result.length).to.equal(1); + expect(result[0].chainId).to.equal(chainIds[0]); + expect(result[0].reason).to.contain("> clientLastBlockQueried"); // Bundle block range toBlocks is less than // latest invalid bundle start blocks below, so block ranges can't be validated by clients. - expect( - await blockRangesAreInvalidForSpokeClients( - _spokePoolClients, - [[mainnetDeploymentBlock + 1, spokePoolClients[chainId].latestBlockSearched]], - chainIds, - { [chainId]: mainnetDeploymentBlock + 2 } - ) - ).to.equal(true); + result = await blockRangesAreInvalidForSpokeClients( + _spokePoolClients, + [[mainnetDeploymentBlock + 1, spokePoolClients[chainId].latestBlockSearched]], + chainIds, + { [chainId]: mainnetDeploymentBlock + 2 } + ); + expect(result.length).to.equal(1); + expect(result[0].chainId).to.equal(chainIds[0]); + expect(result[0].reason).to.contain("< earliestValidBundleStartBlockForChain"); + // Works even if the condition is true for one chain. const optimismDeploymentBlock = getDeployedBlockNumber("SpokePool", 10); - expect( - await blockRangesAreInvalidForSpokeClients( - { [chainId]: spokePoolClients[chainId], [10]: spokePoolClients[originChainId] }, - [ - [mainnetDeploymentBlock + 1, spokePoolClients[chainId].latestBlockSearched], - [optimismDeploymentBlock + 3, spokePoolClients[originChainId].latestBlockSearched], - ], - [chainId, 10], - { [chainId]: mainnetDeploymentBlock + 2, [10]: optimismDeploymentBlock + 2 } - ) - ).to.equal(true); - expect( - await blockRangesAreInvalidForSpokeClients( - { [chainId]: spokePoolClients[chainId], [10]: spokePoolClients[originChainId] }, - [ - [mainnetDeploymentBlock + 3, spokePoolClients[chainId].latestBlockSearched], - [optimismDeploymentBlock + 3, spokePoolClients[originChainId].latestBlockSearched], - ], - [chainId, 10], - { [chainId]: mainnetDeploymentBlock + 2, [10]: optimismDeploymentBlock + 2 } - ) - ).to.equal(false); + result = await blockRangesAreInvalidForSpokeClients( + { [chainId]: spokePoolClients[chainId], [10]: spokePoolClients[originChainId] }, + [ + [mainnetDeploymentBlock + 1, spokePoolClients[chainId].latestBlockSearched], + [optimismDeploymentBlock + 3, spokePoolClients[originChainId].latestBlockSearched], + ], + [chainId, 10], + // hub chain start block is higher than block range from block passed in for hub chain above + { [chainId]: mainnetDeploymentBlock + 2, [10]: optimismDeploymentBlock + 2 } + ); + expect(result.length).to.equal(1); + expect(result[0].chainId).to.equal(chainIds[0]); + expect(result[0].reason).to.contain("< earliestValidBundleStartBlockForChain"); + + // Now both from blocks are above the earliest invalid start block. 
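// Editor's note (illustrative, not part of this patch): worked numbers for these checks, where D is the
// chain's deployment block and the earliest valid bundle start block is D + 2:
//   start = D + 1 -> bundleRangeFromBlock = max(D, D + 1) = D + 1 < D + 2 -> invalid ("< earliestValidBundleStartBlockForChain")
//   start = D + 3 -> bundleRangeFromBlock = D + 3 >= D + 2 -> valid
//   start = 0     -> bundleRangeFromBlock = max(D, 0) = D, so validity depends on whether D is at least the earliest valid start block.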
+ result = await blockRangesAreInvalidForSpokeClients( + { [chainId]: spokePoolClients[chainId], [10]: spokePoolClients[originChainId] }, + [ + [mainnetDeploymentBlock + 3, spokePoolClients[chainId].latestBlockSearched], + [optimismDeploymentBlock + 3, spokePoolClients[originChainId].latestBlockSearched], + ], + [chainId, 10], + { [chainId]: mainnetDeploymentBlock + 2, [10]: optimismDeploymentBlock + 2 } + ); + expect(result.length).to.equal(0); // On these tests, set block range fromBlock < deployment block. The deployment block is now compared against // the latest invalid start block. This means that the dataworker will refuse to validate any bundles with clients // that don't have early enough data for the first bundle, which started at the deployment block height. - expect( - await blockRangesAreInvalidForSpokeClients( - _spokePoolClients, - [[0, spokePoolClients[chainId].latestBlockSearched]], - chainIds, - { - [chainId]: mainnetDeploymentBlock + 2, - } - ) - ).to.equal(true); - expect( - await blockRangesAreInvalidForSpokeClients( - _spokePoolClients, - [[0, spokePoolClients[chainId].latestBlockSearched]], - chainIds, - { - [chainId]: mainnetDeploymentBlock - 1, - } - ) - ).to.equal(false); + result = await blockRangesAreInvalidForSpokeClients( + _spokePoolClients, + [[0, spokePoolClients[chainId].latestBlockSearched]], + chainIds, + { + [chainId]: mainnetDeploymentBlock + 2, + } + ); + expect(result.length).to.equal(1); + expect(result[0].chainId).to.equal(chainIds[0]); + expect(result[0].reason).to.contain("< earliestValidBundleStartBlockForChain"); + + // This time, the deployment block is higher than the earliestValidBundleStartBlockForChain so the range is valid. + result = await blockRangesAreInvalidForSpokeClients( + _spokePoolClients, + [[0, spokePoolClients[chainId].latestBlockSearched]], + chainIds, + { + [chainId]: mainnetDeploymentBlock - 1, + } + ); + expect(result.length).to.equal(0); // Override spoke pool client fill deadline buffer and oldest time searched and check that it returns false // buffer if not great enough to cover the time between the end block and the oldest time searched by @@ -288,30 +330,30 @@ describe("Dataworker block range-related utility methods", async function () { ); const fillDeadlineOverride = expectedTimeBetweenOldestAndEndBlockTimestamp + 1; mockSpokePoolClient.setMaxFillDeadlineOverride(fillDeadlineOverride); - expect( - await blockRangesAreInvalidForSpokeClients( - { [originChainId]: mockSpokePoolClient as SpokePoolClient }, - blockRanges, - chainIds, - { - [originChainId]: mainnetDeploymentBlock, - }, - true // isV3 - ) - ).to.equal(true); + result = await blockRangesAreInvalidForSpokeClients( + { [originChainId]: mockSpokePoolClient as SpokePoolClient }, + blockRanges, + chainIds, + { + [originChainId]: mainnetDeploymentBlock, + }, + true // isV3 + ); + expect(result.length).to.equal(1); + expect(result[0].chainId).to.equal(chainIds[0]); + expect(result[0].reason).to.contain("cannot evaluate all possible expired deposits"); // Should be valid if not V3 - expect( - await blockRangesAreInvalidForSpokeClients( - { [originChainId]: mockSpokePoolClient as SpokePoolClient }, - blockRanges, - chainIds, - { - [originChainId]: mainnetDeploymentBlock, - }, - false // isV3 - ) - ).to.equal(false); + result = await blockRangesAreInvalidForSpokeClients( + { [originChainId]: mockSpokePoolClient as SpokePoolClient }, + blockRanges, + chainIds, + { + [originChainId]: mainnetDeploymentBlock, + }, + false // isV3 + ); + 
expect(result.length).to.equal(0); // Set oldest time older such that fill deadline buffer now exceeds the time between the end block and the oldest // time plus the conservative bundle time. Block ranges should now be valid. @@ -319,17 +361,16 @@ describe("Dataworker block range-related utility methods", async function () { endBlockTimestamps[originChainId] - fillDeadlineOverride - CONSERVATIVE_BUNDLE_FREQUENCY_SECONDS - 1; assert(oldestBlockTimestampOverride > 0, "unrealistic oldest block timestamp"); mockSpokePoolClient.setOldestBlockTimestampOverride(oldestBlockTimestampOverride); - expect( - await blockRangesAreInvalidForSpokeClients( - { [originChainId]: mockSpokePoolClient as SpokePoolClient }, - blockRanges, - chainIds, - { - [originChainId]: mainnetDeploymentBlock, - }, - true // isV3 - ) - ).to.equal(false); + result = await blockRangesAreInvalidForSpokeClients( + { [originChainId]: mockSpokePoolClient as SpokePoolClient }, + blockRanges, + chainIds, + { + [originChainId]: mainnetDeploymentBlock, + }, + true // isV3 + ); + expect(result.length).to.equal(0); // Finally, reset fill deadline buffer in contracts and reset the override in the mock to test that // the client calls from the contracts. @@ -337,16 +378,15 @@ describe("Dataworker block range-related utility methods", async function () { mockSpokePoolClient.setMaxFillDeadlineOverride(undefined); fakeSpokePool.fillDeadlineBuffer.returns(expectedTimeBetweenOldestAndEndBlockTimestamp); // This should be same // length as time between oldest time and end block timestamp so it should be a valid block range. - expect( - await blockRangesAreInvalidForSpokeClients( - { [originChainId]: mockSpokePoolClient as SpokePoolClient }, - blockRanges, - chainIds, - { - [originChainId]: mainnetDeploymentBlock, - }, - true // isV3 - ) - ).to.equal(false); + result = await blockRangesAreInvalidForSpokeClients( + { [originChainId]: mockSpokePoolClient as SpokePoolClient }, + blockRanges, + chainIds, + { + [originChainId]: mainnetDeploymentBlock, + }, + true // isV3 + ); + expect(result.length).to.equal(0); }); }); diff --git a/test/Dataworker.customSpokePoolClients.ts b/test/Dataworker.customSpokePoolClients.ts index 73509bb58..5cca2f878 100644 --- a/test/Dataworker.customSpokePoolClients.ts +++ b/test/Dataworker.customSpokePoolClients.ts @@ -70,7 +70,7 @@ describe("Dataworker: Using SpokePool clients with short lookback windows", asyn expect(lastSpyLogIncludes(spy, "Skipping dispute")).to.be.true; expect(spyLogLevel(spy, -1)).to.equal("error"); expect(spyLogIncludes(spy, -2, "Cannot validate bundle with insufficient event data")).to.be.true; - expect(spyLogLevel(spy, -2)).to.equal("debug"); + expect(spyLogLevel(spy, -2)).to.equal("warn"); expect(multiCallerClient.transactionCount()).to.equal(0); }); }); diff --git a/test/Dataworker.executePoolRebalanceUtils.ts b/test/Dataworker.executePoolRebalanceUtils.ts new file mode 100644 index 000000000..601743e40 --- /dev/null +++ b/test/Dataworker.executePoolRebalanceUtils.ts @@ -0,0 +1,901 @@ +import { ConfigStoreClient, HubPoolClient, MultiCallerClient, SpokePoolClient } from "../src/clients"; +import { + BaseContract, + bnZero, + buildPoolRebalanceLeafTree, + CHAIN_IDs, + ERC20, + getCurrentTime, + toBNWei, + TOKEN_SYMBOLS_MAP, +} from "../src/utils"; +import { MAX_L1_TOKENS_PER_POOL_REBALANCE_LEAF, MAX_REFUNDS_PER_RELAYER_REFUND_LEAF, ZERO_ADDRESS } from "./constants"; +import { setupDataworker } from "./fixtures/Dataworker.Fixture"; +import { + Contract, + FakeContract, + ethers, + expect, + smock, 
+ sinon, + randomAddress, + lastSpyLogIncludes, + assert, + lastSpyLogLevel, +} from "./utils"; + +// Tested +import { BalanceAllocator } from "../src/clients/BalanceAllocator"; +import { ARBITRUM_ORBIT_L1L2_MESSAGE_FEE_DATA, spokePoolClientsToProviders } from "../src/common"; +import { Dataworker } from "../src/dataworker/Dataworker"; +import { MockHubPoolClient } from "./mocks/MockHubPoolClient"; +import { PoolRebalanceLeaf } from "../src/interfaces"; + +// Set to arbitrum to test that the dataworker sends ETH to the HubPool to test L1 --> Arbitrum message transfers. +const destinationChainId = 42161; + +let erc20_1: Contract; +let l1Token_1: Contract, hubPool: Contract; +let spy: sinon.SinonSpy; + +let hubPoolClient: HubPoolClient; +let dataworkerInstance: Dataworker, multiCallerClient: MultiCallerClient; +let spokePoolClients: { [chainId: number]: SpokePoolClient }; + +let updateAllClients: () => Promise; + +describe("Dataworker: Utilities to execute pool rebalance leaves", async function () { + function getNewBalanceAllocator(): BalanceAllocator { + const providers = { + ...spokePoolClientsToProviders(spokePoolClients), + [hubPoolClient.chainId]: hubPool.provider, + }; + return new BalanceAllocator(providers); + } + async function createMockHubPoolClient(): Promise<{ + mockHubPoolClient: MockHubPoolClient; + fakeHubPool: FakeContract; + }> { + const fakeHubPool = await smock.fake(hubPool.interface, { address: hubPool.address }); + const mockHubPoolClient = new MockHubPoolClient( + hubPoolClient.logger, + fakeHubPool as unknown as Contract, + hubPoolClient.configStoreClient as unknown as ConfigStoreClient + ); + mockHubPoolClient.chainId = hubPoolClient.chainId; + mockHubPoolClient.setTokenInfoToReturn({ address: l1Token_1.address, decimals: 18, symbol: "TEST" }); + + // Sub in a dummy root bundle proposal for use in HubPoolClient update. 
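// Editor's note (illustrative, not part of this patch): the stubbed multicall below returns encoded
// results for getCurrentTime() and rootBundleProposal() in that order, so the HubPoolClient update sees
// the current timestamp and an empty (all-zero) pending proposal rather than real on-chain state.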
+ const zero = "0x0000000000000000000000000000000000000000000000000000000000000000"; + fakeHubPool.multicall.returns([ + hubPool.interface.encodeFunctionResult("getCurrentTime", [getCurrentTime().toString()]), + hubPool.interface.encodeFunctionResult("rootBundleProposal", [zero, zero, zero, 0, ZERO_ADDRESS, 0, 0]), + ]); + return { + mockHubPoolClient, + fakeHubPool, + }; + } + beforeEach(async function () { + ({ + hubPool, + erc20_1, + hubPoolClient, + l1Token_1, + dataworkerInstance, + multiCallerClient, + updateAllClients, + spokePoolClients, + spy, + } = await setupDataworker( + ethers, + MAX_REFUNDS_PER_RELAYER_REFUND_LEAF, + MAX_L1_TOKENS_PER_POOL_REBALANCE_LEAF, + 0, + destinationChainId + )); + }); + describe("update exchange rates", function () { + let mockHubPoolClient: MockHubPoolClient, fakeHubPool: FakeContract; + beforeEach(async function () { + ({ mockHubPoolClient, fakeHubPool } = await createMockHubPoolClient()); + dataworkerInstance.clients.hubPoolClient = mockHubPoolClient; + + await updateAllClients(); + }); + describe("_updateExchangeRatesBeforeExecutingHubChainLeaves", function () { + let balanceAllocator: BalanceAllocator; + beforeEach(function () { + balanceAllocator = getNewBalanceAllocator(); + }); + it("ignores negative net send amounts", async function () { + const liquidReserves = toBNWei("1"); + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + const latestReserves = await dataworkerInstance._updateExchangeRatesBeforeExecutingHubChainLeaves( + balanceAllocator, + { netSendAmounts: [toBNWei(-1)], l1Tokens: [l1Token_1.address] }, + true + ); + expect(latestReserves[l1Token_1.address]).to.equal(liquidReserves); + expect(multiCallerClient.transactionCount()).to.equal(0); + }); + it("considers positive net send amounts", async function () { + const currentReserves = toBNWei("2"); + const netSendAmount = toBNWei("1"); + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, currentReserves); + + const latestReserves = await dataworkerInstance._updateExchangeRatesBeforeExecutingHubChainLeaves( + balanceAllocator, + { netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address] }, + true + ); + expect(latestReserves[l1Token_1.address]).to.equal(currentReserves.sub(netSendAmount)); + expect(multiCallerClient.transactionCount()).to.equal(0); + expect(lastSpyLogIncludes(spy, "current liquid reserves > netSendAmount")).to.be.true; + }); + it("logs error if updated liquid reserves aren't enough to execute leaf", async function () { + const netSendAmount = toBNWei("1"); + const liquidReserves = netSendAmount.sub(1); + const postUpdateLiquidReserves = liquidReserves.sub(1); + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + balanceAllocator.testSetBalance( + hubPoolClient.chainId, + l1Token_1.address, + hubPool.address, + postUpdateLiquidReserves + ); + + const latestReserves = await dataworkerInstance._updateExchangeRatesBeforeExecutingHubChainLeaves( + balanceAllocator, + { netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address] }, + true + ); + expect(lastSpyLogLevel(spy)).to.equal("warn"); + expect(lastSpyLogIncludes(spy, "Not enough funds to execute Ethereum pool rebalance leaf")).to.be.true; + expect(latestReserves[l1Token_1.address]).to.equal(liquidReserves); + expect(multiCallerClient.transactionCount()).to.equal(0); + }); + it("submits update if updated liquid reserves cover execution of pool leaf", async function () { + const netSendAmount = toBNWei("1"); + const updatedLiquidReserves = netSendAmount.add(1); 
+ mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, bnZero); + balanceAllocator.testSetBalance( + hubPoolClient.chainId, + l1Token_1.address, + hubPool.address, + updatedLiquidReserves + ); + + const latestReserves = await dataworkerInstance._updateExchangeRatesBeforeExecutingHubChainLeaves( + balanceAllocator, + { netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address] }, + true + ); + expect(latestReserves[l1Token_1.address]).to.equal(updatedLiquidReserves.sub(netSendAmount)); + expect(multiCallerClient.transactionCount()).to.equal(1); + }); + }); + describe("_updateExchangeRatesBeforeExecutingNonHubChainLeaves", function () { + let balanceAllocator: BalanceAllocator; + beforeEach(function () { + balanceAllocator = getNewBalanceAllocator(); + }); + it("uses input liquid reserves value for a token if it exists", async function () { + // In this test the `liquidReserves` > `netSendAmount` but we pass in the + // `passedInLiquidReserves` value which is less than `liquidReserves`. So, the function + // should attempt an update. + const netSendAmount = toBNWei("1"); + const liquidReserves = toBNWei("3"); + const passedInLiquidReserves = toBNWei("0"); + + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + balanceAllocator.testSetBalance(hubPoolClient.chainId, l1Token_1.address, hubPool.address, netSendAmount); + + const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( + { + [l1Token_1.address]: passedInLiquidReserves, + }, + balanceAllocator, + [ + { netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address], chainId: 1 }, + { netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address], chainId: 10 }, + ], + true + ); + expect(updated.size).to.equal(1); + expect(updated.has(l1Token_1.address)).to.be.true; + const errorLogs = spy.getCalls().filter((call) => call.lastArg.level === "warn"); + expect(errorLogs.length).to.equal(1); + expect(errorLogs[0].lastArg.message).to.contain("Not enough funds to execute ALL non-Ethereum"); + }); + it("exits early if current liquid reserves are greater than all individual net send amount", async function () { + const netSendAmount = toBNWei("1"); + const liquidReserves = toBNWei("3"); + // For this test, do not pass in a liquid reserves object and force dataworker to load + // from HubPoolClient + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( + {}, + balanceAllocator, + [ + { netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address], chainId: 1 }, + { netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address], chainId: 10 }, + ], + true + ); + expect(updated.size).to.equal(0); + expect(multiCallerClient.transactionCount()).to.equal(0); + expect(lastSpyLogIncludes(spy, "Skipping exchange rate update")).to.be.true; + }); + it("exits early if total required net send amount is 0", async function () { + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, toBNWei("0")); + const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( + {}, + balanceAllocator, + [{ netSendAmounts: [toBNWei(0)], l1Tokens: [l1Token_1.address], chainId: 1 }], + true + ); + expect(updated.size).to.equal(0); + expect(multiCallerClient.transactionCount()).to.equal(0); + expect( + spy.getCalls().filter((call) => call.lastArg.message.includes("Skipping exchange rate update")).length + ).to.equal(0); + }); + it("groups aggregate net send amounts by 
L1 token", async function () { + // Total net send amount is 1 for each token but they are not summed together because they are different, + // so the liquid reserves of 1 for each individual token is enough. + const liquidReserves = toBNWei("1"); + const l1Token2 = erc20_1.address; + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + mockHubPoolClient.setLpTokenInfo(l1Token2, 0, liquidReserves); + const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( + {}, + balanceAllocator, + [ + { netSendAmounts: [liquidReserves], l1Tokens: [l1Token_1.address], chainId: 1 }, + { netSendAmounts: [liquidReserves], l1Tokens: [l1Token2], chainId: 10 }, + ], + true + ); + expect(updated.size).to.equal(0); + expect(multiCallerClient.transactionCount()).to.equal(0); + }); + it("Logs error if any l1 token's aggregate net send amount exceeds post-sync liquid reserves", async function () { + const liquidReserves = toBNWei("1"); + const postUpdateLiquidReserves = liquidReserves.mul(toBNWei("1.1")).div(toBNWei("1")); + const l1Token2 = erc20_1.address; + + // Current reserves are 1 which is insufficient to execute all leaves. + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + mockHubPoolClient.setLpTokenInfo(l1Token2, 0, liquidReserves); + + // Post-sync reserves are still insufficient to execute all leaves. + balanceAllocator.testSetBalance( + hubPoolClient.chainId, + l1Token_1.address, + hubPool.address, + postUpdateLiquidReserves + ); + balanceAllocator.testSetBalance(hubPoolClient.chainId, l1Token2, hubPool.address, postUpdateLiquidReserves); + + const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( + {}, + balanceAllocator, + [ + { netSendAmounts: [liquidReserves], l1Tokens: [l1Token_1.address], chainId: 1 }, + // This one exceeds the post-update liquid reserves for the l1 token. 
+ { netSendAmounts: [liquidReserves.mul(2)], l1Tokens: [l1Token2], chainId: 10 }, + ], + true + ); + expect(updated.size).to.equal(1); + expect(updated.has(l1Token2)).to.be.true; + const errorLogs = spy.getCalls().filter((call) => call.lastArg.level === "warn"); + expect(errorLogs.length).to.equal(1); + expect(errorLogs[0].lastArg.message).to.contain("Not enough funds to execute ALL non-Ethereum"); + }); + it("Logs one error for each L1 token whose aggregate net send amount exceeds post-sync liquid reserves", async function () { + const liquidReserves = toBNWei("1"); + const postUpdateLiquidReserves = liquidReserves.mul(toBNWei("1.1")).div(toBNWei("1")); + const l1Token2 = erc20_1.address; + + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + mockHubPoolClient.setLpTokenInfo(l1Token2, 0, liquidReserves); + + balanceAllocator.testSetBalance( + hubPoolClient.chainId, + l1Token_1.address, + hubPool.address, + postUpdateLiquidReserves + ); + balanceAllocator.testSetBalance(hubPoolClient.chainId, l1Token2, hubPool.address, postUpdateLiquidReserves); + + const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( + {}, + balanceAllocator, + [ + // Both net send amounts exceed the post update liquid reserves + { netSendAmounts: [liquidReserves.mul(2)], l1Tokens: [l1Token_1.address], chainId: 1 }, + { netSendAmounts: [liquidReserves.mul(2)], l1Tokens: [l1Token2], chainId: 10 }, + ], + true + ); + expect(updated.size).to.equal(2); + expect(updated.has(l1Token2)).to.be.true; + expect(updated.has(l1Token_1.address)).to.be.true; + const errorLogs = spy.getCalls().filter((call) => call.lastArg.level === "warn"); + expect(errorLogs.length).to.equal(2); + }); + it("ignores negative net send amounts", async function () { + const liquidReserves = toBNWei("2"); + const postUpdateLiquidReserves = liquidReserves; + + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + + balanceAllocator.testSetBalance( + hubPoolClient.chainId, + l1Token_1.address, + hubPool.address, + postUpdateLiquidReserves + ); + + await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( + {}, + balanceAllocator, + [ + { netSendAmounts: [liquidReserves.mul(2)], l1Tokens: [l1Token_1.address], chainId: 1 }, + // This negative liquid reserves doesn't offset the positive one, it just gets ignored. + { netSendAmounts: [liquidReserves.mul(-10)], l1Tokens: [l1Token_1.address], chainId: 10 }, + ], + true + ); + const errorLog = spy.getCalls().filter((call) => call.lastArg.level === "warn"); + expect(errorLog.length).to.equal(1); + expect(errorLog[0].lastArg.message).to.contain("Not enough funds to execute ALL non-Ethereum"); + }); + it("submits update: liquid reserves post-sync are enough to execute leaf", async function () { + // Liquid reserves cover one leaf but not two. + const postUpdateLiquidReserves = toBNWei("20"); + + // Current reserves are insufficient to cover the two leaves: + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, bnZero); + + balanceAllocator.testSetBalance( + hubPoolClient.chainId, + l1Token_1.address, + hubPool.address, + postUpdateLiquidReserves + ); + + const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( + {}, + balanceAllocator, + // Each leaf's net send amount is individually less than the post-updateliquid reserves, + // but the sum of the three is greater than the post-update liquid reserves. + // This should force the dataworker to submit an update. 
+ [ + { netSendAmounts: [toBNWei("4")], l1Tokens: [l1Token_1.address], chainId: 1 }, + { netSendAmounts: [toBNWei("9")], l1Tokens: [l1Token_1.address], chainId: 10 }, + { netSendAmounts: [toBNWei("7")], l1Tokens: [l1Token_1.address], chainId: 137 }, + ], + true + ); + expect(updated.size).to.equal(1); + expect(updated.has(l1Token_1.address)).to.be.true; + expect(multiCallerClient.transactionCount()).to.equal(1); + }); + it("Logs error and does not submit update if liquid reserves post-sync are <= current liquid reserves and are insufficient to execute leaf", async function () { + const liquidReserves = toBNWei("1"); + const postUpdateLiquidReserves = liquidReserves.sub(1); + + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + + balanceAllocator.testSetBalance( + hubPoolClient.chainId, + l1Token_1.address, + hubPool.address, + postUpdateLiquidReserves + ); + + const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( + {}, + balanceAllocator, + [{ netSendAmounts: [liquidReserves.mul(2)], l1Tokens: [l1Token_1.address], chainId: 1 }], + true + ); + expect(updated.size).to.equal(0); + const errorLogs = spy.getCalls().filter((call) => call.lastArg.level === "warn"); + expect(errorLogs.length).to.equal(1); + expect(errorLogs[0].lastArg.message).to.contain("Not enough funds to execute ALL non-Ethereum"); + expect(lastSpyLogIncludes(spy, "liquid reserves would not increase")).to.be.true; + }); + }); + describe("_updateOldExchangeRates", function () { + it("exits early if we recently synced l1 token", async function () { + mockHubPoolClient.currentTime = 10_000; + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 10_000, toBNWei("0")); + await dataworkerInstance._updateOldExchangeRates([l1Token_1.address], true); + expect(multiCallerClient.transactionCount()).to.equal(0); + }); + it("exits early if liquid reserves wouldn't increase for token post-update", async function () { + // Last update was at time 0, current time is at 1_000_000, so definitely past the update threshold + mockHubPoolClient.currentTime = 1_000_000; + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0); + + // Hardcode multicall output such that it looks like liquid reserves stayed the same + fakeHubPool.multicall.returns([ + hubPool.interface.encodeFunctionResult("pooledTokens", [ + ZERO_ADDRESS, // lp token address + true, // enabled + 0, // last lp fee update + bnZero, // utilized reserves + bnZero, // liquid reserves + bnZero, // unaccumulated fees + ]), + ZERO_ADDRESS, // sync output + hubPool.interface.encodeFunctionResult("pooledTokens", [ + ZERO_ADDRESS, // lp token address + true, // enabled + 0, // last lp fee update + bnZero, // utilized reserves + bnZero, // liquid reserves, equal to "current" reserves + bnZero, // unaccumulated fees + ]), + ]); + + await dataworkerInstance._updateOldExchangeRates([l1Token_1.address], true); + expect(multiCallerClient.transactionCount()).to.equal(0); + + // Add test when liquid reserves decreases + fakeHubPool.multicall.returns([ + hubPool.interface.encodeFunctionResult("pooledTokens", [ + ZERO_ADDRESS, // lp token address + true, // enabled + 0, // last lp fee update + bnZero, // utilized reserves + toBNWei(1), // liquid reserves + bnZero, // unaccumulated fees + ]), + ZERO_ADDRESS, // sync output + hubPool.interface.encodeFunctionResult("pooledTokens", [ + ZERO_ADDRESS, // lp token address + true, // enabled + 0, // last lp fee update + bnZero, // utilized reserves + toBNWei(1).sub(1), // liquid reserves, less than 
"current" reserves + bnZero, // unaccumulated fees + ]), + ]); + + await dataworkerInstance._updateOldExchangeRates([l1Token_1.address], true); + expect(multiCallerClient.transactionCount()).to.equal(0); + }); + it("submits update if liquid reserves would increase for token post-update and last update was old enough", async function () { + // Last update was at time 0, current time is at 1_000_000, so definitely past the update threshold + mockHubPoolClient.currentTime = 1_000_000; + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0); + + // Hardcode multicall output such that it looks like liquid reserves increased + fakeHubPool.multicall.returns([ + hubPool.interface.encodeFunctionResult("pooledTokens", [ + ZERO_ADDRESS, // lp token address + true, // enabled + 0, // last lp fee update + bnZero, // utilized reserves + toBNWei(1), // liquid reserves + bnZero, // unaccumulated fees + ]), + ZERO_ADDRESS, + hubPool.interface.encodeFunctionResult("pooledTokens", [ + ZERO_ADDRESS, // lp token address + true, // enabled + 0, // last lp fee update + bnZero, // utilized reserves + toBNWei(1).add(1), // liquid reserves, higher than "current" reserves + bnZero, // unaccumulated fees + ]), + ]); + + await dataworkerInstance._updateOldExchangeRates([l1Token_1.address], true); + expect(multiCallerClient.transactionCount()).to.equal(1); + }); + }); + }); + describe("_executePoolRebalanceLeaves", async function () { + let token1: string, token2: string, balanceAllocator: BalanceAllocator; + beforeEach(function () { + token1 = randomAddress(); + token2 = randomAddress(); + balanceAllocator = getNewBalanceAllocator(); + balanceAllocator.testSetBalance(hubPoolClient.chainId, token1, hubPool.address, toBNWei("2")); + balanceAllocator.testSetBalance(hubPoolClient.chainId, token2, hubPool.address, toBNWei("2")); + }); + it("non-orbit leaf", async function () { + // Should just submit execution + const leaves: PoolRebalanceLeaf[] = [ + { + chainId: 10, + groupIndex: 0, + bundleLpFees: [toBNWei("1"), toBNWei("1")], + netSendAmounts: [toBNWei("1"), toBNWei("1")], + runningBalances: [toBNWei("1"), toBNWei("1")], + leafId: 0, + l1Tokens: [token1, token2], + }, + { + chainId: 137, + groupIndex: 0, + bundleLpFees: [toBNWei("1"), toBNWei("1")], + netSendAmounts: [toBNWei("1"), toBNWei("1")], + runningBalances: [toBNWei("1"), toBNWei("1")], + leafId: 0, + l1Tokens: [token1, token2], + }, + ]; + const result = await dataworkerInstance._executePoolRebalanceLeaves( + spokePoolClients, + leaves, + balanceAllocator, + buildPoolRebalanceLeafTree(leaves), + true + ); + expect(result).to.equal(2); + + expect(multiCallerClient.transactionCount()).to.equal(2); + const queuedTransactions = multiCallerClient.getQueuedTransactions(hubPoolClient.chainId); + expect(queuedTransactions[0].method).to.equal("executeRootBundle"); + expect(queuedTransactions[0].message).to.match(/chain 10/); + expect(queuedTransactions[1].method).to.equal("executeRootBundle"); + expect(queuedTransactions[1].message).to.match(/chain 137/); + }); + it("Subtracts virtual balance from hub pool", async function () { + // All chain leaves remove virtual balance from hub pool + const leaves: PoolRebalanceLeaf[] = [ + { + chainId: 42161, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [toBNWei("1")], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [token1], + }, + { + chainId: hubPoolClient.chainId, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [toBNWei("1")], + runningBalances: [toBNWei("1")], + leafId: 0, 
+ l1Tokens: [token1], + }, + ]; + const result = await dataworkerInstance._executePoolRebalanceLeaves( + spokePoolClients, + leaves, + balanceAllocator, + buildPoolRebalanceLeafTree(leaves), + true + ); + expect(result).to.equal(2); + expect(await balanceAllocator.getUsed(hubPoolClient.chainId, token1, hubPoolClient.hubPool.address)).to.equal( + toBNWei("2") + ); + }); + it("Adds virtual balance to SpokePool for ethereum leaves", async function () { + const leaves: PoolRebalanceLeaf[] = [ + { + chainId: hubPoolClient.chainId, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [toBNWei("1")], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [token1], + }, + ]; + const result = await dataworkerInstance._executePoolRebalanceLeaves( + spokePoolClients, + leaves, + balanceAllocator, + buildPoolRebalanceLeafTree(leaves), + true + ); + expect(result).to.equal(1); + expect( + await balanceAllocator.getUsed( + hubPoolClient.chainId, + token1, + spokePoolClients[hubPoolClient.chainId].spokePool.address + ) + ).to.equal(toBNWei("-1")); + }); + it("funds arbitrum leaf", async function () { + // Adds one fee per net send amount + one extra if groupIndex = 0 + const leaves: PoolRebalanceLeaf[] = [ + { + chainId: 42161, + groupIndex: 0, + bundleLpFees: [toBNWei("1"), toBNWei("1")], + netSendAmounts: [toBNWei("1"), toBNWei("1")], + runningBalances: [toBNWei("1"), toBNWei("1")], + leafId: 0, + l1Tokens: [token1, token2], + }, + { + chainId: 42161, + groupIndex: 1, + bundleLpFees: [toBNWei("1"), toBNWei("1")], + netSendAmounts: [toBNWei("1"), toBNWei("1")], + runningBalances: [toBNWei("1"), toBNWei("1")], + leafId: 0, + l1Tokens: [token1, token2], + }, + ]; + // Should have a total of 2 + 1 + 2 = 5 fees. + const { amountWei, amountMultipleToFund } = ARBITRUM_ORBIT_L1L2_MESSAGE_FEE_DATA[CHAIN_IDs.ARBITRUM]; + const expectedFee = toBNWei(amountWei).mul(amountMultipleToFund); + const expectedFeeLeaf1 = expectedFee.mul(2).add(expectedFee); + const expectedFeeLeaf2 = expectedFee.mul(2); + const result = await dataworkerInstance._executePoolRebalanceLeaves( + spokePoolClients, + leaves, + balanceAllocator, + buildPoolRebalanceLeafTree(leaves), + true + ); + expect(result).to.equal(2); + + // Should submit two transactions to load ETH for each leaf plus pool rebalance leaf execution. 
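// Editor's note (illustrative, not part of this patch): why four transactions and these fee values.
// With expectedFee = toBNWei(amountWei).mul(amountMultipleToFund), a leaf is funded with one fee per
// net send amount plus one extra fee when groupIndex === 0, so:
//   leaf 1 (groupIndex 0, two net send amounts): 2 * expectedFee + expectedFee = expectedFeeLeaf1
//   leaf 2 (groupIndex 1, two net send amounts): 2 * expectedFee = expectedFeeLeaf2
// i.e. 5 fees across two loadEthForL2Calls transactions, followed by two executeRootBundle executions.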
+ expect(multiCallerClient.transactionCount()).to.equal(4); + const queuedTransactions = multiCallerClient.getQueuedTransactions(hubPoolClient.chainId); + expect(queuedTransactions[0].method).to.equal("loadEthForL2Calls"); + expect(queuedTransactions[0].value).to.equal(expectedFeeLeaf1); + expect(queuedTransactions[1].method).to.equal("loadEthForL2Calls"); + expect(queuedTransactions[1].value).to.equal(expectedFeeLeaf2); + expect(queuedTransactions[2].method).to.equal("executeRootBundle"); + expect(queuedTransactions[3].method).to.equal("executeRootBundle"); + }); + it("funds custom gas token orbit leaf", async function () { + // Replicate custom gas token setups: + const azero = await smock.fake(ERC20.abi, { + address: TOKEN_SYMBOLS_MAP.AZERO.addresses[CHAIN_IDs.MAINNET], + provider: hubPoolClient.hubPool.signer.provider, + }); + // Custom gas token funder for AZERO + const { amountWei, amountMultipleToFund, feePayer } = ARBITRUM_ORBIT_L1L2_MESSAGE_FEE_DATA[CHAIN_IDs.ALEPH_ZERO]; + assert(feePayer !== undefined); + const customGasTokenFunder = feePayer; + azero.balanceOf.whenCalledWith(customGasTokenFunder).returns(0); + expect(await balanceAllocator.getBalance(hubPoolClient.chainId, azero.address, customGasTokenFunder)).to.equal(0); + + // Adds one fee per net send amount + one extra if groupIndex = 0 + const leaves: PoolRebalanceLeaf[] = [ + { + chainId: 41455, + groupIndex: 0, + bundleLpFees: [toBNWei("1"), toBNWei("1")], + netSendAmounts: [toBNWei("1"), toBNWei("1")], + runningBalances: [toBNWei("1"), toBNWei("1")], + leafId: 0, + l1Tokens: [token1, token2], + }, + { + chainId: 41455, + groupIndex: 1, + bundleLpFees: [toBNWei("1"), toBNWei("1")], + netSendAmounts: [toBNWei("1"), toBNWei("1")], + runningBalances: [toBNWei("1"), toBNWei("1")], + leafId: 0, + l1Tokens: [token1, token2], + }, + ]; + // Should have a total of 2 + 1 + 2 = 5 fees. + const expectedFee = toBNWei(amountWei).mul(amountMultipleToFund); + const expectedFeeLeaf1 = expectedFee.mul(2).add(expectedFee); + const expectedFeeLeaf2 = expectedFee.mul(2); + azero.balanceOf + .whenCalledWith(await hubPoolClient.hubPool.signer.getAddress()) + .returns(expectedFeeLeaf1.add(expectedFeeLeaf2)); + const result = await dataworkerInstance._executePoolRebalanceLeaves( + spokePoolClients, + leaves, + balanceAllocator, + buildPoolRebalanceLeafTree(leaves), + true + ); + expect(result).to.equal(2); + + // Should submit two transactions to load ETH for each leaf plus pool rebalance leaf execution. + expect(multiCallerClient.transactionCount()).to.equal(4); + const queuedTransactions = multiCallerClient.getQueuedTransactions(hubPoolClient.chainId); + expect(queuedTransactions[0].method).to.equal("transfer"); + expect(queuedTransactions[0].args).to.deep.equal([customGasTokenFunder, expectedFeeLeaf1]); + expect(queuedTransactions[1].method).to.equal("transfer"); + expect(queuedTransactions[1].args).to.deep.equal([customGasTokenFunder, expectedFeeLeaf2]); + expect(queuedTransactions[2].method).to.equal("executeRootBundle"); + expect(queuedTransactions[3].method).to.equal("executeRootBundle"); + }); + it("fails to fund custom gas token orbit leaf", async function () { + // Replicate custom gas token setups, but this time do not set a balance for the custom gas token funder. 
+ const azero = await smock.fake(ERC20.abi, { + address: TOKEN_SYMBOLS_MAP.AZERO.addresses[CHAIN_IDs.MAINNET], + provider: hubPoolClient.hubPool.signer.provider, + }); + // Custom gas token funder for AZERO + const customGasTokenFunder = "0x0d57392895Db5aF3280e9223323e20F3951E81B1"; + azero.balanceOf.whenCalledWith(customGasTokenFunder).returns(0); + expect(await balanceAllocator.getBalance(hubPoolClient.chainId, azero.address, customGasTokenFunder)).to.equal(0); + + // Adds one fee per net send amount + one extra if groupIndex = 0 + const leaves: PoolRebalanceLeaf[] = [ + { + chainId: 41455, + groupIndex: 0, + bundleLpFees: [toBNWei("1"), toBNWei("1")], + netSendAmounts: [toBNWei("1"), toBNWei("1")], + runningBalances: [toBNWei("1"), toBNWei("1")], + leafId: 0, + l1Tokens: [token1, token2], + }, + ]; + // Should throw an error if caller doesn't have enough custom gas token to fund + // DonationBox. + const result = await dataworkerInstance._executePoolRebalanceLeaves( + spokePoolClients, + leaves, + balanceAllocator, + buildPoolRebalanceLeafTree(leaves), + true + ); + expect(result).to.equal(0); + expect(lastSpyLogLevel(spy)).to.equal("error"); + expect(lastSpyLogIncludes(spy, "Failed to fund")).to.be.true; + }); + it("Ignores leaves without sufficient reserves to execute", async function () { + // Should only be able to execute the first leaf + balanceAllocator.testSetBalance(hubPoolClient.chainId, token1, hubPoolClient.hubPool.address, toBNWei("1")); + + const leaves: PoolRebalanceLeaf[] = [ + { + chainId: 10, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [toBNWei("1")], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [token1], + }, + { + chainId: 137, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [toBNWei("1")], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [token1], + }, + ]; + const result = await dataworkerInstance._executePoolRebalanceLeaves( + spokePoolClients, + leaves, + balanceAllocator, + buildPoolRebalanceLeafTree(leaves), + true + ); + expect(result).to.equal(1); + }); + }); + describe("_getExecutablePoolRebalanceLeaves", function () { + let token1: string, token2: string, balanceAllocator: BalanceAllocator; + beforeEach(function () { + token1 = randomAddress(); + token2 = randomAddress(); + balanceAllocator = getNewBalanceAllocator(); + }); + it("All l1 tokens on single leaf are executable", async function () { + balanceAllocator.testSetBalance(hubPoolClient.chainId, token1, hubPoolClient.hubPool.address, toBNWei("1")); + balanceAllocator.testSetBalance(hubPoolClient.chainId, token2, hubPoolClient.hubPool.address, toBNWei("1")); + const leaves = await dataworkerInstance._getExecutablePoolRebalanceLeaves( + [ + { + chainId: 10, + groupIndex: 0, + bundleLpFees: [toBNWei("1"), toBNWei("1")], + netSendAmounts: [toBNWei("1"), toBNWei("1")], + runningBalances: [toBNWei("1"), toBNWei("1")], + leafId: 0, + l1Tokens: [token1, token2], + }, + ], + balanceAllocator + ); + expect(leaves.length).to.equal(1); + }); + it("Some l1 tokens on single leaf are not executable", async function () { + // Not enough to cover one net send amounts of 1 + balanceAllocator.testSetBalance(hubPoolClient.chainId, token1, hubPoolClient.hubPool.address, toBNWei("0")); + balanceAllocator.testSetBalance(hubPoolClient.chainId, token2, hubPoolClient.hubPool.address, toBNWei("1")); + const leaves = await dataworkerInstance._getExecutablePoolRebalanceLeaves( + [ + { + chainId: 10, + groupIndex: 0, + bundleLpFees: [toBNWei("1"), toBNWei("1")], + 
netSendAmounts: [toBNWei("1"), toBNWei("1")], + runningBalances: [toBNWei("1"), toBNWei("1")], + leafId: 0, + l1Tokens: [token1, token2], + }, + ], + balanceAllocator + ); + expect(leaves.length).to.equal(0); + const errorLogs = spy.getCalls().filter((call) => call.lastArg.level === "error"); + expect(errorLogs.length).to.equal(1); + expect(errorLogs[0].lastArg.message).to.contain("Not enough funds to execute"); + }); + it("All l1 tokens on multiple leaves are executable", async function () { + // Covers 2 leaves each with one net send amount of 1 + balanceAllocator.testSetBalance(hubPoolClient.chainId, token1, hubPoolClient.hubPool.address, toBNWei("2")); + balanceAllocator.testSetBalance(hubPoolClient.chainId, token2, hubPoolClient.hubPool.address, toBNWei("2")); + const leaves = await dataworkerInstance._getExecutablePoolRebalanceLeaves( + [ + { + chainId: 10, + groupIndex: 0, + bundleLpFees: [toBNWei("1"), toBNWei("1")], + netSendAmounts: [toBNWei("1"), toBNWei("1")], + runningBalances: [toBNWei("1"), toBNWei("1")], + leafId: 0, + l1Tokens: [token1, token2], + }, + { + chainId: 42161, + groupIndex: 0, + bundleLpFees: [toBNWei("1"), toBNWei("1")], + netSendAmounts: [toBNWei("1"), toBNWei("1")], + runningBalances: [toBNWei("1"), toBNWei("1")], + leafId: 0, + l1Tokens: [token1, token2], + }, + ], + balanceAllocator + ); + expect(leaves.length).to.equal(2); + }); + it("Some l1 tokens are not executable after first leaf is executed", async function () { + // 1 only covers the first leaf + balanceAllocator.testSetBalance(hubPoolClient.chainId, token1, hubPoolClient.hubPool.address, toBNWei("1")); + balanceAllocator.testSetBalance(hubPoolClient.chainId, token2, hubPoolClient.hubPool.address, toBNWei("2")); + + const leaves = await dataworkerInstance._getExecutablePoolRebalanceLeaves( + [ + { + chainId: 10, + groupIndex: 0, + bundleLpFees: [toBNWei("1"), toBNWei("1")], + netSendAmounts: [toBNWei("1"), toBNWei("1")], + runningBalances: [toBNWei("1"), toBNWei("1")], + leafId: 0, + l1Tokens: [token1, token2], + }, + { + chainId: 42161, + groupIndex: 0, + bundleLpFees: [toBNWei("1"), toBNWei("1")], + netSendAmounts: [toBNWei("1"), toBNWei("1")], + runningBalances: [toBNWei("1"), toBNWei("1")], + leafId: 0, + l1Tokens: [token1, token2], + }, + ], + balanceAllocator + ); + expect(leaves.length).to.equal(1); + expect(leaves[0].chainId).to.equal(10); + const errorLogs = spy.getCalls().filter((call) => call.lastArg.level === "error"); + expect(errorLogs.length).to.equal(1); + }); + }); +}); diff --git a/test/Dataworker.executePoolRebalances.ts b/test/Dataworker.executePoolRebalances.ts index 5a1472e0c..b6ffad369 100644 --- a/test/Dataworker.executePoolRebalances.ts +++ b/test/Dataworker.executePoolRebalances.ts @@ -1,5 +1,13 @@ -import { HubPoolClient, MultiCallerClient, SpokePoolClient } from "../src/clients"; -import { bnZero, getCurrentTime, MAX_UINT_VAL, toBNWei } from "../src/utils"; +import { ConfigStoreClient, HubPoolClient, MultiCallerClient, SpokePoolClient } from "../src/clients"; +import { + BaseContract, + bnZero, + getCurrentTime, + MAX_UINT_VAL, + MerkleTree, + RelayerRefundLeaf, + toBNWei, +} from "../src/utils"; import { MAX_L1_TOKENS_PER_POOL_REBALANCE_LEAF, MAX_REFUNDS_PER_RELAYER_REFUND_LEAF, @@ -15,10 +23,10 @@ import { ethers, expect, fillV3, - lastSpyLogLevel, smock, sinon, lastSpyLogIncludes, + randomAddress, } from "./utils"; // Tested @@ -26,12 +34,13 @@ import { BalanceAllocator } from "../src/clients/BalanceAllocator"; import { spokePoolClientsToProviders } from 
"../src/common"; import { Dataworker } from "../src/dataworker/Dataworker"; import { MockHubPoolClient } from "./mocks/MockHubPoolClient"; +import { PoolRebalanceLeaf, SlowFillLeaf } from "../src/interfaces"; // Set to arbitrum to test that the dataworker sends ETH to the HubPool to test L1 --> Arbitrum message transfers. const destinationChainId = 42161; let spokePool_1: Contract, erc20_1: Contract, spokePool_2: Contract, erc20_2: Contract; -let l1Token_1: Contract, hubPool: Contract; +let l1Token_1: Contract, hubPool: Contract, spokePool_4: Contract; let depositor: SignerWithAddress, spy: sinon.SinonSpy; let hubPoolClient: HubPoolClient; @@ -41,10 +50,43 @@ let spokePoolClients: { [chainId: number]: SpokePoolClient }; let updateAllClients: () => Promise; describe("Dataworker: Execute pool rebalances", async function () { + function getNewBalanceAllocator(): BalanceAllocator { + const providers = { + ...spokePoolClientsToProviders(spokePoolClients), + [hubPoolClient.chainId]: hubPool.provider, + }; + return new BalanceAllocator(providers); + } + async function createMockHubPoolClient(): Promise<{ + mockHubPoolClient: MockHubPoolClient; + fakeHubPool: FakeContract; + }> { + const fakeHubPool = await smock.fake(hubPool.interface, { address: hubPool.address }); + const mockHubPoolClient = new MockHubPoolClient( + hubPoolClient.logger, + fakeHubPool as unknown as Contract, + hubPoolClient.configStoreClient as unknown as ConfigStoreClient + ); + mockHubPoolClient.chainId = hubPoolClient.chainId; + mockHubPoolClient.setTokenInfoToReturn({ address: l1Token_1.address, decimals: 18, symbol: "TEST" }); + mockHubPoolClient.setTokenMapping(l1Token_1.address, hubPoolClient.chainId, l1Token_1.address); + + // Sub in a dummy root bundle proposal for use in HubPoolClient update. 
+ const zero = "0x0000000000000000000000000000000000000000000000000000000000000000"; + fakeHubPool.multicall.returns([ + hubPool.interface.encodeFunctionResult("getCurrentTime", [getCurrentTime().toString()]), + hubPool.interface.encodeFunctionResult("rootBundleProposal", [zero, zero, zero, 0, ZERO_ADDRESS, 0, 0]), + ]); + return { + mockHubPoolClient, + fakeHubPool, + }; + } beforeEach(async function () { ({ hubPool, spokePool_1, + spokePool_4, erc20_1, erc20_2, spokePool_2, @@ -81,10 +123,10 @@ describe("Dataworker: Execute pool rebalances", async function () { await fillV3(spokePool_2, depositor, deposit, destinationChainId); await updateAllClients(); - const providers = { - ...spokePoolClientsToProviders(spokePoolClients), - [hubPoolClient.chainId]: hubPool.provider, - }; + // Executing leaves before there is a bundle should do nothing: + let leafCount = await dataworkerInstance.executePoolRebalanceLeaves(spokePoolClients, getNewBalanceAllocator()); + expect(leafCount).to.equal(0); + expect(lastSpyLogIncludes(spy, "No pending proposal")).to.be.true; await dataworkerInstance.proposeRootBundle(spokePoolClients); @@ -92,13 +134,16 @@ describe("Dataworker: Execute pool rebalances", async function () { await l1Token_1.approve(hubPool.address, MAX_UINT_VAL); await multiCallerClient.executeTxnQueues(); + // Executing leaves before bundle challenge period has passed should do nothing: + await updateAllClients(); + leafCount = await dataworkerInstance.executePoolRebalanceLeaves(spokePoolClients, getNewBalanceAllocator()); + expect(leafCount).to.equal(0); + expect(lastSpyLogIncludes(spy, "Challenge period not passed")).to.be.true; + // Advance time and execute leaves: await hubPool.setCurrentTime(Number(await hubPool.getCurrentTime()) + Number(await hubPool.liveness()) + 1); await updateAllClients(); - let leafCount = await dataworkerInstance.executePoolRebalanceLeaves( - spokePoolClients, - new BalanceAllocator(providers) - ); + leafCount = await dataworkerInstance.executePoolRebalanceLeaves(spokePoolClients, getNewBalanceAllocator()); expect(leafCount).to.equal(2); // Should be 4 transactions: 1 for the to chain, 1 for the from chain, 1 for the extra ETH sent to cover @@ -107,6 +152,11 @@ describe("Dataworker: Execute pool rebalances", async function () { expect(multiCallerClient.transactionCount()).to.equal(4); await multiCallerClient.executeTxnQueues(); + // If we attempt execution again, the hub pool client should show them as already executed. + await updateAllClients(); + leafCount = await dataworkerInstance.executePoolRebalanceLeaves(spokePoolClients, getNewBalanceAllocator()); + expect(leafCount).to.equal(0); + // TEST 3: // Submit another root bundle proposal and check bundle block range. There should be no leaves in the new range // yet. 
In the bundle block range, all chains should have increased their start block, including those without @@ -118,396 +168,497 @@ describe("Dataworker: Execute pool rebalances", async function () { // Advance time and execute leaves: await hubPool.setCurrentTime(Number(await hubPool.getCurrentTime()) + Number(await hubPool.liveness()) + 1); await updateAllClients(); - leafCount = await dataworkerInstance.executePoolRebalanceLeaves(spokePoolClients, new BalanceAllocator(providers)); + leafCount = await dataworkerInstance.executePoolRebalanceLeaves(spokePoolClients, getNewBalanceAllocator()); expect(leafCount).to.equal(0); expect(multiCallerClient.transactionCount()).to.equal(0); }); - describe("update exchange rates", function () { - let mockHubPoolClient: MockHubPoolClient, fakeHubPool: FakeContract; + it("Executes mainnet leaves before non-mainnet leaves", async function () { + // Send deposit on SpokePool with same chain ID as hub chain. + // Fill it on a different spoke pool. + await updateAllClients(); + + // Mainnet deposit should produce a mainnet pool leaf. + const deposit = await depositV3( + spokePool_4, + destinationChainId, + depositor, + l1Token_1.address, + amountToDeposit, + erc20_2.address, + amountToDeposit + ); + await updateAllClients(); + // Fill and take repayment on a non-mainnet spoke pool. + await fillV3(spokePool_2, depositor, deposit, destinationChainId); + await updateAllClients(); + + const balanceAllocator = getNewBalanceAllocator(); + await dataworkerInstance.proposeRootBundle(spokePoolClients); + + // Execute queue and check that root bundle is pending: + await l1Token_1.approve(hubPool.address, MAX_UINT_VAL); + await multiCallerClient.executeTxnQueues(); + + // Advance time and execute leaves: + await hubPool.setCurrentTime(Number(await hubPool.getCurrentTime()) + Number(await hubPool.liveness()) + 1); + await updateAllClients(); + const leafCount = await dataworkerInstance.executePoolRebalanceLeaves(spokePoolClients, balanceAllocator); + expect(leafCount).to.equal(2); + + const leafExecutions = multiCallerClient.getQueuedTransactions(hubPoolClient.chainId).map((tx, index) => { + return { + ...tx, + index, + }; + }); + const poolLeafExecutions = leafExecutions.filter((tx) => tx.method === "executeRootBundle"); + expect(poolLeafExecutions[0].args[0]).to.equal(hubPoolClient.chainId); + const refundLeafExecutions = leafExecutions.filter((tx) => tx.method === "executeRelayerRefundLeaf"); + expect(refundLeafExecutions.length).to.equal(1); + + // Hub chain relayer refund leaves should also execute before non-mainnet pool leaves + expect(refundLeafExecutions[0].index).to.be.greaterThan(poolLeafExecutions[0].index); + expect(refundLeafExecutions[0].index).to.be.lessThan(poolLeafExecutions[1].index); + expect(poolLeafExecutions[1].args[0]).to.equal(destinationChainId); + }); + describe("_executePoolLeavesAndSyncL1Tokens", function () { + let mockHubPoolClient: MockHubPoolClient, balanceAllocator: BalanceAllocator; beforeEach(async function () { - fakeHubPool = await smock.fake(hubPool.interface, { address: hubPool.address }); - mockHubPoolClient = new MockHubPoolClient(hubPoolClient.logger, fakeHubPool, hubPoolClient.configStoreClient); - mockHubPoolClient.setTokenInfoToReturn({ address: l1Token_1.address, decimals: 18, symbol: "TEST" }); + ({ mockHubPoolClient } = await createMockHubPoolClient()); dataworkerInstance.clients.hubPoolClient = mockHubPoolClient; - // Sub in a dummy root bundle proposal for use in HubPoolClient update. 
- const zero = "0x0000000000000000000000000000000000000000000000000000000000000000"; - fakeHubPool.multicall.returns([ - hubPool.interface.encodeFunctionResult("getCurrentTime", [getCurrentTime().toString()]), - hubPool.interface.encodeFunctionResult("rootBundleProposal", [zero, zero, zero, 0, ZERO_ADDRESS, 0, 0]), + // Make sure post-sync reserves are greater than the net send amount. + balanceAllocator = getNewBalanceAllocator(); + }); + it("Should not double update an LP token", async function () { + // In this test, the HubPool client returns the liquid reserves as 0 for a token. + + // So, executing the ethereum leaves results in an exchangeRate() update call. + + // The subsequent call to execute non-ethereum leaves should not result in an extra exchange rate call + // if a sync was already included. + + // Set LP reserves to 0 for the token. + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, bnZero); + + // Make sure post-sync reserves are greater than the net send amount. + balanceAllocator.testSetBalance(hubPoolClient.chainId, l1Token_1.address, hubPool.address, toBNWei("2")); + + const poolRebalanceLeaves: PoolRebalanceLeaf[] = [ + { + chainId: hubPoolClient.chainId, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [toBNWei("1")], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [l1Token_1.address], + }, + { + chainId: 10, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [toBNWei("1")], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [l1Token_1.address], + }, + ]; + + const leafCount = await dataworkerInstance._executePoolLeavesAndSyncL1Tokens( + spokePoolClients, + balanceAllocator, + poolRebalanceLeaves, + new MerkleTree(poolRebalanceLeaves, () => "test"), + [], + new MerkleTree([], () => "test"), + [], + new MerkleTree([], () => "test"), + true + ); + expect(leafCount).to.equal(2); + + // Should sync LP token for first leaf execution, but not for the second. This tests that latestLiquidReserves + // are passed correctly into _updateExchangeRatesBeforeExecutingNonHubChainLeaves so that currentReserves + // don't get set to the HubPool.pooledTokens.liquidReserves value. If this was done incorrectly then I would + // expect a second exchangeRateCurrent method before the second executeRootBundle call. + const enqueuedTxns = multiCallerClient.getQueuedTransactions(hubPoolClient.chainId); + expect(enqueuedTxns.map((txn) => txn.method)).to.deep.equal([ + "exchangeRateCurrent", + "executeRootBundle", + "executeRootBundle", + ]); + }); + it("Executing hub chain pool leaves should decrement available liquid reserves for subsequent executions", async function () { + // In this test, the HubPool client returns the liquid reserves as sufficient for + // executing Hub chain leaves for a token. + + // The subsequent call to execute non-ethereum leaves should force an LP token update + // before executing the non hub chain leaves. + + const netSendAmount = toBNWei("1"); + const liquidReserves = toBNWei("1"); + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + + // Make sure post-sync reserves are >= than the net send amount. 
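// For orientation, every call to dataworkerInstance._executePoolLeavesAndSyncL1Tokens in these tests passes
// nine positional arguments. The parameter names below are assumptions added for readability; only the
// argument order is taken from the call sites in this file.
//
//   _executePoolLeavesAndSyncL1Tokens(
//     spokePoolClients,      // spoke pool clients keyed by chain id
//     balanceAllocator,      // tracks simulated token balances and credits across leaf executions
//     poolRebalanceLeaves,   // pool rebalance leaves to execute
//     poolRebalanceTree,     // merkle tree over the pool rebalance leaves
//     relayerRefundLeaves,   // hub chain relayer refund leaves ([] in most tests)
//     relayerRefundTree,     // merkle tree over the refund leaves
//     slowFillLeaves,        // hub chain slow fill leaves ([] in most tests)
//     slowFillTree,          // merkle tree over the slow fill leaves
//     true                   // assumed to be a submit flag controlling whether transactions are enqueued
//   );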
+ const postUpdateLiquidReserves = toBNWei("2"); + balanceAllocator.testSetBalance( + hubPoolClient.chainId, + l1Token_1.address, + hubPool.address, + postUpdateLiquidReserves + ); + + const poolRebalanceLeaves: PoolRebalanceLeaf[] = [ + { + chainId: 10, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [netSendAmount], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [l1Token_1.address], + }, + { + chainId: hubPoolClient.chainId, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [netSendAmount], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [l1Token_1.address], + }, + ]; + + const leafCount = await dataworkerInstance._executePoolLeavesAndSyncL1Tokens( + spokePoolClients, + balanceAllocator, + poolRebalanceLeaves, + new MerkleTree(poolRebalanceLeaves, () => "test"), + [], + new MerkleTree([], () => "test"), + [], + new MerkleTree([], () => "test"), + true + ); + expect(leafCount).to.equal(2); + + // The order should be: executeRootBundle, exchangeRateCurrent, execute + const enqueuedTxns = multiCallerClient.getQueuedTransactions(hubPoolClient.chainId); + expect(enqueuedTxns.map((txn) => txn.method)).to.deep.equal([ + "executeRootBundle", + "exchangeRateCurrent", + "executeRootBundle", ]); - - await updateAllClients(); }); - describe("_updateExchangeRatesBeforeExecutingHubChainLeaves", function () { - it("exits early if net send amount is negative", async function () { - const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingHubChainLeaves( - { netSendAmounts: [toBNWei(-1)], l1Tokens: [l1Token_1.address] }, - true - ); - expect(Object.keys(updated)).to.have.length(0); - expect(multiCallerClient.transactionCount()).to.equal(0); - }); - it("exits early if current reserves are sufficient to pay for net send amounts", async function () { - const netSendAmount = toBNWei("1"); - - fakeHubPool.multicall.returns([ - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - netSendAmount, // liquid reserves - bnZero, // unaccumulated fees - ]), - ZERO_ADDRESS, // sync output - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - bnZero, // liquid reserves, post update. Doesn't matter for this test - // because we should be early exiting if current liquid reserves are sufficient. 
- bnZero, // unaccumulated fees - ]), - ]); - - const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingHubChainLeaves( - { netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address] }, - true - ); - expect(Object.keys(updated)).to.have.length(0); - expect(multiCallerClient.transactionCount()).to.equal(0); - }); - it("logs error if updated liquid reserves aren't enough to execute leaf", async function () { - const netSendAmount = toBNWei("1"); - - fakeHubPool.multicall.returns([ - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - bnZero, // liquid reserves, set less than netSendAmount - bnZero, // unaccumulated fees - ]), - ZERO_ADDRESS, // sync output - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - bnZero, // liquid reserves, still less than net send amount - bnZero, // unaccumulated fees - ]), - ]); - - const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingHubChainLeaves( - { netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address] }, - true - ); - expect(Object.keys(updated)).to.have.length(0); - expect(lastSpyLogLevel(spy)).to.equal("error"); - expect(lastSpyLogIncludes(spy, "Not enough funds to execute")).to.be.true; - expect(multiCallerClient.transactionCount()).to.equal(0); - }); - it("submits update", async function () { - const netSendAmount = toBNWei("1"); - const updatedLiquidReserves = netSendAmount.add(1); - - fakeHubPool.multicall.returns([ - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - bnZero, // liquid reserves, set less than netSendAmount - bnZero, // unaccumulated fees - ]), - ZERO_ADDRESS, // sync output - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - updatedLiquidReserves, // liquid reserves, >= than netSendAmount - bnZero, // unaccumulated fees - ]), - ]); - - const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingHubChainLeaves( - { netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address] }, - true - ); - expect(Object.keys(updated)).to.have.length(1); - expect(updated[l1Token_1.address]).to.equal(updatedLiquidReserves); - expect(multiCallerClient.transactionCount()).to.equal(1); - }); + it("Executing hub chain refund leaves should increment available liquid reserves for subsequent executions", async function () { + // In this test, the refund leaf returns reserves to the hub chain, which gives enough post-sync liquid + // reserves to execute the non hub chain leaf. 
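// Put differently, the hub chain refund leaf hands tokens back to the HubPool as far as the balance
// allocator is concerned, which is what makes the non hub chain pool leaf executable afterwards. A minimal
// sketch of the accounting being assumed (a negative "used" amount acts as a credit; the actual call site
// inside the executor is not shown in this patch, but the new test added to
// test/Dataworker.executeRelayerRefunds.ts later in this diff expects getUsed(...) to equal -1 after a hub
// chain refund leaf with an amountToReturn of 1 is executed):
balanceAllocator.addUsed(hubPoolClient.chainId, l1Token_1.address, hubPool.address, toBNWei("1").mul(-1));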
+ + const netSendAmount = toBNWei("1"); + const liquidReserves = toBNWei("0"); + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + + const poolRebalanceLeaves: PoolRebalanceLeaf[] = [ + { + chainId: 10, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [netSendAmount], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [l1Token_1.address], + }, + { + chainId: hubPoolClient.chainId, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [toBNWei("0")], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [l1Token_1.address], + }, + ]; + + // Need to set a balance for the spoke pool to make the dataworker believe this leaf can be executed. + balanceAllocator.testSetBalance( + hubPoolClient.chainId, + l1Token_1.address, + spokePoolClients[hubPoolClient.chainId].spokePool.address, + netSendAmount + ); + + const relayerRefundLeaves: RelayerRefundLeaf[] = [ + { + chainId: hubPoolClient.chainId, + l2TokenAddress: l1Token_1.address, + amountToReturn: netSendAmount, + refundAddresses: [], + refundAmounts: [], + leafId: 0, + }, + ]; + + const leafCount = await dataworkerInstance._executePoolLeavesAndSyncL1Tokens( + spokePoolClients, + balanceAllocator, + poolRebalanceLeaves, + new MerkleTree(poolRebalanceLeaves, () => "test"), + relayerRefundLeaves, + new MerkleTree(relayerRefundLeaves, () => "test"), + [], + new MerkleTree([], () => "test"), + true + ); + expect(leafCount).to.equal(2); + + // Execute mainnet refund leaf after mainnet pool leaf. Then update exchange rates to execute non-mainnet pool leaf. + const enqueuedTxns = multiCallerClient.getQueuedTransactions(hubPoolClient.chainId); + expect(enqueuedTxns.map((txn) => txn.method)).to.deep.equal([ + "executeRootBundle", + "executeRelayerRefundLeaf", + "exchangeRateCurrent", + "executeRootBundle", + ]); }); - describe("_updateExchangeRatesBeforeExecutingNonHubChainLeaves", function () { - let balanceAllocator; - beforeEach(async function () { - const providers = { - ...spokePoolClientsToProviders(spokePoolClients), - [hubPoolClient.chainId]: hubPool.provider, - }; - balanceAllocator = new BalanceAllocator(providers); - }); - it("exits early if net send amount is negative", async function () { - const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( - {}, - balanceAllocator, - [{ netSendAmounts: [toBNWei(-1)], l1Tokens: [l1Token_1.address], chainId: 1 }], - true - ); - expect(updated.size).to.equal(0); - expect(multiCallerClient.transactionCount()).to.equal(0); - }); - it("exits early if current liquid reserves are greater than net send amount", async function () { - const netSendAmount = toBNWei("1"); - const liquidReserves = toBNWei("2"); - // For this test, do not pass in a liquid reserves object and force dataworker to load - // from HubPoolClient - mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); - const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( - {}, - balanceAllocator, - [{ netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address], chainId: 1 }], - true - ); - expect(updated.size).to.equal(0); - expect(multiCallerClient.transactionCount()).to.equal(0); - }); - it("exits early if passed in liquid reserves are greater than net send amount", async function () { - const netSendAmount = toBNWei("1"); - const liquidReserves = toBNWei("2"); - // For this test, pass in a liquid reserves object - const updated = await 
dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( - { - [l1Token_1.address]: liquidReserves, - }, - balanceAllocator, - [{ netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address], chainId: 1 }], - true - ); - expect(updated.size).to.equal(0); - expect(multiCallerClient.transactionCount()).to.equal(0); - }); - it("logs error if updated liquid reserves aren't enough to execute leaf", async function () { - const netSendAmount = toBNWei("1"); - const liquidReserves = toBNWei("0"); - mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); - balanceAllocator.addUsed(hubPoolClient.chainId, l1Token_1.address, hubPool.address, toBNWei(0)); - - // Even after simulating sync, there are not enough liquid reserves. - fakeHubPool.multicall.returns([ - ZERO_ADDRESS, // sync output - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - liquidReserves, // liquid reserves, >= than netSendAmount - bnZero, // unaccumulated fees - ]), - ]); - - const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( - {}, - balanceAllocator, - [{ netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address], chainId: 1 }], - true - ); - expect(lastSpyLogLevel(spy)).to.equal("error"); - expect(lastSpyLogIncludes(spy, "will fail due to lack of funds to send")).to.be.true; - expect(updated.size).to.equal(0); - expect(multiCallerClient.transactionCount()).to.equal(0); - }); - it("submits update: liquid reserves post-sync are enough to execute leaf", async function () { - const netSendAmount = toBNWei("10"); - const liquidReserves = toBNWei("1"); - mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); - balanceAllocator.addUsed(hubPoolClient.chainId, l1Token_1.address, hubPool.address, toBNWei(1)); - - // At this point, passed in liquid reserves will be 1 and the balance allocator will add 1. - // This won't be enough. However, we should test that the dataworker simulates sync-ing the exchange - // rate and sees that the liquid reserves post-sync are enough to execute the leaf. - fakeHubPool.multicall.returns([ - ZERO_ADDRESS, // sync output - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - netSendAmount, // liquid reserves, >= than netSendAmount - bnZero, // unaccumulated fees - ]), - ]); - - const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( - { - [l1Token_1.address]: liquidReserves, - }, - balanceAllocator, - [{ netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address], chainId: 1 }], - true - ); - expect(updated.size).to.equal(1); - expect(updated.has(l1Token_1.address)).to.be.true; - expect(multiCallerClient.transactionCount()).to.equal(1); - }); - it("submits update: liquid reserves plus balanceAllocator.used are sufficient", async function () { - const netSendAmount = toBNWei("1"); - - // Liquid reserves are read from HubPoolClient. - // Liquid reserves are below net send amount, but virtual balance is above net send amount. 
- const liquidReserves = toBNWei("0"); - mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); - balanceAllocator.addUsed(1, l1Token_1.address, hubPool.address, netSendAmount.mul(-1)); - fakeHubPool.multicall.returns([ - ZERO_ADDRESS, // sync output - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - liquidReserves, // liquid reserves, >= than netSendAmount - bnZero, // unaccumulated fees - ]), - ]); - const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( - {}, - balanceAllocator, - [{ netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address], chainId: 1 }], - true - ); - expect(updated.size).to.equal(1); - expect(updated.has(l1Token_1.address)).to.be.true; - expect(multiCallerClient.transactionCount()).to.equal(1); - }); - it("Skips duplicate L1 tokens", async function () { - const netSendAmount = toBNWei("1"); - - // Liquid reserves are passed as input. - // Liquid reserves are below net send amount, but virtual balance is above net send amount. - const liquidReserves = toBNWei("0"); - balanceAllocator.addUsed(1, l1Token_1.address, hubPool.address, netSendAmount.mul(-1)); - fakeHubPool.multicall.returns([ - ZERO_ADDRESS, // sync output - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - netSendAmount, // liquid reserves, >= than netSendAmount - bnZero, // unaccumulated fees - ]), - ]); - const updated = await dataworkerInstance._updateExchangeRatesBeforeExecutingNonHubChainLeaves( - { - [l1Token_1.address]: liquidReserves, + it("Executes mainnet slow fill leaves", async function () { + // In this test, we verify slow fill leaves are executed after mainnet pool leaves. + + const slowFillAmount = toBNWei("1"); + const liquidReserves = toBNWei("0"); + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + + const poolRebalanceLeaves: PoolRebalanceLeaf[] = [ + { + chainId: hubPoolClient.chainId, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [toBNWei("0")], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [l1Token_1.address], + }, + ]; + + // Need to set a balance for the spoke pool to make the dataworker believe this leaf can be executed. 
+ balanceAllocator.testSetBalance( + hubPoolClient.chainId, + l1Token_1.address, + spokePoolClients[hubPoolClient.chainId].spokePool.address, + slowFillAmount + ); + const slowFillLeaves: SlowFillLeaf[] = [ + { + relayData: { + originChainId: 10, + depositor: randomAddress(), + recipient: randomAddress(), + depositId: 0, + inputToken: randomAddress(), + inputAmount: slowFillAmount, + outputToken: l1Token_1.address, + outputAmount: slowFillAmount, + message: "0x", + fillDeadline: 0, + exclusiveRelayer: randomAddress(), + exclusivityDeadline: 0, }, - balanceAllocator, - [ - { netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address], chainId: 137 }, - { netSendAmounts: [netSendAmount], l1Tokens: [l1Token_1.address], chainId: 10 }, - ], - true - ); - expect(updated.size).to.equal(1); - expect(updated.has(l1Token_1.address)).to.be.true; - expect(multiCallerClient.transactionCount()).to.equal(1); - }); + chainId: hubPoolClient.chainId, + updatedOutputAmount: slowFillAmount, + }, + ]; + + const leafCount = await dataworkerInstance._executePoolLeavesAndSyncL1Tokens( + spokePoolClients, + balanceAllocator, + poolRebalanceLeaves, + new MerkleTree(poolRebalanceLeaves, () => "test"), + [], + new MerkleTree([], () => "test"), + slowFillLeaves, + new MerkleTree(slowFillLeaves, () => "test"), + true + ); + expect(leafCount).to.equal(1); + + // Execute the mainnet slow fill leaf after the mainnet pool leaf; no exchange rate update or refund leaf is expected here. + const enqueuedTxns = multiCallerClient.getQueuedTransactions(hubPoolClient.chainId); + expect(enqueuedTxns.map((txn) => txn.method)).to.deep.equal(["executeRootBundle", "executeV3SlowRelayLeaf"]); + }); + it("No mainnet leaves", async function () { + // In this test, check that if there are no mainnet leaves, then the dataworker should just execute the + // non-mainnet leaves. + const netSendAmount = toBNWei("1"); + const liquidReserves = toBNWei("1"); + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + + const poolRebalanceLeaves: PoolRebalanceLeaf[] = [ + { + chainId: 10, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [netSendAmount], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [l1Token_1.address], + }, + ]; + + const leafCount = await dataworkerInstance._executePoolLeavesAndSyncL1Tokens( + spokePoolClients, + balanceAllocator, + poolRebalanceLeaves, + new MerkleTree(poolRebalanceLeaves, () => "test"), + [], + new MerkleTree([], () => "test"), + [], + new MerkleTree([], () => "test"), + true + ); + expect(leafCount).to.equal(1); + + const enqueuedTxns = multiCallerClient.getQueuedTransactions(hubPoolClient.chainId); + expect(enqueuedTxns.map((txn) => txn.method)).to.deep.equal(["executeRootBundle"]); + }); + it("Fails to execute mainnet leaf, still executes non-mainnet leaves", async function () { + // In this test, the hub pool leaf can't be funded using liquid reserves, but the + // dataworker should still try to execute the non-mainnet leaves.
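// A back-of-the-envelope rendering of the funding check exercised here (the variable names are illustrative
// only, not the dataworker's internals): a pool rebalance leaf is only executable when the HubPool's tracked
// reserves cover its netSendAmount.
const hubLeafIsFundable = toBNWei("1").gte(toBNWei("10")); // false: the 10-token mainnet leaf is skipped
const nonHubLeafIsFundable = toBNWei("1").gte(toBNWei("1")); // true: the 1-token chain 10 leaf still executes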
+ + const hubChainNetSendAmount = toBNWei("10"); + const nonHubChainNetSendAmount = toBNWei("1"); + const liquidReserves = toBNWei("1"); + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + balanceAllocator.testSetBalance(hubPoolClient.chainId, l1Token_1.address, hubPool.address, liquidReserves); + + const poolRebalanceLeaves: PoolRebalanceLeaf[] = [ + { + chainId: 10, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [nonHubChainNetSendAmount], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [l1Token_1.address], + }, + { + chainId: hubPoolClient.chainId, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [hubChainNetSendAmount], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [l1Token_1.address], + }, + ]; + + const leafCount = await dataworkerInstance._executePoolLeavesAndSyncL1Tokens( + spokePoolClients, + balanceAllocator, + poolRebalanceLeaves, + new MerkleTree(poolRebalanceLeaves, () => "test"), + [], + new MerkleTree([], () => "test"), + [], + new MerkleTree([], () => "test"), + true + ); + expect(leafCount).to.equal(1); + + const enqueuedTxns = multiCallerClient.getQueuedTransactions(hubPoolClient.chainId); + expect(enqueuedTxns.map((txn) => txn.method)).to.deep.equal(["executeRootBundle"]); + }); + it("Fails to execute some non-mainnet leaves", async function () { + // In this test, there is a mainnet leaf that can be executed, but one of the non-mainnet leaves cannot + // be executed. + const netSendAmount = toBNWei("1"); + + // This liquid reserve is only sufficient to execute one of the non-mainnet leaves. + const liquidReserves = toBNWei("1"); + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + balanceAllocator.testSetBalance(hubPoolClient.chainId, l1Token_1.address, hubPool.address, liquidReserves); + + const poolRebalanceLeaves: PoolRebalanceLeaf[] = [ + { + chainId: 10, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [netSendAmount], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [l1Token_1.address], + }, + { + chainId: 137, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [netSendAmount], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [l1Token_1.address], + }, + { + chainId: hubPoolClient.chainId, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [toBNWei("0")], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [l1Token_1.address], + }, + ]; + + const leafCount = await dataworkerInstance._executePoolLeavesAndSyncL1Tokens( + spokePoolClients, + balanceAllocator, + poolRebalanceLeaves, + new MerkleTree(poolRebalanceLeaves, () => "test"), + [], + new MerkleTree([], () => "test"), + [], + new MerkleTree([], () => "test"), + true + ); + expect(leafCount).to.equal(2); + + const enqueuedTxns = multiCallerClient.getQueuedTransactions(hubPoolClient.chainId); + expect(enqueuedTxns.map((txn) => txn.method)).to.deep.equal(["executeRootBundle", "executeRootBundle"]); + + const errorLogs = spy.getCalls().filter((call) => call.lastArg.level === "error"); + expect(errorLogs.length).to.equal(1); + expect(errorLogs[0].lastArg.message).to.contain("Not enough funds to execute pool rebalance leaf for chain 137"); }); - describe("_updateOldExchangeRates", function () { - it("exits early if we recently synced l1 token", async function () { - mockHubPoolClient.currentTime = 10_000; - mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 10_000, toBNWei("0")); - await 
dataworkerInstance._updateOldExchangeRates([l1Token_1.address], true); - expect(multiCallerClient.transactionCount()).to.equal(0); - }); - it("exits early if liquid reserves wouldn't increase for token post-update", async function () { - // Last update was at time 0, current time is at 1_000_000, so definitely past the update threshold - mockHubPoolClient.currentTime = 1_000_000; - mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0); - - // Hardcode multicall output such that it looks like liquid reserves stayed the same - fakeHubPool.multicall.returns([ - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - bnZero, // liquid reserves - bnZero, // unaccumulated fees - ]), - ZERO_ADDRESS, // sync output - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - bnZero, // liquid reserves, equal to "current" reserves - bnZero, // unaccumulated fees - ]), - ]); - - await dataworkerInstance._updateOldExchangeRates([l1Token_1.address], true); - expect(multiCallerClient.transactionCount()).to.equal(0); - - // Add test when liquid reserves decreases - fakeHubPool.multicall.returns([ - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - toBNWei(1), // liquid reserves - bnZero, // unaccumulated fees - ]), - ZERO_ADDRESS, // sync output - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - toBNWei(1).sub(1), // liquid reserves, less than "current" reserves - bnZero, // unaccumulated fees - ]), - ]); - - await dataworkerInstance._updateOldExchangeRates([l1Token_1.address], true); - expect(multiCallerClient.transactionCount()).to.equal(0); - }); - it("submits update if liquid reserves would increase for token post-update and last update was old enough", async function () { - // Last update was at time 0, current time is at 1_000_000, so definitely past the update threshold - mockHubPoolClient.currentTime = 1_000_000; - mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0); - - // Hardcode multicall output such that it looks like liquid reserves increased - fakeHubPool.multicall.returns([ - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - toBNWei(1), // liquid reserves - bnZero, // unaccumulated fees - ]), - ZERO_ADDRESS, - hubPool.interface.encodeFunctionResult("pooledTokens", [ - ZERO_ADDRESS, // lp token address - true, // enabled - 0, // last lp fee update - bnZero, // utilized reserves - toBNWei(1).add(1), // liquid reserves, higher than "current" reserves - bnZero, // unaccumulated fees - ]), - ]); - - await dataworkerInstance._updateOldExchangeRates([l1Token_1.address], true); - expect(multiCallerClient.transactionCount()).to.equal(1); - }); + it("Only mainnet leaves", async function () { + // Should not throw if there are only mainnet leaves.
+ const liquidReserves = toBNWei("1"); + mockHubPoolClient.setLpTokenInfo(l1Token_1.address, 0, liquidReserves); + + const poolRebalanceLeaves: PoolRebalanceLeaf[] = [ + { + chainId: hubPoolClient.chainId, + groupIndex: 0, + bundleLpFees: [toBNWei("1")], + netSendAmounts: [toBNWei("0")], + runningBalances: [toBNWei("1")], + leafId: 0, + l1Tokens: [l1Token_1.address], + }, + ]; + + const leafCount = await dataworkerInstance._executePoolLeavesAndSyncL1Tokens( + spokePoolClients, + balanceAllocator, + poolRebalanceLeaves, + new MerkleTree(poolRebalanceLeaves, () => "test"), + [], + new MerkleTree([], () => "test"), + [], + new MerkleTree([], () => "test"), + true + ); + expect(leafCount).to.equal(1); + + const enqueuedTxns = multiCallerClient.getQueuedTransactions(hubPoolClient.chainId); + expect(enqueuedTxns.map((txn) => txn.method)).to.deep.equal(["executeRootBundle"]); }); }); }); diff --git a/test/Dataworker.executeRelayerRefunds.ts b/test/Dataworker.executeRelayerRefunds.ts index a25a5a367..a2939b9a5 100644 --- a/test/Dataworker.executeRelayerRefunds.ts +++ b/test/Dataworker.executeRelayerRefunds.ts @@ -1,5 +1,5 @@ import { BundleDataClient, HubPoolClient, MultiCallerClient, SpokePoolClient } from "../src/clients"; -import { MAX_UINT_VAL, toBN } from "../src/utils"; +import { buildRelayerRefundTree, MAX_UINT_VAL, RelayerRefundLeaf, toBN, toBNWei } from "../src/utils"; import { MAX_L1_TOKENS_PER_POOL_REBALANCE_LEAF, MAX_REFUNDS_PER_RELAYER_REFUND_LEAF, @@ -16,7 +16,7 @@ import { spokePoolClientsToProviders } from "../src/common"; import { Dataworker } from "../src/dataworker/Dataworker"; let spokePool_1: Contract, erc20_1: Contract, spokePool_2: Contract, erc20_2: Contract; -let l1Token_1: Contract, hubPool: Contract, hubPoolClient: HubPoolClient; +let l1Token_1: Contract, hubPool: Contract, hubPoolClient: HubPoolClient, spokePool_4: Contract; let depositor: SignerWithAddress; let dataworkerInstance: Dataworker, multiCallerClient: MultiCallerClient; @@ -39,6 +39,7 @@ describe("Dataworker: Execute relayer refunds", async function () { spokePool_1, erc20_1, spokePool_2, + spokePool_4, erc20_2, l1Token_1, depositor, @@ -103,6 +104,35 @@ describe("Dataworker: Execute relayer refunds", async function () { await multiCallerClient.executeTxnQueues(); }); + it("Modifies BalanceAllocator when executing hub chain leaf", async function () { + const refundLeaves: RelayerRefundLeaf[] = [ + { + amountToReturn: toBNWei("1"), + chainId: hubPoolClient.chainId, + refundAmounts: [], + leafId: 0, + l2TokenAddress: l1Token_1.address, + refundAddresses: [], + }, + ]; + const relayerRefundTree = buildRelayerRefundTree(refundLeaves); + const balanceAllocator = await getNewBalanceAllocator(); + await spokePool_4.relayRootBundle( + relayerRefundTree.getHexRoot(), + "0x0000000000000000000000000000000000000000000000000000000000000000" + ); + await l1Token_1.mint(spokePool_4.address, amountToDeposit); + await updateAllClients(); + await dataworkerInstance._executeRelayerRefundLeaves( + refundLeaves, + balanceAllocator, + spokePoolClients[hubPoolClient.chainId], + relayerRefundTree, + true, + 0 + ); + expect(balanceAllocator.getUsed(hubPoolClient.chainId, l1Token_1.address, hubPool.address)).to.equal(toBNWei("-1")); + }); describe("Computing refunds for bundles", function () { let relayer: SignerWithAddress; let bundleDataClient: BundleDataClient; diff --git a/test/Dataworker.loadData.fill.ts b/test/Dataworker.loadData.fill.ts index d5e2a0a30..a78a704bc 100644 --- a/test/Dataworker.loadData.fill.ts +++ 
b/test/Dataworker.loadData.fill.ts @@ -18,6 +18,7 @@ import { expect, fillV3, getDefaultBlockRange, + getDisabledBlockRanges, randomAddress, sinon, smock, @@ -246,6 +247,28 @@ describe("Dataworker: Load data used in all functions", async function () { ).to.deep.equal(expiredDeposits.map((event) => event.args.depositId)); expect(data1.expiredDepositsToRefundV3[originChainId][erc20_1.address].length).to.equal(1); }); + + it("Ignores disabled chains", async function () { + const bundleBlockTimestamps = await dataworkerInstance.clients.bundleDataClient.getBundleBlockTimestamps( + [originChainId, destinationChainId], + getDefaultBlockRange(5), + spokePoolClients + ); + // Send unexpired deposit + generateV3Deposit(); + // Send expired deposit + generateV3Deposit({ fillDeadline: bundleBlockTimestamps[destinationChainId][1] - 1 }); + await mockOriginSpokePoolClient.update(["V3FundsDeposited"]); + + // Returns no data if block range is undefined + const emptyData = await dataworkerInstance.clients.bundleDataClient.loadData( + getDisabledBlockRanges(), + spokePoolClients + ); + expect(emptyData.bundleDepositsV3).to.deep.equal({}); + expect(emptyData.expiredDepositsToRefundV3).to.deep.equal({}); + }); + it("Filters unexpired deposit out of block range", async function () { // Send deposit behind and after origin chain block range. Should not be included in bundleDeposits. // First generate mock deposit events with some block time between events. @@ -338,7 +361,29 @@ describe("Dataworker: Load data used in all functions", async function () { .div(fixedPointAdjustment), }); }); + it("Ignores disabled chains", async function () { + const depositV3Events: Event[] = []; + const fillV3Events: Event[] = []; + // Create three valid deposits + depositV3Events.push(generateV3Deposit({ outputToken: randomAddress() })); + depositV3Events.push(generateV3Deposit({ outputToken: randomAddress() })); + depositV3Events.push(generateV3Deposit({ outputToken: randomAddress() })); + await mockOriginSpokePoolClient.update(["V3FundsDeposited"]); + const deposits = mockOriginSpokePoolClient.getDeposits(); + + // Fill deposits from different relayers + const relayer2 = randomAddress(); + fillV3Events.push(generateV3FillFromDeposit(deposits[0])); + fillV3Events.push(generateV3FillFromDeposit(deposits[1])); + fillV3Events.push(generateV3FillFromDeposit(deposits[2], {}, relayer2)); + await mockDestinationSpokePoolClient.update(["FilledV3Relay"]); + const emptyData = await dataworkerInstance.clients.bundleDataClient.loadData( + getDisabledBlockRanges(), + spokePoolClients + ); + expect(emptyData.bundleFillsV3).to.deep.equal({}); + }); it("Saves V3 fast fill under correct repayment chain and repayment token when dealing with lite chains", async function () { // Mock the config store client to include the lite chain index. mockConfigStore.updateGlobalConfig( diff --git a/test/Dataworker.loadData.slowFill.ts b/test/Dataworker.loadData.slowFill.ts index 926c9e873..39635325c 100644 --- a/test/Dataworker.loadData.slowFill.ts +++ b/test/Dataworker.loadData.slowFill.ts @@ -17,6 +17,7 @@ import { expect, fillV3, getDefaultBlockRange, + getDisabledBlockRanges, mineRandomBlocks, randomAddress, requestSlowFill, @@ -386,6 +387,27 @@ describe("BundleDataClient: Slow fill handling & validation", async function () ); }); + it("Ignores disabled chains", async function () { + // Only one deposit is eligible to be slow filled because its input and output tokens are equivalent. 
+ generateV3Deposit({ outputToken: randomAddress() }); + generateV3Deposit({ outputToken: erc20_2.address }); + await mockOriginSpokePoolClient.update(["V3FundsDeposited"]); + const deposits = mockOriginSpokePoolClient.getDeposits(); + + generateSlowFillRequestFromDeposit(deposits[0]); + generateSlowFillRequestFromDeposit(deposits[1]); + await mockDestinationSpokePoolClient.update(["RequestedV3SlowFill"]); + expect(mockDestinationSpokePoolClient.getSlowFillRequestsForOriginChain(originChainId).length).to.equal(2); + + const emptyData = await dataworkerInstance.clients.bundleDataClient.loadData( + getDisabledBlockRanges(), + spokePoolClients + ); + expect(emptyData.bundleDepositsV3).to.deep.equal({}); + expect(emptyData.expiredDepositsToRefundV3).to.deep.equal({}); + expect(emptyData.bundleSlowFillsV3).to.deep.equal({}); + }); + it("Slow fill requests cannot coincide with fill in same bundle", async function () { generateV3Deposit({ outputToken: erc20_2.address }); generateV3Deposit({ outputToken: erc20_2.address }); @@ -422,6 +444,24 @@ describe("BundleDataClient: Slow fill handling & validation", async function () expect(data1.unexecutableSlowFills).to.deep.equal({}); }); + it("Ignores disabled chains", async function () { + generateV3Deposit({ outputToken: erc20_2.address }); + await mockOriginSpokePoolClient.update(["V3FundsDeposited"]); + const deposits = mockOriginSpokePoolClient.getDeposits(); + + generateSlowFillRequestFromDeposit(deposits[0]); + generateV3FillFromDeposit(deposits[0], undefined, undefined, undefined, interfaces.FillType.ReplacedSlowFill); + await mockDestinationSpokePoolClient.update(["RequestedV3SlowFill", "FilledV3Relay"]); + + const emptyData = await dataworkerInstance.clients.bundleDataClient.loadData( + getDisabledBlockRanges(), + spokePoolClients + ); + expect(emptyData.unexecutableSlowFills).to.deep.equal({}); + expect(emptyData.bundleFillsV3).to.deep.equal({}); + expect(emptyData.bundleSlowFillsV3).to.deep.equal({}); + }); + it("Handles slow fill requests out of block range", async function () { generateV3Deposit({ outputToken: erc20_2.address }); generateV3Deposit({ outputToken: erc20_2.address }); diff --git a/test/Monitor.ts b/test/Monitor.ts index 26fed2661..ce7a2abf8 100644 --- a/test/Monitor.ts +++ b/test/Monitor.ts @@ -237,7 +237,7 @@ describe("Monitor", async function () { await monitorInstance.updateCurrentRelayerBalances(reports); expect(reports[depositor.address]["L1Token1"][ALL_CHAINS_NAME][BalanceType.CURRENT].toString()).to.be.equal( - "60000000000000000000000" + "75000000000000000000000" ); }); diff --git a/test/fixtures/Dataworker.Fixture.ts b/test/fixtures/Dataworker.Fixture.ts index 09b073e59..cba81c698 100644 --- a/test/fixtures/Dataworker.Fixture.ts +++ b/test/fixtures/Dataworker.Fixture.ts @@ -69,6 +69,7 @@ export async function setupDataworker( spokePool_1: Contract; erc20_1: Contract; spokePool_2: Contract; + spokePool_4: Contract; erc20_2: Contract; l1Token_1: Contract; l1Token_2: Contract; @@ -127,7 +128,7 @@ export async function setupDataworker( // Enable deposit routes for second L2 tokens so relays can be sent between spoke pool 1 <--> 2. 
await enableRoutes(spokePool_1, [{ originToken: erc20_2.address, destinationChainId: destinationChainId }]); await enableRoutes(spokePool_2, [{ originToken: erc20_1.address, destinationChainId: originChainId }]); - + await enableRoutes(spokePool_4, [{ originToken: l1Token_1.address, destinationChainId: destinationChainId }]); // For each chain, enable routes to both erc20's so that we can fill relays await enableRoutesOnHubPool(hubPool, [ { destinationChainId: originChainId, l1Token: l1Token_1, destinationToken: erc20_1 }, @@ -245,7 +246,7 @@ export async function setupDataworker( // Give depositors the tokens they'll deposit into spoke pools: await setupTokensForWallet(spokePool_1, depositor, [erc20_1, erc20_2], undefined, 10); await setupTokensForWallet(spokePool_2, depositor, [erc20_2, erc20_1], undefined, 10); - + await setupTokensForWallet(spokePool_4, depositor, [l1Token_1], undefined, 10); // Give relayers the tokens they'll need to relay on spoke pools: await setupTokensForWallet(spokePool_1, relayer, [erc20_1, erc20_2, l1Token_1, l1Token_2], undefined, 10); await setupTokensForWallet(spokePool_2, relayer, [erc20_1, erc20_2, l1Token_1, l1Token_2], undefined, 10); @@ -254,12 +255,14 @@ export async function setupDataworker( // "reasonable" block number based off the block time when looking at quote timestamps. await spokePool_1.setCurrentTime(await getLastBlockTime(spokePool_1.provider)); await spokePool_2.setCurrentTime(await getLastBlockTime(spokePool_2.provider)); + await spokePool_4.setCurrentTime(await getLastBlockTime(spokePool_4.provider)); return { hubPool, spokePool_1, erc20_1, spokePool_2, + spokePool_4, erc20_2, l1Token_1, l1Token_2, diff --git a/test/utils/utils.ts b/test/utils/utils.ts index 409210a51..d07a63c90 100644 --- a/test/utils/utils.ts +++ b/test/utils/utils.ts @@ -481,6 +481,10 @@ export function getDefaultBlockRange(toBlockOffset: number): number[][] { return DEFAULT_BLOCK_RANGE_FOR_CHAIN.map((range) => [range[0], range[1] + toBlockOffset]); } +export function getDisabledBlockRanges(): number[][] { + return DEFAULT_BLOCK_RANGE_FOR_CHAIN.map((range) => [range[0], range[0]]); +} + export function createRefunds( outputToken: string, refundAmount: BigNumber,