diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 3f9719ee..d71ca876 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -79,9 +79,23 @@ jobs: - run: yarn test:esm # full e2e integration tests working-directory: ./ar-io-sdk env: - IO_PROCESS_ID: ${{ vars.IO_NETWORK_PROCESS_ID }} + ARIO_NETWORK_PROCESS_ID: ${{ vars.ARIO_NETWORK_PROCESS_ID }} AO_CU_URL: ${{ vars.AO_CU_URL }} + monitor: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version-file: '.nvmrc' + cache: 'yarn' + + - run: yarn --frozen-lockfile + - run: yarn monitor + env: + ARIO_NETWORK_PROCESS_ID: ${{ vars.ARIO_NETWORK_PROCESS_ID }} + evolve: runs-on: ubuntu-latest needs: [integration, unit, sdk, lint] @@ -97,7 +111,7 @@ jobs: - run: yarn evolve env: WALLET: ${{ secrets.WALLET }} - IO_NETWORK_PROCESS_ID: ${{ vars.IO_NETWORK_PROCESS_ID }} + ARIO_NETWORK_PROCESS_ID: ${{ vars.ARIO_NETWORK_PROCESS_ID }} AO_CU_URL: ${{ vars.AO_CU_URL }} - name: Notify Success if: success() @@ -120,12 +134,12 @@ jobs: }, { "title": "Process ID", - "value": "${{ vars.IO_NETWORK_PROCESS_ID }}", + "value": "${{ vars.ARIO_NETWORK_PROCESS_ID }}", "short": true }, { "title": "View on ao.link", - "value": "https://www.ao.link/#/entity/${{ vars.IO_NETWORK_PROCESS_ID }}?tab=source-code", + "value": "https://www.ao.link/#/entity/${{ vars.ARIO_NETWORK_PROCESS_ID }}?tab=source-code", "short": false } , @@ -151,7 +165,7 @@ jobs: "fallback": "Failed to update IO Process!", "color": "danger", "title": "Details", - "text": 'The IO "${{ github.ref_name == 'main' && 'testnet' || 'devnet' }} Process ( ${{ vars.IO_NETWORK_PROCESS_ID }}) FAILED to update!', + "text": 'The IO "${{ github.ref_name == 'main' && 'testnet' || 'devnet' }} Process ( ${{ vars.ARIO_NETWORK_PROCESS_ID }}) FAILED to update!', "fields": [{ "title": "Network", "value": "${{ github.ref_name == 'main' && 'testnet' || 'devnet' }}", @@ -159,7 +173,7 @@ jobs: }, { "title": "Process ID", - "value": "${{ vars.IO_NETWORK_PROCESS_ID }}", + "value": "${{ vars.ARIO_NETWORK_PROCESS_ID }}", "short": true }, { diff --git a/.github/workflows/monitor.yaml b/.github/workflows/monitor.yaml index b6a21d92..f2976fc8 100644 --- a/.github/workflows/monitor.yaml +++ b/.github/workflows/monitor.yaml @@ -32,7 +32,7 @@ jobs: run: yarn monitor id: monitor env: - IO_PROCESS_ID: ${{ matrix.network == 'testnet' && 'agYcCFJtrMG6cqMuZfskIkFTGvUPddICmtQSBIoPdiA' || 'GaQrvEMKBpkjofgnBi_B3IgIDmY_XYelVLB6GcRGrHc' }} + ARIO_NETWORK_PROCESS_ID: ${{ matrix.network == 'testnet' && 'agYcCFJtrMG6cqMuZfskIkFTGvUPddICmtQSBIoPdiA' || 'GaQrvEMKBpkjofgnBi_B3IgIDmY_XYelVLB6GcRGrHc' }} - name: Notify Failure if: failure() diff --git a/.vscode/settings.json b/.vscode/settings.json index 816990a1..5f79587f 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -37,5 +37,5 @@ "editor.formatOnPaste": true, "editor.formatOnSaveMode": "file" }, - "cSpell.words": ["hashchain", "redelegate"] + "cSpell.words": ["ARIO", "hashchain", "redelegate"] } diff --git a/package.json b/package.json index e2ccbdf8..d519420e 100644 --- a/package.json +++ b/package.json @@ -9,13 +9,13 @@ "test:unit": "rm -rf coverage && mkdir -p coverage && busted . 
&& luacov", "test:coverage": "rm -rf luacov-html && yarn test:unit && luacov --reporter html && open luacov-html/index.html", "monitor": "node --test tests/monitor/monitor.test.mjs", - "monitor:devnet": "IO_PROCESS_ID=GaQrvEMKBpkjofgnBi_B3IgIDmY_XYelVLB6GcRGrHc node --test tests/monitor/monitor.test.mjs", - "monitor:testnet": "IO_PROCESS_ID=agYcCFJtrMG6cqMuZfskIkFTGvUPddICmtQSBIoPdiA node --test tests/monitor/monitor.test.mjs", + "monitor:devnet": "ARIO_NETWORK_PROCESS_ID=GaQrvEMKBpkjofgnBi_B3IgIDmY_XYelVLB6GcRGrHc node --test tests/monitor/monitor.test.mjs", + "monitor:testnet": "ARIO_NETWORK_PROCESS_ID=agYcCFJtrMG6cqMuZfskIkFTGvUPddICmtQSBIoPdiA node --test tests/monitor/monitor.test.mjs", "evolve": "yarn build && node tools/evolve.mjs", "prepare": "husky" }, "devDependencies": { - "@ar.io/sdk": "alpha", + "@ar.io/sdk": "^3.1.0-alpha.9", "@permaweb/ao-loader": "^0.0.36", "@permaweb/aoconnect": "^0.0.59", "arweave": "^1.15.1", diff --git a/spec/arns_spec.lua b/spec/arns_spec.lua index e5b5f90a..b9ef5a7f 100644 --- a/spec/arns_spec.lua +++ b/spec/arns_spec.lua @@ -767,6 +767,35 @@ describe("arns", function() assert.is_false(status) assert.match("Name must be extended before it can be reassigned", error) end) + + it("should not allow reassigning names during the grace period", function() + -- Setup record in grace period + _G.NameRegistry.records["test-name"] = { + endTimestamp = 123456789, + processId = testProcessId, + purchasePrice = 600000000, + startTimestamp = 0, + type = "lease", + undernameLimit = 10, + } + + -- Attempt to reassign + local newProcessId = "test-this-is-valid-arweave-wallet-address-2" + local status, error = pcall( + arns.reassignName, + "test-name", + testProcessId, + -- Just before the grace period ends + 123456789 + + constants.gracePeriodMs + - 1, + newProcessId + ) + + -- Assertions + assert.is_false(status) + assert.match("Name must be extended before it can be reassigned", error) + end) end) end diff --git a/spec/epochs_spec.lua b/spec/epochs_spec.lua index 8df8d1f8..17f1cbec 100644 --- a/spec/epochs_spec.lua +++ b/spec/epochs_spec.lua @@ -13,7 +13,7 @@ local testSettings = { delegateRewardShareRatio = 0, } local startTimestamp = 1704092400000 -local protocolBalance = 500000000 * 1000000 +local protocolBalance = constants.ARIOToMARIO(500000000) local hashchain = "NGU1fq_ssL9m6kRbRU1bqiIDBht79ckvAwRMGElkSOg" -- base64 of "some sample hash" describe("epochs", function() @@ -51,8 +51,8 @@ describe("epochs", function() } end) - describe("computePrescribedObserversForEpoch", function() - it("should return all eligible gateways if fewer than the maximum in network", function() + describe("getPrescribedObserversWithWeightsForEpoch", function() + it("should return the prescribed observers with weights for the epoch", function() _G.GatewayRegistry["test-this-is-valid-arweave-wallet-address-1"] = { operatorStake = gar.getSettings().operators.minStake, totalDelegatedStake = 0, @@ -71,26 +71,65 @@ describe("epochs", function() settings = testSettings, status = "joined", observerAddress = "observerAddress", + weights = { + normalizedCompositeWeight = 1, + stakeWeight = 1, + tenureWeight = 1, + gatewayRewardRatioWeight = 1, + observerRewardRatioWeight = 1, + compositeWeight = 1, + }, + } + _G.Epochs[0].prescribedObservers = { + ["observerAddress"] = "test-this-is-valid-arweave-wallet-address-1", } + local epochIndex = 0 local expectation = { { - gatewayAddress = "test-this-is-valid-arweave-wallet-address-1", observerAddress = "observerAddress", - stake = 
gar.getSettings().operators.minStake, - startTimestamp = startTimestamp, + gatewayAddress = "test-this-is-valid-arweave-wallet-address-1", + normalizedCompositeWeight = 1, stakeWeight = 1, - tenureWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, + tenureWeight = 1, gatewayRewardRatioWeight = 1, observerRewardRatioWeight = 1, - compositeWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - normalizedCompositeWeight = 1, + compositeWeight = 1, + stake = gar.getSettings().operators.minStake, + startTimestamp = startTimestamp, }, } - local status, result = pcall(epochs.computePrescribedObserversForEpoch, 0, hashchain) - assert.is_true(status) - assert.are.equal(1, #result) + local result = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) assert.are.same(expectation, result) end) + end) + + describe("computePrescribedObserversForEpoch", function() + it("should return all eligible gateways if fewer than the maximum in network", function() + _G.GatewayRegistry["test-this-is-valid-arweave-wallet-address-1"] = { + operatorStake = gar.getSettings().operators.minStake, + totalDelegatedStake = 0, + vaults = {}, + delegates = {}, + startTimestamp = startTimestamp, + stats = { + prescribedEpochCount = 0, + observedEpochCount = 0, + totalEpochCount = 0, + passedEpochCount = 0, + failedEpochCount = 0, + failedConsecutiveEpochs = 0, + passedConsecutiveEpochs = 0, + }, + settings = testSettings, + status = "joined", + observerAddress = "observerAddress", + } + local expectation = { + ["observerAddress"] = "test-this-is-valid-arweave-wallet-address-1", + } + local prescribedObserverMap = epochs.computePrescribedObserversForEpoch(0, hashchain) + assert.are.same(expectation, prescribedObserverMap) + end) it("should return the maximum number of gateways if more are enrolled in network", function() local testHashchain = "c29tZSBzYW1wbGUgaGFzaA==" -- base64 of "some sample hash" @@ -118,52 +157,25 @@ describe("epochs", function() }, settings = testSettings, status = "joined", - observerAddress = "observerAddress", + observerAddress = "observer-address-" .. i, } -- note - ordering of keys is not guaranteed when insert into maps _G.GatewayRegistry["observer" .. 
i] = gateway end local expectation = { - { - gatewayAddress = "observer1", - observerAddress = "observerAddress", - stake = gar.getSettings().operators.minStake, - startTimestamp = startTimestamp, - stakeWeight = 1, - tenureWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - gatewayRewardRatioWeight = 1, - observerRewardRatioWeight = 1, - compositeWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - normalizedCompositeWeight = 1 / 3, - }, - { - gatewayAddress = "observer3", - observerAddress = "observerAddress", - stake = gar.getSettings().operators.minStake, - startTimestamp = startTimestamp, - stakeWeight = 1, - tenureWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - gatewayRewardRatioWeight = 1, - observerRewardRatioWeight = 1, - compositeWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - normalizedCompositeWeight = 1 / 3, - }, + ["observer-address-1"] = "observer1", + ["observer-address-3"] = "observer3", } - local status, result = pcall(epochs.computePrescribedObserversForEpoch, 0, testHashchain) - assert.is_true(status) - assert.are.equal(2, #result) - table.sort(result, function(a, b) - return a.gatewayAddress < b.gatewayAddress - end) - assert.are.same(expectation, result) + local prescribedObserverMap = epochs.computePrescribedObserversForEpoch(0, testHashchain) + assert.are.same(expectation, prescribedObserverMap) end) end) describe("computePrescribedNamesForEpoch", function() -- NOTE: Record names in the tests below use spelled out numbers because without that -- there's insufficient base64url information encoded in the final encoded block to - -- disambiguate the decoded vallues. + -- disambiguate the decoded values. it("should return all eligible names if fewer than the maximum in name registry", function() _G.NameRegistry.records = { ["arns-name-one"] = { @@ -252,7 +264,7 @@ describe("epochs", function() assert.match("Observations for the current epoch cannot be submitted before", error) end) it("should throw an error if the caller is not prescribed", function() - local observer = "test-this-is-valid-arweave-wallet-address-2" + local observer = "test-this-is-valid-arweave-observer-address-2" local reportTxId = "test-this-very-valid-observations-report-tx" local settings = epochs.getSettings() local timestamp = settings.epochZeroStartTimestamp + settings.distributionDelayMs + 1 @@ -260,18 +272,7 @@ describe("epochs", function() "test-this-is-valid-arweave-wallet-address-1", } _G.Epochs[0].prescribedObservers = { - { - gatewayAddress = "test-this-is-valid-arweave-wallet-address-1", - observerAddress = "test-this-is-valid-arweave-wallet-address-1", - stake = gar.getSettings().operators.minStake, - startTimestamp = startTimestamp, - stakeWeight = 1, - tenureWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - gatewayRewardRatioWeight = 1, - observerRewardRatioWeight = 1, - compositeWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - normalizedCompositeWeight = 1, - }, + ["test-this-is-valid-arweave-observer-address-1"] = "test-this-is-valid-arweave-gateway-address-1", } local status, error = pcall(epochs.saveObservations, observer, reportTxId, failedGateways, timestamp) assert.is_false(status) @@ -280,7 +281,7 @@ describe("epochs", function() it( "should save observation when the timestamp is after the distribution delay and only mark gateways around during the full epoch as failed", function() - local observer = "test-this-is-valid-arweave-wallet-address-2" + local observer = 
"test-this-is-valid-arweave-observer-address-2" local reportTxId = "test-this-very-valid-observations-report-tx" local settings = epochs.getSettings() local timestamp = settings.epochZeroStartTimestamp + settings.distributionDelayMs + 1 @@ -302,7 +303,7 @@ describe("epochs", function() }, settings = testSettings, status = "joined", - observerAddress = "test-this-is-valid-arweave-wallet-address-1", + observerAddress = "test-this-is-valid-arweave-observer-address-1", }, ["test-this-is-valid-arweave-wallet-address-2"] = { operatorStake = gar.getSettings().operators.minStake, @@ -321,7 +322,7 @@ describe("epochs", function() }, settings = testSettings, status = "joined", - observerAddress = "test-this-is-valid-arweave-wallet-address-2", + observerAddress = "test-this-is-valid-arweave-observer-address-2", }, ["test-this-is-valid-arweave-wallet-address-3"] = { operatorStake = gar.getSettings().operators.minStake, @@ -340,7 +341,7 @@ describe("epochs", function() }, settings = testSettings, status = "joined", - observerAddress = "test-this-is-valid-arweave-wallet-address-3", + observerAddress = "test-this-is-valid-arweave-observer-address-3", }, ["test-this-is-valid-arweave-wallet-address-4"] = { operatorStake = gar.getSettings().operators.minStake, @@ -360,22 +361,11 @@ describe("epochs", function() }, settings = testSettings, status = "leaving", -- leaving, so it is not eligible to receive stats from this epoch - observerAddress = "test-this-is-valid-arweave-wallet-address-4", + observerAddress = "test-this-is-valid-arweave-observer-address-4", }, } _G.Epochs[0].prescribedObservers = { - { - gatewayAddress = "test-this-is-valid-arweave-wallet-address-2", - observerAddress = "test-this-is-valid-arweave-wallet-address-2", - stake = gar.getSettings().operators.minStake, - startTimestamp = startTimestamp, - stakeWeight = 1, - tenureWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - gatewayRewardRatioWeight = 1, - observerRewardRatioWeight = 1, - compositeWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - normalizedCompositeWeight = 1, - }, + ["test-this-is-valid-arweave-observer-address-2"] = "test-this-is-valid-arweave-wallet-address-2", } local failedGateways = { "test-this-is-valid-arweave-wallet-address-1", @@ -513,18 +503,7 @@ describe("epochs", function() reports = {}, }, prescribedObservers = { - { - compositeWeight = 4.0, - gatewayAddress = "test-this-is-valid-arweave-wallet-address-1", - gatewayRewardRatioWeight = 1.0, - normalizedCompositeWeight = 1.0, - observerAddress = "test-this-is-valid-arweave-wallet-address-1", - observerRewardRatioWeight = 1.0, - stake = gar.getSettings().operators.minStake, - stakeWeight = 1.0, - startTimestamp = 0, - tenureWeight = 4, - }, + ["test-this-is-valid-arweave-wallet-address-1"] = "test-this-is-valid-arweave-wallet-address-1", }, prescribedNames = {}, distributions = { @@ -634,21 +613,11 @@ describe("epochs", function() }, prescribedNames = {}, prescribedObservers = { - { - observerAddress = "test-this-very-valid-observer-wallet-addr-1", - }, - { - observerAddress = "test-this-very-valid-observer-wallet-addr-2", - }, - { - observerAddress = "test-this-very-valid-observer-wallet-addr-3", - }, - { - observerAddress = "test-this-very-valid-observer-wallet-addr-4", - }, - { - observerAddress = "test-this-very-valid-observer-wallet-addr-5", - }, + ["test-this-very-valid-observer-wallet-addr-1"] = "test-this-very-valid-arweave-wallet-addr-1", + ["test-this-very-valid-observer-wallet-addr-2"] = 
"test-this-very-valid-arweave-wallet-addr-2", + ["test-this-very-valid-observer-wallet-addr-3"] = "test-this-very-valid-arweave-wallet-addr-3", + ["test-this-very-valid-observer-wallet-addr-4"] = "test-this-very-valid-arweave-wallet-addr-4", + ["test-this-very-valid-observer-wallet-addr-5"] = "test-this-very-valid-arweave-wallet-addr-5", }, } diff --git a/spec/gar_spec.lua b/spec/gar_spec.lua index 345e0d0c..3a10fa5b 100644 --- a/spec/gar_spec.lua +++ b/spec/gar_spec.lua @@ -3048,6 +3048,7 @@ describe("gar", function() local testRedelegatorAddress = "test-re-delegator-1234567890123456789012345" local testSourceAddress = "unique-source-address-123456789012345678901" local testTargetAddress = "unique-target-address-123456789012345678901" + describe("redelegateStake", function() local timestamp = 12345 local testRedelgationGateway = utils.deepCopy({ @@ -3167,6 +3168,56 @@ describe("gar", function() assert.are.same(targetGateway, _G.GatewayRegistry[testTargetAddress]) end) + it( + "should allow operators to redelegate to its own stake when that stake is below the minimum delegated stake value", + function() + local sourceGateway = utils.deepCopy(testRedelgationGateway) + local targetGateway = utils.deepCopy(testRedelgationGateway) + + sourceGateway.delegates = { + [testRedelegatorAddress] = { + delegatedStake = minDelegatedStake + 1, + startTimestamp = 0, + vaults = {}, + }, + } + _G.GatewayRegistry = { + [testRedelegatorAddress] = targetGateway, + [testSourceAddress] = sourceGateway, + } + + local result = gar.redelegateStake({ + delegateAddress = testRedelegatorAddress, + sourceAddress = testSourceAddress, + targetAddress = testRedelegatorAddress, + qty = 1, -- Move 1 mARIO to the operator gateway + currentTimestamp = timestamp, + }) + + assert.are.same({ + sourceAddress = testSourceAddress, + targetAddress = testRedelegatorAddress, + redelegationFee = 0, + feeResetTimestamp = timestamp + sevenDays, + redelegationsSinceFeeReset = 1, + }, result) + + assert.are.same({ + timestamp = timestamp, + redelegations = 1, + }, _G.Redelegations[testRedelegatorAddress]) + + -- setup expectations on gateway tables + sourceGateway.delegates[testRedelegatorAddress] = { + delegatedStake = minDelegatedStake, + startTimestamp = 0, + vaults = {}, + } + sourceGateway.totalDelegatedStake = minDelegatedStake - 1 + targetGateway.operatorStake = minOperatorStake + 1 + end + ) + it( "should redelegate stake for a fee if the delegator has already done redelegations in the last seven epochs", function() @@ -4404,6 +4455,7 @@ describe("gar", function() error:find("Allow listing only possible when allowDelegatedStaking is set to 'allowlist'") ~= nil ) end) + it( "should disallow delegates if allowDelegatedStaking is true and the allowedDelegatesLookup is not nil", function() diff --git a/spec/primary_names_spec.lua b/spec/primary_names_spec.lua index 5de9b732..06b09c8a 100644 --- a/spec/primary_names_spec.lua +++ b/spec/primary_names_spec.lua @@ -92,6 +92,26 @@ describe("Primary Names", function() ) end) + it("should fail if the arns record is in its grace period", function() + _G.NameRegistry.records = { + ["test"] = { + processId = "base-name-owner", + type = "lease", + endTimestamp = 1234567890, + }, + } + local status, err = pcall( + primaryNames.createPrimaryNameRequest, + "test", + "user-requesting-primary-name", + -- Just after grace period starts + 1234567890 + 1, + "test-msg-id" + ) + assert.is_false(status) + assert.match("ArNS record 'test' is not active", err) + end) + it( "should create a primary name 
request and transfer the cost from the initiator to the protocol balance",
			function()
diff --git a/spec/utils_spec.lua b/spec/utils_spec.lua
index e285a773..ca5ce50c 100644
--- a/spec/utils_spec.lua
+++ b/spec/utils_spec.lua
@@ -23,6 +23,20 @@ describe("utils", function()
 		end)
 	end)
 
+	describe("isValidUnformattedEthAddress", function()
+		it("should return true on a valid unformatted ETH address", function()
+			assert.is_true(utils.isValidUnformattedEthAddress(testEthAddress))
+		end)
+
+		it("should return false on a non-string value", function()
+			assert.is_false(utils.isValidUnformattedEthAddress(3))
+		end)
+
+		it("should return false on an invalid unformatted ETH address", function()
+			assert.is_false(utils.isValidUnformattedEthAddress("ZxFCAd0B19bB29D4674531d6f115237E16AfCE377C"))
+		end)
+	end)
+
 	describe("formatAddress", function()
 		it("should format ETH address to lowercase", function()
 			assert.is.equal(testEthAddress, utils.formatAddress(testEthAddress))
diff --git a/src/arns.lua b/src/arns.lua
index 3791e166..f0d7f4d8 100644
--- a/src/arns.lua
+++ b/src/arns.lua
@@ -1042,7 +1042,7 @@ function arns.pruneReservedNames(currentTimestamp)
 end
 
 --- Asserts that a name can be reassigned
---- @param record StoredRecord The record to check
+--- @param record StoredRecord | nil The record to check
 --- @param currentTimestamp number The current timestamp
 --- @param from string The address of the sender
 --- @param newProcessId string The new process id
@@ -1055,11 +1055,13 @@
 	assert(record.processId == from, "Not authorized to reassign this name")
 
 	if record.endTimestamp then
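+		-- A record is in its grace period from endTimestamp until
+		-- endTimestamp + constants.gracePeriodMs, and is expired only after that window ends.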
-		local isWithinGracePeriod = record.endTimestamp < currentTimestamp
-			and record.endTimestamp + constants.gracePeriodMs > currentTimestamp
-		local isExpired = record.endTimestamp + constants.gracePeriodMs < currentTimestamp
-		assert(not isWithinGracePeriod, "Name must be extended before it can be reassigned")
-		assert(not isExpired, "Name is expired")
+		assert(
+			not arns.recordInGracePeriod(record, currentTimestamp),
+			"Name must be extended before it can be reassigned"
+		)
+		assert(not arns.recordExpired(record, currentTimestamp), "Name is expired")
 	end
 
 	return true
@@ -1075,7 +1075,6 @@ end
 function arns.reassignName(name, from, currentTimestamp, newProcessId, allowUnsafeProcessId)
 	allowUnsafeProcessId = allowUnsafeProcessId or false
 	local record = arns.getRecord(name)
-	assert(record, "Name is not registered")
 	arns.assertValidReassignName(record, currentTimestamp, from, newProcessId, allowUnsafeProcessId)
 	local updatedRecord = arns.modifyProcessId(name, newProcessId)
 	return updatedRecord
diff --git a/src/balances.lua b/src/balances.lua
index 56d95b55..41309754 100644
--- a/src/balances.lua
+++ b/src/balances.lua
@@ -50,6 +50,8 @@ end
 ---@throws error If target has insufficient balance
 function balances.reduceBalance(target, qty)
 	assert(balances.walletHasSufficientBalance(target, qty), "Insufficient balance")
+	assert(qty > 0, "Quantity must be greater than 0")
+
 	local prevBalance = balances.getBalance(target)
 	Balances[target] = prevBalance - qty
 end
diff --git a/src/constants.lua b/src/constants.lua
index 1b677207..ea858a41 100644
--- a/src/constants.lua
+++ b/src/constants.lua
@@ -9,6 +9,14 @@
 constants.oneWeekMs = constants.oneDayMs * 7
 constants.twoWeeksMs = constants.oneWeekMs * 2
 constants.oneYearMs = 31536000 * 1000
+constants.mARIOPerARIO = 1000000
+
+--- @param ARIO number
+--- @return mARIO
+function constants.ARIOToMARIO(ARIO)
+	return ARIO * constants.mARIOPerARIO
+end
+
 -- EPOCHS
 constants.defaultEpochDurationMs = constants.oneDayMs
 constants.maximumRewardRate = 0.001
@@ -19,10 +27,9 @@ constants.rewardDecayLastEpoch = 547
 -- GAR
 constants.DEFAULT_UNDERNAME_COUNT = 10
 constants.DEADLINE_DURATION_MS = constants.oneHourMs
-constants.totalTokenSupply = 1000000000 * 1000000 -- 1 billion tokens
+constants.totalTokenSupply = constants.ARIOToMARIO(1000000000) -- 1 billion tokens
 constants.MIN_EXPEDITED_WITHDRAWAL_PENALTY_RATE = 0.10 -- the minimum penalty rate for an expedited withdrawal (10% of the amount being withdrawn)
 constants.MAX_EXPEDITED_WITHDRAWAL_PENALTY_RATE = 0.50 -- the maximum penalty rate for an expedited withdrawal (50% of the amount being withdrawn)
-constants.mARIOPerARIO = 1000000
 constants.minimumWithdrawalAmount = constants.mARIOPerARIO -- the minimum amount that can be withdrawn from the GAR
 constants.redelegationFeeResetIntervalMs = constants.defaultEpochDurationMs * 7 -- 7 epochs
 constants.maxDelegateRewardShareRatio = 95 -- 95% of rewards can be shared with delegates
@@ -43,7 +50,8 @@ constants.ANNUAL_PERCENTAGE_FEE = 0.2 -- 20%
 constants.ARNS_NAME_DOES_NOT_EXIST_MESSAGE = "Name not found in the ArNS Registry!"
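+-- Note: constants.ARIOToMARIO(10) below equals 10 * 1000000 = 10000000 mARIO, the previous literal value.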
constants.UNDERNAME_LEASE_FEE_PERCENTAGE = 0.001 constants.UNDERNAME_PERMABUY_FEE_PERCENTAGE = 0.005 -constants.PRIMARY_NAME_REQUEST_COST = 10000000 -- 10 ARIO +constants.PRIMARY_NAME_REQUEST_COST = constants.ARIOToMARIO(10) -- 10 ARIO constants.gracePeriodMs = constants.defaultEpochDurationMs * 14 -- 14 epochs constants.maxLeaseLengthYears = 5 constants.returnedNamePeriod = constants.defaultEpochDurationMs * 14 -- 14 epochs @@ -67,63 +74,63 @@ constants.demandSettings = { } -- VAULTS -constants.MIN_VAULT_SIZE = 100000000 -- 100 ARIO +constants.MIN_VAULT_SIZE = constants.ARIOToMARIO(100) -- 100 ARIO constants.MAX_TOKEN_LOCK_TIME_MS = 12 * 365 * 24 * 60 * 60 * 1000 -- The maximum amount of blocks tokens can be locked in a vault (12 years of blocks) constants.MIN_TOKEN_LOCK_TIME_MS = 14 * 24 * 60 * 60 * 1000 -- The minimum amount of blocks tokens can be locked in a vault (14 days of blocks) -- ARNS FEES constants.genesisFees = { - [1] = 2000000000000, - [2] = 200000000000, - [3] = 40000000000, - [4] = 10000000000, - [5] = 4000000000, - [6] = 2000000000, - [7] = 1000000000, - [8] = 600000000, - [9] = 500000000, - [10] = 500000000, - [11] = 500000000, - [12] = 500000000, - [13] = 400000000, - [14] = 400000000, - [15] = 400000000, - [16] = 400000000, - [17] = 400000000, - [18] = 400000000, - [19] = 400000000, - [20] = 400000000, - [21] = 400000000, - [22] = 400000000, - [23] = 400000000, - [24] = 400000000, - [25] = 400000000, - [26] = 400000000, - [27] = 400000000, - [28] = 400000000, - [29] = 400000000, - [30] = 400000000, - [31] = 400000000, - [32] = 400000000, - [33] = 400000000, - [34] = 400000000, - [35] = 400000000, - [36] = 400000000, - [37] = 400000000, - [38] = 400000000, - [39] = 400000000, - [40] = 400000000, - [41] = 400000000, - [42] = 400000000, - [43] = 400000000, - [44] = 400000000, - [45] = 400000000, - [46] = 400000000, - [47] = 400000000, - [48] = 400000000, - [49] = 400000000, - [50] = 400000000, - [51] = 400000000, + [1] = constants.ARIOToMARIO(2000000), + [2] = constants.ARIOToMARIO(200000), + [3] = constants.ARIOToMARIO(40000), + [4] = constants.ARIOToMARIO(10000), + [5] = constants.ARIOToMARIO(4000), + [6] = constants.ARIOToMARIO(2000), + [7] = constants.ARIOToMARIO(1000), + [8] = constants.ARIOToMARIO(600), + [9] = constants.ARIOToMARIO(500), + [10] = constants.ARIOToMARIO(500), + [11] = constants.ARIOToMARIO(500), + [12] = constants.ARIOToMARIO(500), + [13] = constants.ARIOToMARIO(400), + [14] = constants.ARIOToMARIO(400), + [15] = constants.ARIOToMARIO(400), + [16] = constants.ARIOToMARIO(400), + [17] = constants.ARIOToMARIO(400), + [18] = constants.ARIOToMARIO(400), + [19] = constants.ARIOToMARIO(400), + [20] = constants.ARIOToMARIO(400), + [21] = constants.ARIOToMARIO(400), + [22] = constants.ARIOToMARIO(400), + [23] = constants.ARIOToMARIO(400), + [24] = constants.ARIOToMARIO(400), + [25] = constants.ARIOToMARIO(400), + [26] = constants.ARIOToMARIO(400), + [27] = constants.ARIOToMARIO(400), + [28] = constants.ARIOToMARIO(400), + [29] = constants.ARIOToMARIO(400), + [30] = constants.ARIOToMARIO(400), + [31] = constants.ARIOToMARIO(400), + [32] = constants.ARIOToMARIO(400), + [33] = constants.ARIOToMARIO(400), + [34] = constants.ARIOToMARIO(400), + [35] = constants.ARIOToMARIO(400), + [36] = constants.ARIOToMARIO(400), + [37] = constants.ARIOToMARIO(400), + [38] = constants.ARIOToMARIO(400), + [39] = constants.ARIOToMARIO(400), + [40] = constants.ARIOToMARIO(400), + [41] = constants.ARIOToMARIO(400), + [42] = constants.ARIOToMARIO(400), + [43] = 
constants.ARIOToMARIO(400), + [44] = constants.ARIOToMARIO(400), + [45] = constants.ARIOToMARIO(400), + [46] = constants.ARIOToMARIO(400), + [47] = constants.ARIOToMARIO(400), + [48] = constants.ARIOToMARIO(400), + [49] = constants.ARIOToMARIO(400), + [50] = constants.ARIOToMARIO(400), + [51] = constants.ARIOToMARIO(400), } -- General diff --git a/src/demand.lua b/src/demand.lua index 91baa6ab..cb2ec0ab 100644 --- a/src/demand.lua +++ b/src/demand.lua @@ -58,7 +58,9 @@ end --- @return number #The base fee for the name length function demand.baseFeeForNameLength(nameLength) assert(utils.isInteger(nameLength) and nameLength > 0, "nameLength must be a positive integer") - return demand.getFees()[nameLength] + local fee = demand.getFees()[nameLength] + assert(fee, "No fee found for name length: " .. nameLength) + return fee end --- Gets the moving average of trailing purchase counts diff --git a/src/epochs.lua b/src/epochs.lua index cb521c79..9ec8625d 100644 --- a/src/epochs.lua +++ b/src/epochs.lua @@ -64,7 +64,6 @@ EpochSettings = EpochSettings or { pruneEpochsCount = 14, -- prune epochs older than 14 days prescribedNameCount = 2, - rewardPercentage = 0.0005, -- 0.05% maxObservers = 50, epochZeroStartTimestamp = 1719900000000, -- July 9th, 00:00:00 UTC durationMs = constants.defaultEpochDurationMs, -- 24 hours @@ -108,11 +107,43 @@ end --- Gets the prescribed observers for an epoch --- @param epochIndex number The epoch index ---- @return WeightedGateway[] # The prescribed observers for the epoch +--- @return table # The prescribed observers for the epoch function epochs.getPrescribedObserversForEpoch(epochIndex) return epochs.getEpoch(epochIndex).prescribedObservers or {} end +--- Get prescribed observers with weights for epoch +--- @param epochIndex number The epoch index +--- @return WeightedGateway[] # The prescribed observers with weights for the epoch +function epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) + local prescribedObservers = epochs.getPrescribedObserversForEpoch(epochIndex) + -- Iterate over prescribed observers and add gateway details + local prescribedObserversWithWeights = {} + for _, gatewayAddress in pairs(prescribedObservers) do + local gateway = gar.getGateway(gatewayAddress) + if gateway then + table.insert(prescribedObserversWithWeights, { + observerAddress = gateway.observerAddress, + gatewayAddress = gatewayAddress, + normalizedCompositeWeight = gateway.weights.normalizedCompositeWeight, + stakeWeight = gateway.weights.stakeWeight, + tenureWeight = gateway.weights.tenureWeight, + gatewayRewardRatioWeight = gateway.weights.gatewayRewardRatioWeight, + observerRewardRatioWeight = gateway.weights.observerRewardRatioWeight, + compositeWeight = gateway.weights.compositeWeight, + stake = gateway.operatorStake, + startTimestamp = gateway.startTimestamp, + }) + end + end + + -- sort by normalizedCompositeWeight + table.sort(prescribedObserversWithWeights, function(a, b) + return a.normalizedCompositeWeight > b.normalizedCompositeWeight + end) + return prescribedObserversWithWeights +end + --- Gets the eligible rewards for an epoch --- @param epochIndex number The epoch index --- @return Rewards # T he eligible rewards for the epoch @@ -228,7 +259,7 @@ end --- Computes the prescribed observers for an epoch --- @param epochIndex number The epoch index --- @param hashchain string The hashchain ---- @return WeightedGateway[], WeightedGateway[] # The prescribed observers for the epoch, and all the gateways with weights +--- @return table, WeightedGateway[] # The 
prescribed observers for the epoch, and all the gateways with weights function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) assert(epochIndex >= 0, "Epoch index must be greater than or equal to 0") assert(type(hashchain) == "string", "Hashchain must be a string") @@ -239,6 +270,7 @@ function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) -- Filter out any observers that could have a normalized composite weight of 0 local filteredObservers = {} + local prescribedObserversLookup = {} -- use ipairs as weightedObservers in array for _, observer in ipairs(weightedGateways) do if observer.normalizedCompositeWeight > 0 then @@ -246,7 +278,10 @@ function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) end end if #filteredObservers <= epochs.getSettings().maxObservers then - return filteredObservers, weightedGateways + for _, observer in ipairs(filteredObservers) do + prescribedObserversLookup[observer.observerAddress] = observer.gatewayAddress + end + return prescribedObserversLookup, weightedGateways end -- the hash we will use to create entropy for prescribed observers @@ -263,14 +298,12 @@ function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) -- get our prescribed observers, using the hashchain as entropy local hash = epochHash - local prescribedObserversAddressesLookup = {} - while utils.lengthOfTable(prescribedObserversAddressesLookup) < epochs.getSettings().maxObservers do + while utils.lengthOfTable(prescribedObserversLookup) < epochs.getSettings().maxObservers do local hashString = crypto.utils.array.toString(hash) local random = crypto.random(nil, nil, hashString) / 0xffffffff local cumulativeNormalizedCompositeWeight = 0 for _, observer in ipairs(filteredObservers) do - local alreadyPrescribed = prescribedObserversAddressesLookup[observer.gatewayAddress] - + local alreadyPrescribed = prescribedObserversLookup[observer.observerAddress] -- add only if observer has not already been prescribed if not alreadyPrescribed then -- add the observers normalized composite weight to the cumulative weight @@ -278,7 +311,7 @@ function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) + observer.normalizedCompositeWeight -- if the random value is less than the cumulative weight, we have found our observer if random <= cumulativeNormalizedCompositeWeight then - prescribedObserversAddressesLookup[observer.gatewayAddress] = true + prescribedObserversLookup[observer.observerAddress] = observer.gatewayAddress break end end @@ -287,22 +320,8 @@ function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) local newHash = crypto.utils.stream.fromArray(hash) hash = crypto.digest.sha2_256(newHash).asBytes() end - local prescribedObservers = {} - local filteredObserversAddressMap = utils.reduce(filteredObservers, function(acc, _, observer) - acc[observer.gatewayAddress] = observer - return acc - end, {}) - for address, _ in pairs(prescribedObserversAddressesLookup) do - table.insert(prescribedObservers, filteredObserversAddressMap[address]) - end - - -- sort them in place - table.sort(prescribedObservers, function(a, b) - return a.normalizedCompositeWeight > b.normalizedCompositeWeight -- sort by descending weight - end) - -- return the prescribed observers and the weighted observers - return prescribedObservers, weightedGateways + return prescribedObserversLookup, weightedGateways end --- Gets the epoch timestamps for an epoch index @@ -433,17 +452,13 @@ function epochs.saveObservations(observerAddress, 
reportTxId, failedGatewayAddre "Observations for the current epoch cannot be submitted before: " .. epochDistributionTimestamp ) - local prescribedObservers = epochs.getPrescribedObserversForEpoch(epochIndex) - assert(#prescribedObservers > 0, "No prescribed observers for the current epoch.") + local prescribedObserversLookup = epochs.getPrescribedObserversForEpoch(epochIndex) + assert(utils.lengthOfTable(prescribedObserversLookup) > 0, "No prescribed observers for the current epoch.") - local observerIndex = utils.findInArray(prescribedObservers, function(prescribedObserver) - return prescribedObserver.observerAddress == observerAddress - end) + local gatewayAddressForObserver = prescribedObserversLookup[observerAddress] + assert(gatewayAddressForObserver, "Caller is not a prescribed observer for the current epoch.") - local observer = prescribedObservers[observerIndex] - assert(observer, "Caller is not a prescribed observer for the current epoch.") - - local observingGateway = gar.getGateway(observer.gatewayAddress) + local observingGateway = gar.getGateway(gatewayAddressForObserver) assert(observingGateway, "The associated gateway not found in the registry.") local epoch = epochs.getEpoch(epochIndex) @@ -503,20 +518,17 @@ end --- Computes the total eligible rewards for an epoch based on the protocol balance and the reward percentage and prescribed observers --- @param epochIndex number The epoch index ---- @param prescribedObservers WeightedGateway[] The prescribed observers for the epoch +--- @param prescribedObserversLookup table The prescribed observers for the epoch --- @return ComputedRewards # The total eligible rewards -function epochs.computeTotalEligibleRewardsForEpoch(epochIndex, prescribedObservers) +function epochs.computeTotalEligibleRewardsForEpoch(epochIndex, prescribedObserversLookup) local epochStartTimestamp = epochs.getEpochTimestampsForIndex(epochIndex) local activeGatewayAddresses = gar.getActiveGatewaysBeforeTimestamp(epochStartTimestamp) local protocolBalance = balances.getBalance(ao.id) local rewardRate = epochs.getRewardRateForEpoch(epochIndex) local totalEligibleRewards = math.floor(protocolBalance * rewardRate) local eligibleGatewayReward = math.floor(totalEligibleRewards * 0.90 / #activeGatewayAddresses) -- TODO: make these setting variables - local eligibleObserverReward = math.floor(totalEligibleRewards * 0.10 / #prescribedObservers) -- TODO: make these setting variables - local prescribedObserversLookup = utils.reduce(prescribedObservers, function(acc, _, observer) - acc[observer.observerAddress] = true - return acc - end, {}) + local eligibleObserverReward = + math.floor(totalEligibleRewards * 0.10 / utils.lengthOfTable(prescribedObserversLookup)) -- TODO: make these setting variables -- compute for each gateway what their potential rewards are and for their delegates local potentialRewards = {} -- use ipairs as activeGatewayAddresses is an array @@ -590,14 +602,7 @@ function epochs.distributeRewardsForEpoch(currentTimestamp) end local eligibleGatewaysForEpoch = epochs.getEligibleRewardsForEpoch(epochIndex) - local prescribedObserversLookup = utils.reduce( - epochs.getPrescribedObserversForEpoch(epochIndex), - function(acc, _, observer) - acc[observer.observerAddress] = true - return acc - end, - {} - ) + local prescribedObserversLookup = epochs.getPrescribedObserversForEpoch(epochIndex) local totalObservationsSubmitted = utils.lengthOfTable(epoch.observations.reports) or 0 -- get the eligible rewards for the epoch diff --git a/src/gar.lua b/src/gar.lua 
index b6f4294f..68269090 100644
--- a/src/gar.lua
+++ b/src/gar.lua
@@ -27,7 +27,7 @@ local gar = {}
 --- @field services GatewayServices | nil
 --- @field status "joined"|"leaving"
 --- @field observerAddress WalletAddress
---- @field weights GatewayWeights | nil
+--- @field weights GatewayWeights | nil -- TODO: make this required and update tests to match the type
 --- @field slashings table | nil
 
 --- @class Gateway : CompactGateway
@@ -104,14 +104,14 @@ GatewayRegistrySettings = {
 		maxTenureWeight = 4,
 	},
 	operators = {
-		minStake = 10000 * 1000000, -- 10,000 ARIO
+		minStake = constants.ARIOToMARIO(10000), -- 10,000 ARIO
 		withdrawLengthMs = 90 * 24 * 60 * 60 * 1000, -- 90 days to lower operator stake
 		leaveLengthMs = 90 * 24 * 60 * 60 * 1000, -- 90 days that balance will be vaulted
 		failedEpochCountMax = 30, -- number of epochs failed before marked as leaving
 		failedEpochSlashRate = 0.2, -- 20% of stake is returned to protocol balance
 	},
 	delegates = {
-		minStake = 10 * 1000000, -- 10 ARIO
+		minStake = constants.ARIOToMARIO(10), -- 10 ARIO
 		withdrawLengthMs = 90 * 24 * 60 * 60 * 1000, -- 90 days
 	},
 }
@@ -207,28 +207,19 @@ function gar.leaveNetwork(from, currentTimestamp, msgId)
 	assert(gar.isGatewayEligibleToLeave(gateway, currentTimestamp), "The gateway is not eligible to leave the network.")
 
 	local gatewayEndTimestamp = currentTimestamp + gar.getSettings().operators.leaveLengthMs
-	local gatewayStakeWithdrawTimestamp = currentTimestamp + gar.getSettings().operators.withdrawLengthMs
 	local minimumStakedTokens = math.min(gar.getSettings().operators.minStake, gateway.operatorStake)
+	local gatewayStakeWithdrawTimestamp = currentTimestamp + gar.getSettings().operators.withdrawLengthMs
 
 	-- if the slash happens to be 100% we do not need to vault anything
 	if minimumStakedTokens > 0 then
-		gateway.vaults[from] = {
-			balance = minimumStakedTokens,
-			startTimestamp = currentTimestamp,
-			endTimestamp = gatewayEndTimestamp,
-		}
-		-- pruning scheduling for this vault is captured below
+		createGatewayExitVault(gateway, minimumStakedTokens, currentTimestamp, from)
 
 		-- if there is more than the minimum staked tokens, we need to vault the rest but on shorter term
 		local remainingStake = gateway.operatorStake - gar.getSettings().operators.minStake
 		if remainingStake > 0 then
-			gateway.vaults[msgId] = {
-				balance = remainingStake,
-				startTimestamp = currentTimestamp,
-				endTimestamp = gatewayStakeWithdrawTimestamp,
-			}
+			createGatewayWithdrawVault(gateway, msgId, remainingStake, currentTimestamp)
 			gar.scheduleNextGatewaysPruning(gatewayStakeWithdrawTimestamp)
 		end
 	end
@@ -328,11 +318,7 @@
 		-- Calculate the penalty and withdraw using the utility function
 		expeditedWithdrawalFee, amountToWithdraw, penaltyRate = processInstantWithdrawal(qty, 0, 0, from)
 	else
-		gateway.vaults[msgId] = {
-			balance = qty,
-			startTimestamp = currentTimestamp,
-			endTimestamp = currentTimestamp + gar.getSettings().operators.withdrawLengthMs,
-		}
+		createGatewayWithdrawVault(gateway, msgId, qty, currentTimestamp)
 		gar.scheduleNextGatewaysPruning(gateway.vaults[msgId].endTimestamp)
 	end
 
@@ -493,6 +479,13 @@ end
 --- @param gateway Gateway
 --- @param quantity mARIO
 function increaseDelegateStakeAtGateway(delegate, gateway, quantity)
+	assert(delegate, "Delegate is required")
+	assert(gateway, "Gateway is required")
+	-- zero is allowed as it is a no-op
+	assert(
+		quantity and utils.isInteger(quantity) and quantity >= 0,
+		"Quantity is required and must be an integer greater 
than or equal to 0: " .. quantity + ) delegate.delegatedStake = delegate.delegatedStake + quantity gateway.totalDelegatedStake = gateway.totalDelegatedStake + quantity end @@ -503,8 +496,15 @@ end --- @param ban boolean|nil do not add the delegate back to the gateway allowlist if their delegation is over function decreaseDelegateStakeAtGateway(delegateAddress, gateway, quantity, ban) local delegate = gateway.delegates[delegateAddress] - -- use this in an inverse way - increaseDelegateStakeAtGateway(delegate, gateway, -quantity) + assert(delegate, "Delegate is required") + -- zero is allowed as it is a no-op + assert( + quantity and utils.isInteger(quantity) and quantity >= 0, + "Quantity is required and must be an integer greater than or equal to 0: " .. quantity + ) + assert(gateway, "Gateway is required") + delegate.delegatedStake = delegate.delegatedStake - quantity + gateway.totalDelegatedStake = gateway.totalDelegatedStake - quantity gar.pruneDelegateFromGatewayIfNecessary(delegateAddress, gateway) if ban and gateway.settings.allowedDelegatesLookup then gateway.settings.allowedDelegatesLookup[delegateAddress] = nil @@ -643,11 +643,7 @@ function gar.decreaseDelegateStake(gatewayAddress, delegator, qty, currentTimest -- Calculate the penalty and withdraw using the utility function and move the balances expeditedWithdrawalFee, amountToWithdraw, penaltyRate = processInstantWithdrawal(qty, 0, 0, delegator) else - -- Withdraw the delegate's stake - local newDelegateVault = gar.createDelegateVault(qty, currentTimestamp) - - -- Lock the qty in a vault to be unlocked after withdrawal period and decrease the gateway's total delegated stake - gateway.delegates[delegator].vaults[messageId] = newDelegateVault + createDelegateWithdrawVault(gateway, delegator, messageId, qty, currentTimestamp) end decreaseDelegateStakeAtGateway(delegator, gateway, qty) @@ -697,8 +693,8 @@ function gar.getGatewayWeightsAtTimestamp(gatewayAddresses, timestamp) local totalCompositeWeight = 0 -- Iterate over gateways to calculate weights - for _, address in pairs(gatewayAddresses) do - local gateway = gar.getGateway(address) + for _, gatewayAddress in pairs(gatewayAddresses) do + local gateway = gar.getGateway(gatewayAddress) if gateway then local totalStake = gateway.operatorStake + gateway.totalDelegatedStake -- 100 - no cap to this local stakeWeightRatio = totalStake / gar.getSettings().operators.minStake -- this is always greater than 1 as the minOperatorStake is always less than the stake @@ -728,7 +724,7 @@ function gar.getGatewayWeightsAtTimestamp(gatewayAddresses, timestamp) * observerRewardRatioWeight table.insert(weightedObservers, { - gatewayAddress = address, + gatewayAddress = gatewayAddress, observerAddress = gateway.observerAddress, stake = totalStake, startTimestamp = gateway.startTimestamp, @@ -963,9 +959,9 @@ function gar.pruneGateways(currentTimestamp, msgId) -- first, return any expired vaults regardless of the gateway status for vaultId, vault in pairs(gateway.vaults) do if vault.endTimestamp <= currentTimestamp then - balances.increaseBalance(address, vault.balance) + unlockGatewayWithdrawVault(gateway, address, vaultId) + result.gatewayStakeReturned = result.gatewayStakeReturned + vault.balance - gateway.vaults[vaultId] = nil else -- find the next prune timestamp minNextEndTimestamp = math.min(minNextEndTimestamp or vault.endTimestamp, vault.endTimestamp) @@ -975,9 +971,8 @@ function gar.pruneGateways(currentTimestamp, msgId) for delegateAddress, delegate in pairs(gateway.delegates) do for 
vaultId, vault in pairs(delegate.vaults) do if vault.endTimestamp <= currentTimestamp then - balances.increaseBalance(delegateAddress, vault.balance) + unlockGatewayDelegateVault(gateway, delegateAddress, vaultId) result.delegateStakeReturned = result.delegateStakeReturned + vault.balance - delegate.vaults[vaultId] = nil else -- find the next prune timestamp minNextEndTimestamp = math.min(minNextEndTimestamp or vault.endTimestamp, vault.endTimestamp) @@ -1147,12 +1142,9 @@ function gar.cancelGatewayWithdrawal(from, gatewayAddress, vaultId) local previousTotalDelegatedStake = gateway.totalDelegatedStake local vaultBalance = existingVault.balance if isGatewayWithdrawal then - gateway.vaults[vaultId] = nil - gateway.operatorStake = gateway.operatorStake + vaultBalance + cancelGatewayWithdrawVault(gateway, vaultId) else - assert(gar.delegateAllowedToStake(from, gateway), "This Gateway does not allow this delegate to stake.") - delegate.vaults[vaultId] = nil - increaseDelegateStakeAtGateway(delegate, gateway, vaultBalance) + cancelGatewayDelegateVault(gateway, from, vaultId) end GatewayRegistry[gatewayAddress] = gateway return { @@ -1342,7 +1334,7 @@ function gar.kickDelegateFromGateway(delegateAddress, gateway, msgId, currentTim local remainingStake = delegate.delegatedStake if remainingStake > 0 then - delegate.vaults[msgId] = gar.createDelegateVault(delegate.delegatedStake, currentTimestamp) + createDelegateWithdrawVault(gateway, delegateAddress, msgId, remainingStake, currentTimestamp) end decreaseDelegateStakeAtGateway(delegateAddress, gateway, remainingStake, ban) end @@ -1593,8 +1585,10 @@ function gar.applyFundingPlan(fundingPlan, msgId, currentTimestamp) } -- draw down balance first - balances.reduceBalance(fundingPlan.address, fundingPlan.balance) - appliedPlan.totalFunded = appliedPlan.totalFunded + fundingPlan.balance + if fundingPlan.balance > 0 then + balances.reduceBalance(fundingPlan.address, fundingPlan.balance) + appliedPlan.totalFunded = appliedPlan.totalFunded + fundingPlan.balance + end --draw down stakes and vaults, creating withdraw vaults if necessary for gatewayAddress, delegationPlan in pairs(fundingPlan.stakes) do @@ -1635,8 +1629,7 @@ function gar.applyFundingPlan(fundingPlan, msgId, currentTimestamp) -- create an exit vault for the remaining stake if less than the gateway's minimum if delegate.delegatedStake > 0 and delegate.delegatedStake < gateway.settings.minDelegatedStake then - -- create a vault for the remaining stake - delegate.vaults[msgId] = gar.createDelegateVault(delegate.delegatedStake, currentTimestamp) + createDelegateWithdrawVault(gateway, fundingPlan.address, msgId, delegate.delegatedStake, currentTimestamp) decreaseDelegateStakeAtGateway(fundingPlan.address, gateway, delegate.delegatedStake) appliedPlan.newWithdrawVaults[gatewayAddress] = { [msgId] = utils.deepCopy(delegate.vaults[msgId]), @@ -1830,7 +1823,6 @@ function gar.redelegateStake(params) "This Gateway does not allow this delegate to stake." ) - local previousRedelegations = gar.getRedelegation(delegateAddress) local redelegationFeeRate = gar.getRedelegationFee(delegateAddress).redelegationFeeRate local redelegationFee = math.ceil(stakeToTakeFromSource * (redelegationFeeRate / 100)) local stakeToDelegate = stakeToTakeFromSource - redelegationFee @@ -1851,14 +1843,7 @@ function gar.redelegateStake(params) "Quantity must be less than or equal to the vaulted stake amount." 
) - if existingVault.balance == stakeToTakeFromSource then - -- The operator vault has been emptied - sourceGateway.vaults[vaultId] = nil - else - -- The operator vault has been partially emptied - sourceGateway.vaults[delegateAddress][vaultId].balance = sourceGateway.vaults[delegateAddress][vaultId].balance - - stakeToTakeFromSource - end + reduceStakeFromGatewayVault(sourceGateway, stakeToTakeFromSource, vaultId) else -- Get the redelegation amount from the operator stakes local maxWithdraw = sourceGateway.operatorStake - gar.getSettings().operators.minStake @@ -1883,14 +1868,7 @@ function gar.redelegateStake(params) "Quantity must be less than or equal to the vaulted stake amount." ) - if existingVault.balance == stakeToTakeFromSource then - -- The vault has been emptied - sourceGateway.delegates[delegateAddress].vaults[vaultId] = nil - gar.pruneDelegateFromGatewayIfNecessary(delegateAddress, sourceGateway) - else - -- The vault has been partially emptied - existingVault.balance = existingVault.balance - stakeToTakeFromSource - end + reduceStakeFromDelegateVault(sourceGateway, delegateAddress, stakeToTakeFromSource, vaultId) else -- Check if the delegate has enough stake to redelegate assert( @@ -1910,27 +1888,27 @@ function gar.redelegateStake(params) end end - local existingTargetDelegate = targetGateway.delegates[delegateAddress] - local minimumStakeForGatewayAndDelegate - if existingTargetDelegate and existingTargetDelegate.delegatedStake ~= 0 then - -- It already has a stake that is not zero - minimumStakeForGatewayAndDelegate = 1 -- Delegate must provide at least one additional mARIO - else - -- Consider if the operator increases the minimum amount after you've already staked - minimumStakeForGatewayAndDelegate = targetGateway.settings.minDelegatedStake - end - - -- Check if the delegate has enough stake to redelegate - assert( - stakeToDelegate >= minimumStakeForGatewayAndDelegate, - "Quantity must be greater than the minimum delegated stake amount." - ) - -- The stake can now be applied to the targetGateway if targetAddress == delegateAddress then -- move the stake to the operator's stake targetGateway.operatorStake = targetGateway.operatorStake + stakeToDelegate else + local existingTargetDelegate = targetGateway.delegates[delegateAddress] + local minimumStakeForGatewayAndDelegate + if existingTargetDelegate and existingTargetDelegate.delegatedStake ~= 0 then + -- It already has a stake that is not zero + minimumStakeForGatewayAndDelegate = 1 -- Delegate must provide at least one additional mARIO + else + -- Consider if the operator increases the minimum amount after you've already staked + minimumStakeForGatewayAndDelegate = targetGateway.settings.minDelegatedStake + end + + -- Check if the delegate has enough stake to redelegate + assert( + stakeToDelegate >= minimumStakeForGatewayAndDelegate, + "Quantity must be greater than the minimum delegated stake amount." 
+		)
+
+		targetGateway.delegates[delegateAddress] = targetGateway.delegates[delegateAddress]
+			or gar.createDelegateAtGateway(currentTimestamp, targetGateway, delegateAddress)
+		increaseDelegateStakeAtGateway(targetGateway.delegates[delegateAddress], targetGateway, stakeToDelegate)
@@ -1939,6 +1917,7 @@
 	-- Move redelegation fee to protocol balance
 	balances.increaseBalance(ao.id, redelegationFee)
 
+	local previousRedelegations = gar.getRedelegation(delegateAddress)
 	local redelegationsSinceFeeReset = (previousRedelegations and previousRedelegations.redelegations or 0) + 1
 
 	-- update the source and target gateways, and the delegator's redelegation fee data
@@ -2008,6 +1987,135 @@ function gar.getPaginatedVaultsForGateway(gatewayAddress, cursor, limit, sortBy, sortOrder)
 	)
 end
 
+--- @param gateway Gateway
+--- @param vaultId WalletAddress | MessageId
+--- @param qty mARIO
+--- @param currentTimestamp Timestamp
+function createGatewayWithdrawVault(gateway, vaultId, qty, currentTimestamp)
+	assert(not gateway.vaults[vaultId], "Vault already exists")
+
+	gateway.vaults[vaultId] = {
+		balance = qty,
+		startTimestamp = currentTimestamp,
+		endTimestamp = currentTimestamp + gar.getSettings().operators.withdrawLengthMs,
+	}
+end
+
+--- @param gateway Gateway
+--- @param qty mARIO
+--- @param currentTimestamp Timestamp
+--- @param gatewayAddress WalletAddress
+function createGatewayExitVault(gateway, qty, currentTimestamp, gatewayAddress)
+	assert(not gateway.vaults[gatewayAddress], "Exit Vault already exists")
+	gateway.vaults[gatewayAddress] = {
+		balance = qty,
+		startTimestamp = currentTimestamp,
+		endTimestamp = currentTimestamp + gar.getSettings().operators.leaveLengthMs,
+	}
+end
+
+--- @param gateway Gateway
+--- @param delegateAddress WalletAddress
+--- @param vaultId MessageId
+--- @param qty mARIO
+--- @param currentTimestamp Timestamp
+function createDelegateWithdrawVault(gateway, delegateAddress, vaultId, qty, currentTimestamp)
+	local delegate = gateway.delegates[delegateAddress]
+	assert(delegate, "Delegate not found")
+	assert(not delegate.vaults[vaultId], "Vault already exists")
+
+	-- Lock the qty in a vault to be unlocked after withdrawal period and decrease the gateway's total delegated stake
+	gateway.delegates[delegateAddress].vaults[vaultId] = gar.createDelegateVault(qty, currentTimestamp)
+end
+
+---@param gateway Gateway
+---@param vaultId MessageId
+function cancelGatewayWithdrawVault(gateway, vaultId)
+	local vault = gateway.vaults[vaultId]
+	assert(vault, "Vault not found")
+	gateway.vaults[vaultId] = nil
+	gateway.operatorStake = gateway.operatorStake + vault.balance
+end
+
+---@param gateway Gateway
+---@param gatewayAddress WalletAddress
+---@param vaultId MessageId
+function unlockGatewayWithdrawVault(gateway, gatewayAddress, vaultId)
+	local vault = gateway.vaults[vaultId]
+	assert(vault, "Vault not found")
+	balances.increaseBalance(gatewayAddress, vault.balance)
+	gateway.vaults[vaultId] = nil
+end
+
+---@param gateway Gateway
+---@param delegateAddress WalletAddress
+---@param vaultId MessageId
+function cancelGatewayDelegateVault(gateway, delegateAddress, vaultId)
+	local delegate = gateway.delegates[delegateAddress]
+	assert(delegate, "Delegate not found")
+	local vault = delegate.vaults[vaultId]
+	assert(vault, "Vault not found")
+	assert(gar.delegateAllowedToStake(delegateAddress, gateway), "This Gateway does not allow this delegate to stake.")
+
+	gateway.delegates[delegateAddress].vaults[vaultId] = nil
+	increaseDelegateStakeAtGateway(delegate, gateway, vault.balance)
+end
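+
+-- Illustrative usage of the vault helpers above (addresses and values hypothetical):
+--   createDelegateWithdrawVault(gateway, "delegate-address", "withdraw-msg-id", 5000000, 1704092400000)
+-- vaults 5000000 mARIO under the withdrawal message id, and
+--   cancelGatewayDelegateVault(gateway, "delegate-address", "withdraw-msg-id")
+-- deletes that vault and re-stakes its balance at the gateway.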
+
+---@param gateway Gateway
+---@param delegateAddress WalletAddress
+---@param vaultId MessageId
+function unlockGatewayDelegateVault(gateway, delegateAddress, vaultId)
+	local delegate = gateway.delegates[delegateAddress]
+	assert(delegate, "Delegate not found")
+	local vault = delegate.vaults[vaultId]
+	assert(vault, "Vault not found")
+
+	balances.increaseBalance(delegateAddress, vault.balance)
+	-- delete the delegate's vault and prune the delegate if necessary
+	gateway.delegates[delegateAddress].vaults[vaultId] = nil
+	gar.pruneDelegateFromGatewayIfNecessary(delegateAddress, gateway)
+end
+
+--- @param gateway Gateway
+--- @param qty mARIO
+--- @param vaultId MessageId
+function reduceStakeFromGatewayVault(gateway, qty, vaultId)
+	local vault = gateway.vaults[vaultId]
+	assert(vault, "Vault not found")
+	assert(qty <= vault.balance, "Insufficient balance in vault")
+
+	if qty == vault.balance then
+		gateway.vaults[vaultId] = nil
+	else
+		gateway.vaults[vaultId].balance = vault.balance - qty
+	end
+end
+
+--- @param gateway Gateway
+--- @param delegateAddress WalletAddress
+--- @param qty mARIO
+--- @param vaultId MessageId
+function reduceStakeFromDelegateVault(gateway, delegateAddress, qty, vaultId)
+	local delegate = gateway.delegates[delegateAddress]
+	assert(delegate, "Delegate not found")
+	local vault = delegate.vaults[vaultId]
+	assert(vault, "Vault not found")
+	assert(qty <= vault.balance, "Insufficient balance in vault")
+
+	if qty == vault.balance then
+		gateway.delegates[delegateAddress].vaults[vaultId] = nil
+		gar.pruneDelegateFromGatewayIfNecessary(delegateAddress, gateway)
+	else
+		gateway.delegates[delegateAddress].vaults[vaultId].balance = vault.balance - qty
+	end
+end
+
 --- @param timestamp Timestamp
 function gar.scheduleNextGatewaysPruning(timestamp)
 	NextGatewaysPruneTimestamp = math.min(NextGatewaysPruneTimestamp or timestamp, timestamp)
diff --git a/src/main.lua b/src/main.lua
index 6096db13..a5c38811 100644
--- a/src/main.lua
+++ b/src/main.lua
@@ -14,8 +14,8 @@ Protocol = Protocol or ao.env.Process.Id
 Balances = Balances or {}
 if not Balances[Protocol] then -- initialize the balance for the process id
 	Balances = {
-		[Protocol] = math.floor(50000000 * 1000000), -- 50M ARIO
-		[Owner] = math.floor(constants.totalTokenSupply - (50000000 * 1000000)), -- 950M ARIO
+		[Protocol] = math.floor(constants.ARIOToMARIO(50000000)), -- 50M ARIO
+		[Owner] = math.floor(constants.totalTokenSupply - (constants.ARIOToMARIO(50000000))), -- 950M ARIO
 	}
 end
 Vaults = Vaults or {}
@@ -24,6 +24,8 @@ NameRegistry = NameRegistry or {}
 Epochs = Epochs or {}
 LastTickedEpochIndex = LastTickedEpochIndex or -1
 LastGracePeriodEntryEndTimestamp = LastGracePeriodEntryEndTimestamp or 0
+LastKnownMessageTimestamp = LastKnownMessageTimestamp or 0
+LastKnownMessageId = LastKnownMessageId or ""
 
 local utils = require("utils")
 local json = require("json")
@@ -114,8 +116,7 @@ local ActionMap = {
 	PrimaryName = "Primary-Name",
 }
 
---- @alias Message table -- an AO message TODO - update this type with the actual Message type
--- @param msg Message
+--- @param msg ParsedMessage
 --- @param response any
 local function Send(msg, response)
 	if msg.reply then
@@ -157,8 +158,8 @@ local function adjustSuppliesForFundingPlan(fundingPlan, rewardForInitiator)
 	LastKnownCirculatingSupply = LastKnownCirculatingSupply - fundingPlan.balance + rewardForInitiator
 end
 
---- @param ioEvent table
---- @param result BuyRecordResult|RecordInteractionResult
+--- @param ioEvent IOEvent
+--- @param result BuyRecordResult|RecordInteractionResult|CreatePrimaryNameResult|PrimaryNameRequestApproval
 local function addResultFundingPlanFields(ioEvent, result)
 	ioEvent:addFieldsWithPrefixIfExist(result.fundingPlan, "FP-", { "balance" })
 	local fundingPlanVaultsCount = 0
@@ -199,7 +200,7 @@ local function addResultFundingPlanFields(ioEvent, result)
 	adjustSuppliesForFundingPlan(result.fundingPlan, result.returnedName and result.returnedName.rewardForInitiator)
 end
 
---- @param ioEvent table
+--- @param ioEvent IOEvent
 ---@param result RecordInteractionResult|BuyRecordResult
 local function addRecordResultFields(ioEvent, result)
 	ioEvent:addFieldsIfExist(result, {
@@ -322,7 +323,7 @@ local function addPruneGatewaysResult(ioEvent, pruneGatewaysResult)
 	end
 end
 
---- @param ioEvent table
+--- @param ioEvent IOEvent
 local function addNextPruneTimestampsData(ioEvent)
 	ioEvent:addField("Next-Returned-Names-Prune-Timestamp", arns.nextReturnedNamesPruneTimestamp())
 	ioEvent:addField("Next-Epochs-Prune-Timestamp", epochs.nextEpochsPruneTimestamp())
@@ -333,7 +334,7 @@ local function addNextPruneTimestampsData(ioEvent)
 	ioEvent:addField("Next-Primary-Names-Prune-Timestamp", primaryNames.nextPrimaryNamesPruneTimestamp())
 end
 
---- @param ioEvent table
+--- @param ioEvent IOEvent
 --- @param prunedStateResult PruneStateResult
 local function addNextPruneTimestampsResults(ioEvent, prunedStateResult)
 	--- @type PrunedGatewaysResult
@@ -367,13 +368,13 @@ local function assertValidFundFrom(fundFrom)
 	assert(validFundFrom[fundFrom], "Invalid fund from type. Must be one of: any, balance, stakes")
 end
 
---- @param ioEvent table
+--- @param ioEvent IOEvent
 local function addPrimaryNameCounts(ioEvent)
 	ioEvent:addField("Total-Primary-Names", utils.lengthOfTable(primaryNames.getUnsafePrimaryNames()))
 	ioEvent:addField("Total-Primary-Name-Requests", utils.lengthOfTable(primaryNames.getUnsafePrimaryNameRequests()))
 end
 
---- @param ioEvent table
+--- @param ioEvent IOEvent
 --- @param primaryNameResult CreatePrimaryNameResult|PrimaryNameRequestApproval
 local function addPrimaryNameRequestData(ioEvent, primaryNameResult)
 	ioEvent:addFieldsIfExist(primaryNameResult, { "baseNameOwner" })
@@ -383,8 +384,55 @@ local function addPrimaryNameRequestData(ioEvent, primaryNameResult)
 	addPrimaryNameCounts(ioEvent)
 end
 
-local function addEventingHandler(handlerName, pattern, handleFn, critical)
+-- Sanitize inputs before every interaction
+local function assertAndSanitizeInputs(msg)
+	assert(
+		msg.Timestamp and tonumber(msg.Timestamp) >= LastKnownMessageTimestamp,
+		"Timestamp must be greater than or equal to the last known message timestamp of "
+			.. LastKnownMessageTimestamp
+			.. " but was "
+			.. tostring(msg.Timestamp)
+	)
+	assert(msg.From, "From is required")
+	assert(msg.Tags and type(msg.Tags) == "table", "Tags are required")
+
+	msg.Tags = utils.validateAndSanitizeInputs(msg.Tags)
+	msg.From = utils.formatAddress(msg.From)
+	msg.Timestamp = msg.Timestamp and tonumber(msg.Timestamp) -- Timestamp should always be provided by the CU
+end
+
+local function updateLastKnownMessage(msg)
+	if msg.Timestamp >= LastKnownMessageTimestamp then
+		LastKnownMessageTimestamp = msg.Timestamp
+		LastKnownMessageId = msg.Id
+	end
+end
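+
+-- Illustrative flow (hypothetical message): a message arriving with a string
+-- Timestamp of "1719900001000" passes assertAndSanitizeInputs when it is at or
+-- after LastKnownMessageTimestamp, has its Tags sanitized and its Timestamp
+-- coerced to a number, and is then recorded by updateLastKnownMessage.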
BuyRecordResult|RecordInteractionResult|CreatePrimaryNameResult|PrimaryNameRequestApproval local function addResultFundingPlanFields(ioEvent, result) ioEvent:addFieldsWithPrefixIfExist(result.fundingPlan, "FP-", { "balance" }) local fundingPlanVaultsCount = 0 @@ -199,7 +200,7 @@ local function addResultFundingPlanFields(ioEvent, result) adjustSuppliesForFundingPlan(result.fundingPlan, result.returnedName and result.returnedName.rewardForInitiator) end ---- @param ioEvent table +--- @param ioEvent IOEvent ---@param result RecordInteractionResult|BuyRecordResult local function addRecordResultFields(ioEvent, result) ioEvent:addFieldsIfExist(result, { @@ -322,7 +323,7 @@ local function addPruneGatewaysResult(ioEvent, pruneGatewaysResult) end end ---- @param ioEvent table +--- @param ioEvent IOEvent local function addNextPruneTimestampsData(ioEvent) ioEvent:addField("Next-Returned-Names-Prune-Timestamp", arns.nextReturnedNamesPruneTimestamp()) ioEvent:addField("Next-Epochs-Prune-Timestamp", epochs.nextEpochsPruneTimestamp()) @@ -333,7 +334,7 @@ local function addNextPruneTimestampsData(ioEvent) ioEvent:addField("Next-Primary-Names-Prune-Timestamp", primaryNames.nextPrimaryNamesPruneTimestamp()) end ---- @param ioEvent table +--- @param ioEvent IOEvent --- @param prunedStateResult PruneStateResult local function addNextPruneTimestampsResults(ioEvent, prunedStateResult) --- @type PrunedGatewaysResult @@ -367,13 +368,13 @@ local function assertValidFundFrom(fundFrom) assert(validFundFrom[fundFrom], "Invalid fund from type. Must be one of: any, balance, stakes") end ---- @param ioEvent table +--- @param ioEvent IOEvent local function addPrimaryNameCounts(ioEvent) ioEvent:addField("Total-Primary-Names", utils.lengthOfTable(primaryNames.getUnsafePrimaryNames())) ioEvent:addField("Total-Primary-Name-Requests", utils.lengthOfTable(primaryNames.getUnsafePrimaryNameRequests())) end ---- @param ioEvent table +--- @param ioEvent IOEvent --- @param primaryNameResult CreatePrimaryNameResult|PrimaryNameRequestApproval local function addPrimaryNameRequestData(ioEvent, primaryNameResult) ioEvent:addFieldsIfExist(primaryNameResult, { "baseNameOwner" }) @@ -383,8 +384,50 @@ local function addPrimaryNameRequestData(ioEvent, primaryNameResult) addPrimaryNameCounts(ioEvent) end -local function addEventingHandler(handlerName, pattern, handleFn, critical) +-- Sanitize inputs before every interaction +local function assertAndSanitizeInputs(msg) + assert( + msg.Timestamp and tonumber(msg.Timestamp) >= LastKnownMessageTimestamp, + "Timestamp must be greater than or equal to the last known message timestamp of " + .. LastKnownMessageTimestamp + .. " but was " + .. msg.Timestamp + ) + assert(msg.From, "From is required") + assert(msg.Tags and type(msg.Tags) == "table", "Tags are required") + + msg.Tags = utils.validateAndSanitizeInputs(msg.Tags) + msg.From = utils.formatAddress(msg.From) + msg.Timestamp = msg.Timestamp and tonumber(msg.Timestamp) -- Timestamp should always be provided by the CU +end + +local function updateLastKnownMessage(msg) + if msg.Timestamp >= LastKnownMessageTimestamp then + LastKnownMessageTimestamp = msg.Timestamp + LastKnownMessageId = msg.Id + end +end + +--- @alias IOEvent table -- TODO: Type this + +--- @class ParsedMessage +--- @field Id string +--- @field Action string +--- @field From string +--- @field Timestamp Timestamp +--- @field Tags table +--- @field ioEvent IOEvent +--- @field Cast boolean? +--- @field reply? 
fun(response: any) + +--- @param handlerName string +--- @param pattern fun(msg: ParsedMessage):'continue'|boolean +--- @param handleFn fun(msg: ParsedMessage) +--- @param critical boolean? +--- @param printEvent boolean? +local function addEventingHandler(handlerName, pattern, handleFn, critical, printEvent) critical = critical or false + printEvent = printEvent == nil and true or printEvent Handlers.add(handlerName, pattern, function(msg) -- add an IOEvent to the message if it doesn't exist msg.ioEvent = msg.ioEvent or ARIOEvent(msg) @@ -408,21 +451,24 @@ local function addEventingHandler(handlerName, pattern, handleFn, critical) local errorWithEvent = tostring(resultOrError) .. "\n" .. errorEvent:toJSON() error(errorWithEvent, 0) -- 0 ensures not to include this line number in the error message end - -- isolate out prune handler here when printing - if handlerName ~= "prune" then + if printEvent then msg.ioEvent:printEvent() end end) end --- prune state before every interaction +addEventingHandler("sanitize", function() + return "continue" +end, function(msg) + assertAndSanitizeInputs(msg) + updateLastKnownMessage(msg) +end, CRITICAL, false) + -- NOTE: THIS IS A CRITICAL HANDLER AND WILL DISCARD THE MEMORY ON ERROR addEventingHandler("prune", function() return "continue" -- continue is a pattern that matches every message and continues to the next handler that matches the tags end, function(msg) - local msgTimestamp = tonumber(msg.Timestamp or msg.Tags.Timestamp) - assert(msgTimestamp, "Timestamp is required for a tick interaction") - local epochIndex = epochs.getEpochIndexForTimestamp(msgTimestamp) + local epochIndex = epochs.getEpochIndexForTimestamp(msg.Timestamp) msg.ioEvent:addField("epochIndex", epochIndex) local previousStateSupplies = { @@ -435,54 +481,6 @@ end, function(msg) lastKnownTotalSupply = token.lastKnownTotalTokenSupply(), } - msg.From = utils.formatAddress(msg.From) - msg.Timestamp = msg.Timestamp and tonumber(msg.Timestamp) or nil - - local knownAddressTags = { - "Recipient", - "Initiator", - "Target", - "Source", - "Address", - "Vault-Id", - "Process-Id", - "Observer-Address", - } - - for _, tagName in ipairs(knownAddressTags) do - -- Format all incoming addresses - msg.Tags[tagName] = msg.Tags[tagName] and utils.formatAddress(msg.Tags[tagName]) or nil - end - - local knownNumberTags = { - "Quantity", - "Lock-Length", - "Operator-Stake", - "Delegated-Stake", - "Withdraw-Stake", - "Timestamp", - "Years", - "Min-Delegated-Stake", - "Port", - "Extend-Length", - "Delegate-Reward-Share-Ratio", - "Epoch-Index", - "Price-Interval-Ms", - "Block-Height", - } - for _, tagName in ipairs(knownNumberTags) do - -- Format all incoming numbers - msg.Tags[tagName] = msg.Tags[tagName] and tonumber(msg.Tags[tagName]) or nil - end - - local knownBooleanTags = { - "Allow-Unsafe-Addresses", - "Force-Prune", - } - for _, tagName in ipairs(knownBooleanTags) do - msg.Tags[tagName] = utils.booleanOrBooleanStringToBoolean(msg.Tags[tagName]) - end - if msg.Tags["Force-Prune"] then gar.scheduleNextGatewaysPruning(0) gar.scheduleNextRedelegationsPruning(0) @@ -492,9 +490,8 @@ end, function(msg) vaults.scheduleNextVaultsPruning(0) end - local msgId = msg.Id - print("Pruning state at timestamp: " .. msgTimestamp) - local prunedStateResult = prune.pruneState(msgTimestamp, msgId, LastGracePeriodEntryEndTimestamp) + print("Pruning state at timestamp: " .. 
msg.Timestamp) + local prunedStateResult = prune.pruneState(msg.Timestamp, msg.Id, LastGracePeriodEntryEndTimestamp) if prunedStateResult then local prunedRecordsCount = utils.lengthOfTable(prunedStateResult.prunedRecords or {}) @@ -571,9 +568,7 @@ end, function(msg) then addSupplyData(msg.ioEvent) end - - return prunedStateResult -end, CRITICAL) +end, CRITICAL, false) -- Write handlers addEventingHandler(ActionMap.Transfer, utils.hasMatchingTag("Action", ActionMap.Transfer), function(msg) @@ -597,6 +592,12 @@ addEventingHandler(ActionMap.Transfer, utils.hasMatchingTag("Action", ActionMap. msg.ioEvent:addField("RecipientNewBalance", recipientNewBalance) end + -- if the sender is the protocol, then we need to update the circulating supply as tokens are now in circulation + if msg.From == ao.id then + LastKnownCirculatingSupply = LastKnownCirculatingSupply + quantity + addSupplyData(msg.ioEvent) + end + -- Casting implies that the sender does not want a response - Reference: https://elixirforum.com/t/what-is-the-etymology-of-genserver-cast/33610/3 if not msg.Cast then -- Debit-Notice message template, that is sent to the Sender of the transfer @@ -642,7 +643,6 @@ end) addEventingHandler(ActionMap.CreateVault, utils.hasMatchingTag("Action", ActionMap.CreateVault), function(msg) local quantity = msg.Tags.Quantity local lockLengthMs = msg.Tags["Lock-Length"] - local timestamp = msg.Timestamp local msgId = msg.Id assert( lockLengthMs and lockLengthMs > 0 and utils.isInteger(lockLengthMs), @@ -652,8 +652,7 @@ addEventingHandler(ActionMap.CreateVault, utils.hasMatchingTag("Action", ActionM quantity and utils.isInteger(quantity) and quantity >= constants.MIN_VAULT_SIZE, "Invalid quantity. Must be integer greater than or equal to " .. constants.MIN_VAULT_SIZE .. " mARIO" ) - assert(timestamp, "Timestamp is required for a tick interaction") - local vault = vaults.createVault(msg.From, quantity, lockLengthMs, timestamp, msgId) + local vault = vaults.createVault(msg.From, quantity, lockLengthMs, msg.Timestamp, msgId) if vault ~= nil then msg.ioEvent:addField("Vault-Id", msgId) @@ -680,7 +679,6 @@ addEventingHandler(ActionMap.VaultedTransfer, utils.hasMatchingTag("Action", Act local recipient = msg.Tags.Recipient local quantity = msg.Tags.Quantity local lockLengthMs = msg.Tags["Lock-Length"] - local timestamp = msg.Timestamp local msgId = msg.Id local allowUnsafeAddresses = msg.Tags["Allow-Unsafe-Addresses"] or false assert(utils.isValidAddress(recipient, allowUnsafeAddresses), "Invalid recipient") @@ -692,11 +690,10 @@ addEventingHandler(ActionMap.VaultedTransfer, utils.hasMatchingTag("Action", Act quantity and utils.isInteger(quantity) and quantity >= constants.MIN_VAULT_SIZE, "Invalid quantity. Must be integer greater than or equal to " .. constants.MIN_VAULT_SIZE .. 
" mARIO" ) - assert(timestamp, "Timestamp is required for a tick interaction") assert(recipient ~= msg.From, "Cannot transfer to self") local vault = - vaults.vaultedTransfer(msg.From, recipient, quantity, lockLengthMs, timestamp, msgId, allowUnsafeAddresses) + vaults.vaultedTransfer(msg.From, recipient, quantity, lockLengthMs, msg.Timestamp, msgId, allowUnsafeAddresses) if vault ~= nil then msg.ioEvent:addField("Vault-Id", msgId) @@ -737,16 +734,14 @@ end) addEventingHandler(ActionMap.ExtendVault, utils.hasMatchingTag("Action", ActionMap.ExtendVault), function(msg) local vaultId = msg.Tags["Vault-Id"] - local timestamp = msg.Timestamp local extendLengthMs = msg.Tags["Extend-Length"] assert(utils.isValidAddress(vaultId, true), "Invalid vault id") assert( extendLengthMs and extendLengthMs > 0 and utils.isInteger(extendLengthMs), "Invalid extension length. Must be integer greater than 0" ) - assert(timestamp, "Timestamp is required for a tick interaction") - local vault = vaults.extendVault(msg.From, extendLengthMs, timestamp, vaultId) + local vault = vaults.extendVault(msg.From, extendLengthMs, msg.Timestamp, vaultId) if vault ~= nil then msg.ioEvent:addField("Vault-Id", vaultId) @@ -795,14 +790,12 @@ addEventingHandler(ActionMap.BuyRecord, utils.hasMatchingTag("Action", ActionMap local purchaseType = msg.Tags["Purchase-Type"] and string.lower(msg.Tags["Purchase-Type"]) or "lease" local years = msg.Tags.Years or nil local processId = msg.Tags["Process-Id"] - local timestamp = msg.Timestamp local fundFrom = msg.Tags["Fund-From"] local allowUnsafeProcessId = msg.Tags["Allow-Unsafe-Addresses"] assert( type(purchaseType) == "string" and purchaseType == "lease" or purchaseType == "permabuy", "Invalid purchase type" ) - assert(timestamp, "Timestamp is required for a tick interaction") arns.assertValidArNSName(name) assert(utils.isValidAddress(processId, true), "Process Id must be a valid address.") if years then @@ -817,7 +810,7 @@ addEventingHandler(ActionMap.BuyRecord, utils.hasMatchingTag("Action", ActionMap purchaseType, years, msg.From, - timestamp, + msg.Timestamp, processId, msg.Id, fundFrom, @@ -868,12 +861,10 @@ end) addEventingHandler("upgradeName", utils.hasMatchingTag("Action", ActionMap.UpgradeName), function(msg) local fundFrom = msg.Tags["Fund-From"] local name = string.lower(msg.Tags.Name) - local timestamp = msg.Timestamp assert(type(name) == "string", "Invalid name") - assert(timestamp, "Timestamp is required") assertValidFundFrom(fundFrom) - local result = arns.upgradeRecord(msg.From, name, timestamp, msg.Id, fundFrom) + local result = arns.upgradeRecord(msg.From, name, msg.Timestamp, msg.Id, fundFrom) local record = {} if result ~= nil then @@ -901,15 +892,13 @@ addEventingHandler(ActionMap.ExtendLease, utils.hasMatchingTag("Action", ActionM local fundFrom = msg.Tags["Fund-From"] local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil local years = msg.Tags.Years - local timestamp = msg.Timestamp assert(type(name) == "string", "Invalid name") assert( years and years > 0 and years < 5 and utils.isInteger(years), "Invalid years. 
Must be integer between 1 and 5" ) - assert(timestamp, "Timestamp is required") assertValidFundFrom(fundFrom) - local result = arns.extendLease(msg.From, name, years, timestamp, msg.Id, fundFrom) + local result = arns.extendLease(msg.From, name, years, msg.Timestamp, msg.Id, fundFrom) local recordResult = {} if result ~= nil then addRecordResultFields(msg.ioEvent, result) @@ -931,16 +920,14 @@ addEventingHandler( local fundFrom = msg.Tags["Fund-From"] local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil local quantity = msg.Tags.Quantity - local timestamp = msg.Timestamp assert(type(name) == "string", "Invalid name") assert( quantity and quantity > 0 and quantity < 9990 and utils.isInteger(quantity), "Invalid quantity. Must be an integer value greater than 0 and less than 9990" ) - assert(timestamp, "Timestamp is required") assertValidFundFrom(fundFrom) - local result = arns.increaseundernameLimit(msg.From, name, quantity, timestamp, msg.Id, fundFrom) + local result = arns.increaseundernameLimit(msg.From, name, quantity, msg.Timestamp, msg.Id, fundFrom) local recordResult = {} if result ~= nil then recordResult = result.record @@ -977,12 +964,14 @@ function assertTokenCostTags(msg) arns.assertValidArNSName(msg.Tags.Name) -- if years is provided, assert it is a number and integer between 1 and 5 if msg.Tags.Years then - assert(utils.isInteger(msg.Tags.Years), "Invalid years. Must be integer between 1 and 5") + assert(utils.isInteger(msg.Tags.Years), "Invalid years. Must be integer") + assert(msg.Tags.Years > 0 and msg.Tags.Years < 6, "Invalid years. Must be between 1 and 5") end -- if quantity provided must be a number and integer greater than 0 if msg.Tags.Quantity then - assert(utils.isInteger(msg.Tags.Quantity), "Invalid quantity. Must be integer greater than 0") + assert(utils.isInteger(msg.Tags.Quantity), "Invalid quantity. Must be integer") + assert(msg.Tags.Quantity > 0, "Invalid quantity. 
Must be greater than 0") end end @@ -993,7 +982,6 @@ addEventingHandler(ActionMap.TokenCost, utils.hasMatchingTag("Action", ActionMap local years = msg.Tags.Years or nil local quantity = msg.Tags.Quantity or nil local purchaseType = msg.Tags["Purchase-Type"] or "lease" - local timestamp = msg.Timestamp or msg.Tags.Timestamp local intendedAction = { intent = intent, @@ -1001,7 +989,7 @@ addEventingHandler(ActionMap.TokenCost, utils.hasMatchingTag("Action", ActionMap years = years, quantity = quantity, purchaseType = purchaseType, - currentTimestamp = timestamp, + currentTimestamp = msg.Timestamp, from = msg.From, } @@ -1021,7 +1009,6 @@ addEventingHandler(ActionMap.CostDetails, utils.hasMatchingTag("Action", ActionM local years = msg.Tags.Years or 1 local quantity = msg.Tags.Quantity local purchaseType = msg.Tags["Purchase-Type"] or "lease" - local timestamp = msg.Timestamp or msg.Tags.Timestamp assertTokenCostTags(msg) assertValidFundFrom(fundFrom) @@ -1031,7 +1018,7 @@ addEventingHandler(ActionMap.CostDetails, utils.hasMatchingTag("Action", ActionM years, quantity, purchaseType, - timestamp, + msg.Timestamp, msg.From, fundFrom ) @@ -1084,14 +1071,14 @@ addEventingHandler(ActionMap.JoinNetwork, utils.hasMatchingTag("Action", ActionM local fromAddress = msg.From local observerAddress = msg.Tags["Observer-Address"] or fromAddress local stake = msg.Tags["Operator-Stake"] - local timestamp = msg.Timestamp assert(not msg.Tags.Services or updatedServices, "Services must be a valid JSON string") msg.ioEvent:addField("Resolved-Observer-Address", observerAddress) msg.ioEvent:addField("Sender-Previous-Balance", Balances[fromAddress] or 0) - local gateway = gar.joinNetwork(fromAddress, stake, updatedSettings, updatedServices, observerAddress, timestamp) + local gateway = + gar.joinNetwork(fromAddress, stake, updatedSettings, updatedServices, observerAddress, msg.Timestamp) msg.ioEvent:addField("Sender-New-Balance", Balances[fromAddress] or 0) if gateway ~= nil then msg.ioEvent:addField("GW-Start-Timestamp", gateway.startTimestamp) @@ -1112,7 +1099,6 @@ addEventingHandler(ActionMap.JoinNetwork, utils.hasMatchingTag("Action", ActionM end) addEventingHandler(ActionMap.LeaveNetwork, utils.hasMatchingTag("Action", ActionMap.LeaveNetwork), function(msg) - local timestamp = msg.Timestamp local unsafeGatewayBeforeLeaving = gar.getGatewayUnsafe(msg.From) local gwPrevTotalDelegatedStake = 0 local gwPrevStake = 0 @@ -1122,9 +1108,8 @@ addEventingHandler(ActionMap.LeaveNetwork, utils.hasMatchingTag("Action", Action end assert(unsafeGatewayBeforeLeaving, "Gateway not found") - assert(timestamp, "Timestamp is required") - local gateway = gar.leaveNetwork(msg.From, timestamp, msg.Id) + local gateway = gar.leaveNetwork(msg.From, msg.Timestamp, msg.Id) if gateway ~= nil then msg.ioEvent:addField("GW-Vaults-Count", utils.lengthOfTable(gateway.vaults or {})) @@ -1207,8 +1192,6 @@ addEventingHandler( function(msg) local quantity = msg.Tags.Quantity local instantWithdraw = msg.Tags.Instant and msg.Tags.Instant == "true" or false - local timestamp = msg.Timestamp - assert(timestamp, "Timestamp is required") assert( quantity and utils.isInteger(quantity) and quantity > constants.minimumWithdrawalAmount, "Invalid quantity. Must be integer greater than " .. 
constants.minimumWithdrawalAmount @@ -1220,7 +1203,7 @@ addEventingHandler( msg.ioEvent:addField("Sender-Previous-Balance", Balances[msg.From]) - local result = gar.decreaseOperatorStake(msg.From, quantity, timestamp, msg.Id, instantWithdraw) + local result = gar.decreaseOperatorStake(msg.From, quantity, msg.Timestamp, msg.Id, instantWithdraw) local decreaseOperatorStakeResult = { gateway = result and result.gateway or {}, penaltyRate = result and result.penaltyRate or 0, @@ -1277,7 +1260,6 @@ addEventingHandler( addEventingHandler(ActionMap.DelegateStake, utils.hasMatchingTag("Action", ActionMap.DelegateStake), function(msg) local gatewayTarget = msg.Tags.Target or msg.Tags.Address local quantity = msg.Tags.Quantity - local timestamp = msg.Timestamp assert(utils.isValidAddress(gatewayTarget, true), "Invalid gateway address") assert( msg.Tags.Quantity and msg.Tags.Quantity > 0 and utils.isInteger(msg.Tags.Quantity), @@ -1286,7 +1268,7 @@ addEventingHandler(ActionMap.DelegateStake, utils.hasMatchingTag("Action", Actio msg.ioEvent:addField("Target-Formatted", gatewayTarget) - local gateway = gar.delegateStake(msg.From, gatewayTarget, quantity, timestamp) + local gateway = gar.delegateStake(msg.From, gatewayTarget, quantity, msg.Timestamp) local delegateResult = {} if gateway ~= nil then local newStake = gateway.delegates[msg.From].delegatedStake @@ -1354,13 +1336,11 @@ addEventingHandler( function(msg) local target = msg.Tags.Target or msg.Tags.Address or msg.From -- if not provided, use sender local vaultId = msg.Tags["Vault-Id"] - local timestamp = msg.Timestamp msg.ioEvent:addField("Target-Formatted", target) assert(utils.isValidAddress(target, true), "Invalid gateway address") assert(utils.isValidAddress(vaultId, true), "Invalid vault id") - assert(timestamp, "Timestamp is required") - local result = gar.instantGatewayWithdrawal(msg.From, target, vaultId, timestamp) + local result = gar.instantGatewayWithdrawal(msg.From, target, vaultId, msg.Timestamp) if result ~= nil then local vaultBalance = result.vaultBalance msg.ioEvent:addField("Stake-Amount-Withdrawn", vaultBalance) @@ -1396,7 +1376,6 @@ addEventingHandler( local target = msg.Tags.Target or msg.Tags.Address local quantity = msg.Tags.Quantity local instantWithdraw = msg.Tags.Instant and msg.Tags.Instant == "true" or false - local timestamp = msg.Timestamp msg.ioEvent:addField("Target-Formatted", target) msg.ioEvent:addField("Quantity", quantity) assert( @@ -1404,7 +1383,7 @@ addEventingHandler( "Invalid quantity. Must be integer greater than " .. 
constants.minimumWithdrawalAmount ) - local result = gar.decreaseDelegateStake(target, msg.From, quantity, timestamp, msg.Id, instantWithdraw) + local result = gar.decreaseDelegateStake(target, msg.From, quantity, msg.Timestamp, msg.Id, instantWithdraw) local decreaseDelegateStakeResult = { gateway = result and result.gateway or {}, penaltyRate = result and result.penaltyRate or 0, @@ -1513,9 +1492,14 @@ addEventingHandler( -- TODO: we could standardize this on our prepended handler to inject and ensure formatted addresses and converted values local observerAddress = msg.Tags["Observer-Address"] or unsafeGateway.observerAddress - local timestamp = msg.Timestamp - local result = - gar.updateGatewaySettings(msg.From, updatedSettings, updatedServices, observerAddress, timestamp, msg.Id) + local result = gar.updateGatewaySettings( + msg.From, + updatedSettings, + updatedServices, + observerAddress, + msg.Timestamp, + msg.Id + ) Send(msg, { Target = msg.From, Tags = { Action = ActionMap.UpdateGatewaySettings .. "-Notice" }, @@ -1528,16 +1512,14 @@ addEventingHandler(ActionMap.ReassignName, utils.hasMatchingTag("Action", Action local newProcessId = msg.Tags["Process-Id"] local name = string.lower(msg.Tags.Name) local initiator = msg.Tags.Initiator - local timestamp = msg.Timestamp local allowUnsafeProcessId = msg.Tags["Allow-Unsafe-Addresses"] assert(name and #name > 0, "Name is required") assert(utils.isValidAddress(newProcessId, true), "Process Id must be a valid address.") - assert(timestamp, "Timestamp is required") if initiator ~= nil then assert(utils.isValidAddress(initiator, true), "Invalid initiator address.") end - local reassignment = arns.reassignName(name, msg.From, timestamp, newProcessId, allowUnsafeProcessId) + local reassignment = arns.reassignName(name, msg.From, msg.Timestamp, newProcessId, allowUnsafeProcessId) Send(msg, { Target = msg.From, @@ -1560,13 +1542,12 @@ end) addEventingHandler(ActionMap.SaveObservations, utils.hasMatchingTag("Action", ActionMap.SaveObservations), function(msg) local reportTxId = msg.Tags["Report-Tx-Id"] local failedGateways = utils.splitAndTrimString(msg.Tags["Failed-Gateways"], ",") - local timestamp = msg.Timestamp assert(utils.isValidArweaveAddress(reportTxId), "Invalid report tx id. Must be a valid Arweave address.") for _, gateway in ipairs(failedGateways) do assert(utils.isValidAddress(gateway, true), "Invalid failed gateway address: " .. 
gateway) end - local observations = epochs.saveObservations(msg.From, reportTxId, failedGateways, timestamp) + local observations = epochs.saveObservations(msg.From, reportTxId, failedGateways, msg.Timestamp) if observations ~= nil then local failureSummariesCount = utils.lengthOfTable(observations.failureSummaries or {}) if failureSummariesCount > 0 then @@ -1669,12 +1650,11 @@ end) -- distribute rewards -- NOTE: THIS IS A CRITICAL HANDLER AND WILL DISCARD THE MEMORY ON ERROR addEventingHandler("distribute", utils.hasMatchingTag("Action", "Tick"), function(msg) - local msgTimestamp = msg.Timestamp local msgId = msg.Id local blockHeight = tonumber(msg["Block-Height"]) local hashchain = msg["Hash-Chain"] local lastTickedEpochIndex = LastTickedEpochIndex - local targetCurrentEpochIndex = epochs.getEpochIndexForTimestamp(msgTimestamp) + local targetCurrentEpochIndex = epochs.getEpochIndexForTimestamp(msg.Timestamp) assert(blockHeight, "Block height is required") assert(hashchain, "Hash chain is required") @@ -1708,7 +1688,7 @@ addEventingHandler("distribute", utils.hasMatchingTag("Action", "Tick"), functio local _, _, epochDistributionTimestamp = epochs.getEpochTimestampsForIndex(i) -- use the minimum of the msg timestamp or the epoch distribution timestamp, this ensures an epoch gets created for the genesis block -- and that we don't try and distribute before an epoch is created - local tickTimestamp = math.min(msgTimestamp or 0, epochDistributionTimestamp) + local tickTimestamp = math.min(msg.Timestamp, epochDistributionTimestamp) -- TODO: if we need to "recover" epochs, we can't rely on just the current message hashchain and block height, -- we should set the prescribed observers and names to empty arrays and distribute rewards accordingly local tickResult = tick.tickEpoch(tickTimestamp, blockHeight, hashchain, msgId) @@ -1901,15 +1881,21 @@ addEventingHandler(ActionMap.Record, utils.hasMatchingTag("Action", ActionMap.Re Send(msg, recordNotice) end) +-- TODO: this handler will not scale well as gateways and delegates increase, we should slice out the larger pieces (e.g. 
distributions should be fetched via a paginated handler) addEventingHandler(ActionMap.Epoch, utils.hasMatchingTag("Action", ActionMap.Epoch), function(msg) -- check if the epoch number is provided, if not get the epoch number from the timestamp - local providedEpochIndex = msg.Tags["Epoch-Index"] - local timestamp = msg.Timestamp - - assert(providedEpochIndex or timestamp, "Epoch index or timestamp is required") - - local epochIndex = providedEpochIndex or epochs.getEpochIndexForTimestamp(timestamp) + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) local epoch = epochs.getEpoch(epochIndex) + -- TODO: this check can be removed after 14 days of release once old epochs are pruned + if + not epoch.prescribedObservers + or not epoch.prescribedObservers[1] + or not epoch.prescribedObservers[1].gatewayAddress + then + epoch.prescribedObservers = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) + end + -- populate the prescribed observers with weights Send(msg, { Target = msg.From, Action = "Epoch-Notice", Data = json.encode(epoch) }) end) @@ -1938,29 +1924,20 @@ addEventingHandler( ActionMap.PrescribedObservers, utils.hasMatchingTag("Action", ActionMap.PrescribedObservers), function(msg) - -- check if the epoch number is provided, if not get the epoch number from the timestamp - local providedEpochIndex = msg.Tags["Epoch-Index"] - local timestamp = msg.Timestamp - - assert(providedEpochIndex or timestamp, "Epoch index or timestamp is required") - - local epochIndex = providedEpochIndex or epochs.getEpochIndexForTimestamp(timestamp) - local prescribedObservers = epochs.getPrescribedObserversForEpoch(epochIndex) + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) + local prescribedObserversWithWeights = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) Send(msg, { Target = msg.From, Action = "Prescribed-Observers-Notice", - Data = json.encode(prescribedObservers), + Data = json.encode(prescribedObserversWithWeights), }) end ) addEventingHandler(ActionMap.Observations, utils.hasMatchingTag("Action", ActionMap.Observations), function(msg) - local providedEpochIndex = msg.Tags["Epoch-Index"] - local timestamp = msg.Timestamp - - assert(providedEpochIndex or timestamp, "Epoch index or timestamp is required") - - local epochIndex = providedEpochIndex or epochs.getEpochIndexForTimestamp(timestamp) + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) local observations = epochs.getObservationsForEpoch(epochIndex) Send(msg, { Target = msg.From, @@ -1972,12 +1949,8 @@ end) addEventingHandler(ActionMap.PrescribedNames, utils.hasMatchingTag("Action", ActionMap.PrescribedNames), function(msg) -- check if the epoch number is provided, if not get the epoch number from the timestamp - local providedEpochIndex = msg.Tags["Epoch-Index"] - local timestamp = msg.Timestamp - - assert(providedEpochIndex or timestamp, "Epoch index or timestamp is required") - - local epochIndex = providedEpochIndex or epochs.getEpochIndexForTimestamp(timestamp) + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) local prescribedNames = epochs.getPrescribedNamesForEpoch(epochIndex) Send(msg, { Target = msg.From, @@ -1988,12 +1961,8 @@ end) addEventingHandler(ActionMap.Distributions, 
utils.hasMatchingTag("Action", ActionMap.Distributions), function(msg) -- check if the epoch number is provided, if not get the epoch number from the timestamp - local providedEpochIndex = msg.Tags["Epoch-Index"] - local timestamp = msg.Timestamp - - assert(providedEpochIndex or timestamp, "Epoch index or timestamp is required") - - local epochIndex = providedEpochIndex or epochs.getEpochIndexForTimestamp(timestamp) + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) local distributions = epochs.getDistributionsForEpoch(epochIndex) Send(msg, { Target = msg.From, @@ -2101,7 +2070,6 @@ addEventingHandler("releaseName", utils.hasMatchingTag("Action", ActionMap.Relea local name = msg.Tags.Name and string.lower(msg.Tags.Name) local processId = msg.From local initiator = msg.Tags.Initiator or msg.From - local timestamp = msg.Timestamp assert(name and #name > 0, "Name is required") -- this could be an undername, so we don't want to assertValidArNSName assert(processId and utils.isValidAddress(processId, true), "Process-Id must be a valid address") @@ -2115,11 +2083,11 @@ addEventingHandler("releaseName", utils.hasMatchingTag("Action", ActionMap.Relea #primaryNames.getPrimaryNamesForBaseName(name) == 0, "Primary names are associated with this name. They must be removed before releasing the name." ) - assert(timestamp, "Timestamp is required") + -- we should be able to create the returned name here local removedRecord = arns.removeRecord(name) local removedPrimaryNamesAndOwners = primaryNames.removePrimaryNamesForBaseName(name) -- NOTE: this should be empty if there are no primary names allowed before release - local returnedName = arns.createReturnedName(name, timestamp, initiator) + local returnedName = arns.createReturnedName(name, msg.Timestamp, initiator) local returnedNameData = { removedRecord = removedRecord, removedPrimaryNamesAndOwners = removedPrimaryNamesAndOwners, @@ -2236,12 +2204,11 @@ addEventingHandler("allowDelegates", utils.hasMatchingTag("Action", ActionMap.Al end) addEventingHandler("disallowDelegates", utils.hasMatchingTag("Action", ActionMap.DisallowDelegates), function(msg) - local timestamp = msg.Timestamp local disallowedDelegates = msg.Tags["Disallowed-Delegates"] and utils.splitAndTrimString(msg.Tags["Disallowed-Delegates"], ",") assert(disallowedDelegates and #disallowedDelegates > 0, "Disallowed-Delegates is required") msg.ioEvent:addField("Input-Disallowed-Delegates-Count", utils.lengthOfTable(disallowedDelegates)) - local result = gar.disallowDelegates(disallowedDelegates, msg.From, msg.Id, timestamp) + local result = gar.disallowDelegates(disallowedDelegates, msg.From, msg.Id, msg.Timestamp) if result ~= nil then msg.ioEvent:addField("New-Disallowed-Delegates", result.removedDelegates or {}) msg.ioEvent:addField("New-Disallowed-Delegates-Count", utils.lengthOfTable(result.removedDelegates)) @@ -2279,12 +2246,10 @@ addEventingHandler(ActionMap.RedelegateStake, utils.hasMatchingTag("Action", Act local delegateAddress = msg.From local quantity = msg.Tags.Quantity or nil local vaultId = msg.Tags["Vault-Id"] - local timestamp = msg.Timestamp assert(utils.isValidAddress(sourceAddress, true), "Invalid source gateway address") assert(utils.isValidAddress(targetAddress, true), "Invalid target gateway address") assert(utils.isValidAddress(delegateAddress, true), "Invalid delegator address") - assert(timestamp, "Timestamp is required") if vaultId then assert(utils.isValidAddress(vaultId, true), 
"Invalid vault id") end @@ -2295,7 +2260,7 @@ addEventingHandler(ActionMap.RedelegateStake, utils.hasMatchingTag("Action", Act targetAddress = targetAddress, delegateAddress = delegateAddress, qty = quantity, - currentTimestamp = timestamp, + currentTimestamp = msg.Timestamp, vaultId = vaultId, }) @@ -2392,13 +2357,11 @@ addEventingHandler("requestPrimaryName", utils.hasMatchingTag("Action", ActionMa local fundFrom = msg.Tags["Fund-From"] local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil local initiator = msg.From - local timestamp = msg.Timestamp assert(name, "Name is required") assert(initiator, "Initiator is required") - assert(timestamp, "Timestamp is required") assertValidFundFrom(fundFrom) - local primaryNameResult = primaryNames.createPrimaryNameRequest(name, initiator, timestamp, msg.Id, fundFrom) + local primaryNameResult = primaryNames.createPrimaryNameRequest(name, initiator, msg.Timestamp, msg.Id, fundFrom) addPrimaryNameRequestData(msg.ioEvent, primaryNameResult) @@ -2433,13 +2396,13 @@ addEventingHandler( function(msg) local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil local recipient = msg.Tags.Recipient or msg.From - local timestamp = msg.Timestamp + assert(name, "Name is required") assert(recipient, "Recipient is required") assert(msg.From, "From is required") - assert(timestamp, "Timestamp is required") - local approvedPrimaryNameResult = primaryNames.approvePrimaryNameRequest(recipient, name, msg.From, timestamp) + local approvedPrimaryNameResult = + primaryNames.approvePrimaryNameRequest(recipient, name, msg.From, msg.Timestamp) addPrimaryNameRequestData(msg.ioEvent, approvedPrimaryNameResult) --- send a notice to the from diff --git a/src/primary_names.lua b/src/primary_names.lua index 103ffdbb..f2487372 100644 --- a/src/primary_names.lua +++ b/src/primary_names.lua @@ -81,6 +81,7 @@ function primaryNames.createPrimaryNameRequest(name, initiator, timestamp, msgId local record = arns.getRecord(baseName) assert(record, "ArNS record '" .. baseName .. "' does not exist") + assert(arns.recordIsActive(record, timestamp), "ArNS record '" .. baseName .. 
"' is not active") local requestCost = arns.getTokenCost({ intent = "Primary-Name-Request", diff --git a/src/utils.lua b/src/utils.lua index ece2fe08..6faeb564 100644 --- a/src/utils.lua +++ b/src/utils.lua @@ -12,6 +12,9 @@ end --- @param value any The value to check --- @return boolean isInteger - whether the value is an integer function utils.isInteger(value) + if value == nil then + return false + end if type(value) == "string" then value = tonumber(value) end @@ -253,11 +256,18 @@ function utils.isValidArweaveAddress(address) return type(address) == "string" and #address == 43 and string.match(address, "^[%w-_]+$") ~= nil end ---- Checks if an address is a valid Ethereum address +--- Checks if an address looks like an unformatted Ethereum address +--- @param address string The address to check +--- @return boolean isValidUnformattedEthAddress - whether the address is a valid unformatted Ethereum address +function utils.isValidUnformattedEthAddress(address) + return type(address) == "string" and #address == 42 and string.match(address, "^0x[%x]+$") ~= nil +end + +--- Checks if an address is a valid Ethereum address and is in EIP-55 checksum format --- @param address string The address to check --- @return boolean isValidEthAddress - whether the address is a valid Ethereum address function utils.isValidEthAddress(address) - return type(address) == "string" and #address == 42 and string.match(address, "^0x[%x]+$") ~= nil + return utils.isValidUnformattedEthAddress(address) and address == utils.formatEIP55Address(address) end function utils.isValidUnsafeAddress(address) @@ -315,7 +325,7 @@ end --- @param address string The address to format --- @return string formattedAddress - the EIP-55 checksum formatted address function utils.formatAddress(address) - if utils.isValidEthAddress(address) then + if utils.isValidUnformattedEthAddress(address) then return utils.formatEIP55Address(address) end return address @@ -608,6 +618,82 @@ function utils.filterDictionary(tbl, predicate) return filtered end +--- Sanitizes inputs to ensure they are valid strings +--- @param table table The table to sanitize +--- @return table sanitizedTable - the sanitized table +function utils.validateAndSanitizeInputs(table) + assert(type(table) == "table", "Table must be a table") + local sanitizedTable = {} + for key, value in pairs(table) do + assert(type(key) == "string", "Key must be a string") + assert( + type(value) == "string" or type(value) == "number" or type(value) == "boolean", + "Value must be a string, integer, or boolean" + ) + if type(value) == "string" then + assert(#key > 0, "Key cannot be empty") + assert(#value > 0, "Value cannot be empty") + assert(not string.match(key, "^%s+$"), "Key cannot be only whitespace") + assert(not string.match(value, "^%s+$"), "Value cannot be only whitespace") + end + if type(value) == "boolean" then + assert(value == true or value == false, "Boolean value must be true or false") + end + if type(value) == "number" then + assert(utils.isInteger(value), "Number must be an integer") + end + sanitizedTable[key] = value + end + + local knownAddressTags = { + "Recipient", + "Initiator", + "Target", + "Source", + "Address", + "Vault-Id", + "Process-Id", + "Observer-Address", + } + + for _, tagName in ipairs(knownAddressTags) do + -- Format all incoming addresses + sanitizedTable[tagName] = sanitizedTable[tagName] and utils.formatAddress(sanitizedTable[tagName]) or nil + end + + local knownNumberTags = { + "Quantity", + "Lock-Length", + "Operator-Stake", + "Delegated-Stake", + 
"Withdraw-Stake", + "Timestamp", + "Years", + "Min-Delegated-Stake", + "Port", + "Extend-Length", + "Delegate-Reward-Share-Ratio", + "Epoch-Index", + "Price-Interval-Ms", + "Block-Height", + } + for _, tagName in ipairs(knownNumberTags) do + -- Format all incoming numbers + sanitizedTable[tagName] = sanitizedTable[tagName] and tonumber(sanitizedTable[tagName]) or nil + end + + local knownBooleanTags = { + "Allow-Unsafe-Addresses", + "Force-Prune", + } + for _, tagName in ipairs(knownBooleanTags) do + sanitizedTable[tagName] = sanitizedTable[tagName] + and utils.booleanOrBooleanStringToBoolean(sanitizedTable[tagName]) + or nil + end + return sanitizedTable +end + --- @param value string|boolean --- @return boolean function utils.booleanOrBooleanStringToBoolean(value) diff --git a/src/vaults.lua b/src/vaults.lua index 55380ae9..0df54bcd 100644 --- a/src/vaults.lua +++ b/src/vaults.lua @@ -57,6 +57,7 @@ end function vaults.vaultedTransfer(from, recipient, qty, lockLengthMs, currentTimestamp, vaultId, allowUnsafeAddresses) assert(utils.isValidAddress(recipient, allowUnsafeAddresses), "Invalid recipient") assert(qty > 0, "Quantity must be greater than 0") + assert(recipient ~= from, "Cannot transfer to self") assert(balances.walletHasSufficientBalance(from, qty), "Insufficient balance") assert(not vaults.getVault(recipient, vaultId), "Vault with id " .. vaultId .. " already exists") assert( diff --git a/tests/arns.test.mjs b/tests/arns.test.mjs index a3f407fd..60614e7a 100644 --- a/tests/arns.test.mjs +++ b/tests/arns.test.mjs @@ -40,7 +40,7 @@ describe('ArNS', async () => { afterEach(async () => { await assertNoInvariants({ - timestamp: STUB_TIMESTAMP, + timestamp: 1719988800001, // after latest known timestamp from a test memory: sharedMemory, }); }); @@ -74,6 +74,7 @@ describe('ArNS', async () => { { name: 'Process-Id', value: processId }, { name: 'Years', value: '1' }, ], + Timestamp: STUB_TIMESTAMP, }, memory, }); @@ -159,11 +160,11 @@ describe('ArNS', async () => { describe('Buy-Record', () => { it('should buy a record with an Arweave address', async () => { - await runBuyRecord({ sender: STUB_ADDRESS }); + sharedMemory = (await runBuyRecord({ sender: STUB_ADDRESS })).memory; }); it('should buy a record with an Ethereum address', async () => { - await runBuyRecord({ sender: testEthAddress }); + sharedMemory = await runBuyRecord({ sender: testEthAddress }); }); it('should fail to buy a permanently registered record', async () => { @@ -222,6 +223,7 @@ describe('ArNS', async () => { 'Name is already registered', ); assert(alreadyRegistered); + sharedMemory = failedBuyRecordResult.Memory; }); it('should buy a record and default the name to lower case', async () => { @@ -260,6 +262,7 @@ describe('ArNS', async () => { type: 'lease', undernameLimit: 10, }); + sharedMemory = realRecord.Memory; }); }); @@ -325,9 +328,10 @@ describe('ArNS', async () => { }); const record = JSON.parse(result.Messages[0].Data); assert.equal(record.undernameLimit, 11); + return increaseUndernameResult.Memory; }; await assertIncreaseUndername(STUB_ADDRESS); - await assertIncreaseUndername(testEthAddress); + sharedMemory = await assertIncreaseUndername(testEthAddress); }); it('should increase the undernames by spending from stakes', async () => { @@ -359,7 +363,6 @@ describe('ArNS', async () => { { name: 'Quantity', value: `${650000000}` }, // delegate all of their balance { name: 'Address', value: STUB_OPERATOR_ADDRESS }, // our gateway address ], - Timestamp: STUB_TIMESTAMP + 1, }, memory, }); @@ -413,9 +416,10 @@ 
describe('ArNS', async () => { }); const record = JSON.parse(result.Messages[0].Data); assert.equal(record.undernameLimit, 11); + return increaseUndernameResult.Memory; }; await assertIncreaseUndername(STUB_ADDRESS); - await assertIncreaseUndername(testEthAddress); + sharedMemory = await assertIncreaseUndername(testEthAddress); }); }); @@ -436,6 +440,7 @@ describe('ArNS', async () => { assert(priceList[key].permabuy); assert(Object.keys(priceList[key].lease).length == 5); }); + sharedMemory = priceListResult.Memory; }); }); @@ -478,6 +483,7 @@ describe('ArNS', async () => { stakes: [], }, }); + sharedMemory = result.Memory; }); it('should return the correct cost of increasing an undername limit', async () => { @@ -514,6 +520,7 @@ describe('ArNS', async () => { const tokenCost = JSON.parse(result.Messages[0].Data); const expectedPrice = 500000000 * 0.001 * 1 * 1; assert.equal(tokenCost, expectedPrice); + sharedMemory = result.Memory; }); it('should return the correct cost of extending an existing leased record', async () => { @@ -549,6 +556,7 @@ describe('ArNS', async () => { }); const tokenCost = JSON.parse(result.Messages[0].Data); assert.equal(tokenCost, 200000000); // known cost for extending a 9 character name by 2 years (500 ARIO * 0.2 * 2) + sharedMemory = result.Memory; }); it('should get the cost of upgrading an existing leased record to permanently owned', async () => { @@ -584,6 +592,7 @@ describe('ArNS', async () => { const tokenCost = JSON.parse(upgradeNameResult.Messages[0].Data); assert.equal(tokenCost, basePermabuyPrice); + sharedMemory = upgradeNameResult.Memory; }); it('should return the correct cost of creating a primary name request', async () => { @@ -628,6 +637,7 @@ describe('ArNS', async () => { }); const undernameTokenCost = JSON.parse(undernameResult.Messages[0].Data); assert.equal(undernameTokenCost, tokenCost); + sharedMemory = undernameResult.Memory; }); }); @@ -679,6 +689,7 @@ describe('ArNS', async () => { record.endTimestamp, recordBefore.endTimestamp + 60 * 1000 * 60 * 24 * 365, ); + sharedMemory = recordResult.Memory; }); it('should properly handle extending a leased record paying with balance and stakes', async () => { @@ -687,6 +698,7 @@ describe('ArNS', async () => { memory, transferQty: 700000000, // 600000000 for name purchase + 100000000 for extending the lease stakeQty: 650000000, // delegate most of their balance so that name purchase uses balance and stakes + timestamp: STUB_TIMESTAMP, }); memory = stakeResult.memory; @@ -750,6 +762,7 @@ describe('ArNS', async () => { recordBefore.endTimestamp + 60 * 1000 * 60 * 24 * 365, record.endTimestamp, ); + sharedMemory = recordResult.Memory; }); }); @@ -812,6 +825,7 @@ describe('ArNS', async () => { undernameLimit: 10, purchasePrice: basePermabuyPrice, // expected price for a permanent 9 character name }); + sharedMemory = upgradeNameResult.Memory; }); it('should properly handle upgrading a name paying with balance and stakes', async () => { @@ -821,6 +835,7 @@ describe('ArNS', async () => { transferQty: 3_100_000_000, // 60,000,0000 for name purchase + 2,500,000,000 for upgrading the name stakeQty: 3_100_000_000 - 50_000_000, // delegate most of their balance so that name purchase uses balance and stakes stakerAddress: STUB_ADDRESS, + timestamp: STUB_TIMESTAMP, }); memory = stakeResult.memory; @@ -887,6 +902,7 @@ describe('ArNS', async () => { purchasePrice: 2500000000, // expected price for a permanent 9 character name }, ); + sharedMemory = upgradeNameResult.Memory; }); }); @@ -1055,6 +1071,7 @@ 
describe('ArNS', async () => { const balancesResult = await handle({ options: { Tags: [{ name: 'Action', value: 'Balances' }], + Timestamp: newBuyTimestamp, }, memory: newBuyResult.Memory, }); @@ -1064,9 +1081,11 @@ describe('ArNS', async () => { initialRecord.purchasePrice + expectedRewardForProtocol; const balances = JSON.parse(balancesResult.Messages[0].Data); + assert.equal(balances[initiator], expectedRewardForInitiator); assert.equal(balances[PROCESS_ID], expectedProtocolBalance); assert.equal(balances[newBuyerAddress], 0); + sharedMemory = balancesResult.Memory; }); const runReturnedNameTest = async ({ fundFrom }) => { @@ -1075,6 +1094,7 @@ describe('ArNS', async () => { processId: ''.padEnd(43, 'a'), type: 'lease', years: 1, + timestamp: STUB_TIMESTAMP, memory: sharedMemory, }); @@ -1148,6 +1168,7 @@ describe('ArNS', async () => { recipient: bidderAddress, quantity: expectedPurchasePrice, memory: tickResult.Memory, + timestamp: bidTimestamp, }); let memoryToUse = transferMemory; @@ -1158,6 +1179,7 @@ describe('ArNS', async () => { transferQty: 0, stakeQty: expectedPurchasePrice, stakerAddress: bidderAddress, + timestamp: bidTimestamp, }); memoryToUse = stakeResult.memory; } @@ -1291,6 +1313,7 @@ describe('ArNS', async () => { const balancesResult = await handle({ options: { Tags: [{ name: 'Action', value: 'Balances' }], + Timestamp: bidTimestamp, }, memory: buyReturnedNameResult.Memory, }); @@ -1302,14 +1325,15 @@ describe('ArNS', async () => { const balances = JSON.parse(balancesResult.Messages[0].Data); assert.equal(balances[PROCESS_ID], expectedProtocolBalance); assert.equal(balances[bidderAddress], 0); + return balancesResult.Memory; }; it('should create a lease expiration initiated returned name and accept buy records for it', async () => { - await runReturnedNameTest({}); + sharedMemory = await runReturnedNameTest({}); }); it('should create a lease expiration initiated returned name and accept a buy record funded by stakes', async () => { - await runReturnedNameTest({ fundFrom: 'stakes' }); + sharedMemory = await runReturnedNameTest({ fundFrom: 'stakes' }); }); }); @@ -1429,6 +1453,7 @@ describe('ArNS', async () => { ); const expectedFloorPrice = baseLeasePriceFor9CharNameFor1Year; assert.equal(tokenCostForReturnedNameAfterThePeriod, expectedFloorPrice); + sharedMemory = tokenCostResultForReturnedNameAfterThePeriod.Memory; }); }); @@ -1461,6 +1486,7 @@ describe('ArNS', async () => { ); assert.equal(releaseNameErrorTag, undefined); assert.equal(reassignNameResult.Messages?.[0]?.Target, processId); + sharedMemory = reassignNameResult.Memory; }); it('should reassign an arns name to a new process id with initiator', async () => { @@ -1493,6 +1519,7 @@ describe('ArNS', async () => { assert.equal(releaseNameErrorTag, undefined); assert.equal(reassignNameResult.Messages?.[0]?.Target, processId); assert.equal(reassignNameResult.Messages?.[1]?.Target, STUB_MESSAGE_ID); // Check for the message sent to the initiator + sharedMemory = reassignNameResult.Memory; }); it('should not reassign an arns name with invalid ownership', async () => { @@ -1523,6 +1550,7 @@ describe('ArNS', async () => { (tag) => tag.name === 'Error', ); assert.ok(releaseNameErrorTag, 'Error tag should be present'); + sharedMemory = reassignNameResult.Memory; }); it('should not reassign an arns name with invalid new process id', async () => { @@ -1553,12 +1581,14 @@ describe('ArNS', async () => { (tag) => tag.name === 'Error', ); assert.ok(releaseNameErrorTag, 'Error tag should be present'); + sharedMemory = 
reassignNameResult.Memory; }); }); describe('Paginated-Records', () => { it('should paginate records correctly', async () => { // buy 3 records + let lastTimestamp = STUB_TIMESTAMP; let buyRecordsMemory = sharedMemory; // updated after each purchase const recordsCount = 3; for (let i = 0; i < recordsCount; i++) { @@ -1569,11 +1599,12 @@ describe('ArNS', async () => { { name: 'Name', value: `test-name-${i}` }, { name: 'Process-Id', value: ''.padEnd(43, `${i}`) }, ], - Timestamp: STUB_TIMESTAMP + i * 1000, // order of names is based on timestamp + Timestamp: lastTimestamp + i * 1000, // order of names is based on timestamp }, memory: buyRecordsMemory, }); buyRecordsMemory = buyRecordsResult.Memory; + lastTimestamp = lastTimestamp + i * 1000; } // call the paginated records handler repeatedly until all records are fetched @@ -1587,6 +1618,7 @@ describe('ArNS', async () => { { name: 'Cursor', value: cursor }, { name: 'Limit', value: 1 }, ], + Timestamp: lastTimestamp, }, memory: buyRecordsMemory, }); @@ -1625,6 +1657,7 @@ describe('ArNS', async () => { paginatedRecords.map((record) => record.name), expectedNames, ); + sharedMemory = buyRecordsMemory; }); }); @@ -1661,6 +1694,7 @@ describe('ArNS', async () => { { name: 'Action', value: 'Gateway' }, { name: 'Address', value: joinedGateway }, ], + Timestamp: afterDistributionTimestamp, }, memory: firstTickAndDistribution.Memory, }); @@ -1680,7 +1714,7 @@ describe('ArNS', async () => { const transferMemory = await transfer({ recipient: nonEligibleAddress, quantity: 200_000_000_000, - timestamp: afterDistributionTimestamp - 1, + timestamp: afterDistributionTimestamp, memory: firstTickAndDistribution.Memory, }); arnsDiscountMemory = transferMemory; @@ -1713,6 +1747,7 @@ describe('ArNS', async () => { name: 'ArNS Discount', }, ]); + sharedMemory = result.Memory; }); it('should return the correct cost for a buy record by a non-eligible gateway', async () => { @@ -1725,6 +1760,7 @@ describe('ArNS', async () => { { name: 'Intent', value: 'Buy-Record' }, { name: 'Name', value: 'test-name' }, ], + Timestamp: afterDistributionTimestamp, }, memory: arnsDiscountMemory, }); @@ -1732,6 +1768,7 @@ describe('ArNS', async () => { const costDetails = JSON.parse(result.Messages[0].Data); assert.equal(costDetails.tokenCost, baseLeasePriceFor9CharNameFor1Year); assert.deepEqual(costDetails.discounts, []); + sharedMemory = result.Memory; }); describe('for an existing record', () => { @@ -1810,6 +1847,7 @@ describe('ArNS', async () => { name: 'ArNS Discount', }, ]); + sharedMemory = result.Memory; }); it('should not apply the discount to extending the lease for a non-eligible gateway', async () => { @@ -1825,6 +1863,7 @@ describe('ArNS', async () => { const { tokenCost, discounts } = JSON.parse(result.Messages[0].Data); assert.equal(tokenCost, baseLeaseOneYearExtensionPrice); assert.deepEqual(discounts, []); + sharedMemory = result.Memory; }); it('balances should be updated when the extend lease action is performed', async () => { @@ -1878,6 +1917,7 @@ describe('ArNS', async () => { nonEligibleGatewayBalanceBefore - baseLeaseOneYearExtensionPrice, nonEligibleBalanceAfter, ); + sharedMemory = nonEligibleGatewayResult.Memory; }); describe('upgrading the lease to a permabuy', () => { @@ -1910,6 +1950,7 @@ describe('ArNS', async () => { name: 'ArNS Discount', }, ]); + sharedMemory = result.Memory; }); it('should not apply the discount to increasing the undername limit for a non-eligible gateway', async () => { @@ -1918,6 +1959,7 @@ describe('ArNS', async () => { From: 
nonEligibleAddress, Owner: nonEligibleAddress, Tags: upgradeToPermabuyTags, + Timestamp: upgradeToPermabuyTimestamp, }, memory: buyRecordResult.Memory, }); @@ -1926,6 +1968,7 @@ describe('ArNS', async () => { ); assert.equal(tokenCost, basePermabuyPrice); assert.deepEqual(discounts, []); + sharedMemory = result.Memory; }); }); @@ -1961,6 +2004,7 @@ describe('ArNS', async () => { name: 'ArNS Discount', }, ]); + sharedMemory = result.Memory; }); it('should not apply the discount to increasing the undername limit for a non-eligible gateway', async () => { @@ -1979,6 +2023,7 @@ describe('ArNS', async () => { ); assert.equal(tokenCost, undernameCostsForOneYear); assert.deepEqual(discounts, []); + sharedMemory = result.Memory; }); }); }); @@ -2000,6 +2045,7 @@ describe('ArNS', async () => { assert.equal(sortBy, 'name'); assert.equal(sortOrder, 'desc'); assert.equal(totalItems, 0); + sharedMemory = result.Memory; }); }); }); diff --git a/tests/epochs.test.mjs b/tests/epochs.test.mjs new file mode 100644 index 00000000..5a01ade0 --- /dev/null +++ b/tests/epochs.test.mjs @@ -0,0 +1,151 @@ +import { + buyRecord, + getEpoch, + joinNetwork, + getPrescribedObservers, + getPrescribedNames, + tick, + startMemory, + totalTokenSupply, + getEpochSettings, +} from './helpers.mjs'; +import { describe, it, before } from 'node:test'; +import assert from 'node:assert'; +import { + INITIAL_OPERATOR_STAKE, + STUB_ADDRESS, + STUB_OPERATOR_ADDRESS, + STUB_TIMESTAMP, +} from '../tools/constants.mjs'; + +const firstEpochStartTimestamp = 1719900000000; +const epochLength = 1000 * 60 * 60 * 24; // 24 hours +const distributionDelay = 1000 * 60 * 40; // 40 minutes + +describe('epochs', () => { + let sharedMemory; + + before(async () => { + const { Memory: totalTokenSupplyMemory } = await totalTokenSupply({ + memory: startMemory, + }); + // have a gateway join, and add an arns name which will be used to prescribe names and observers + const { memory: gatewayJoinMemory } = await joinNetwork({ + memory: totalTokenSupplyMemory, + address: STUB_OPERATOR_ADDRESS, + }); + const { memory: buyRecordMemory } = await buyRecord({ + memory: gatewayJoinMemory, + name: 'prescribed-name', + type: 'permabuy', + from: STUB_OPERATOR_ADDRESS, + }); + const { memory: tickMemory } = await tick({ + memory: buyRecordMemory, + timestamp: firstEpochStartTimestamp, + }); + sharedMemory = tickMemory; + }); + + describe('Epoch', () => { + it('should return the current epoch', async () => { + const epoch = await getEpoch({ + memory: sharedMemory, + timestamp: firstEpochStartTimestamp, + }); + assert.deepStrictEqual(epoch, { + epochIndex: 0, + startTimestamp: firstEpochStartTimestamp, + endTimestamp: firstEpochStartTimestamp + epochLength, + startHeight: 1, + distributionTimestamp: + firstEpochStartTimestamp + epochLength + distributionDelay, + prescribedObservers: [ + { + observerAddress: STUB_ADDRESS, + gatewayAddress: STUB_OPERATOR_ADDRESS, + stakeWeight: 1, + gatewayRewardRatioWeight: 1, + observerRewardRatioWeight: 1, + compositeWeight: 4, + normalizedCompositeWeight: 1, + tenureWeight: 4, + stake: INITIAL_OPERATOR_STAKE, + startTimestamp: STUB_TIMESTAMP, + }, + ], + prescribedNames: ['prescribed-name'], + observations: { + failureSummaries: [], + reports: [], + }, + distributions: { + totalEligibleGatewayReward: 22500900000, + totalEligibleGateways: 1, + totalEligibleObserverReward: 2500100000, + totalEligibleRewards: 25001000000, + rewards: { + eligible: { + [STUB_OPERATOR_ADDRESS]: { + delegateRewards: [], + operatorReward: 25001000000, // 
0.001 of the protocol balance after the transfers and name purchase + }, + }, + }, + }, + }); + + // TODO (PE-7321): add a test for an empty epoch before names and gateways have been prescribed + }); + }); + + describe('Prescribed Observers', () => { + it('should return the correct epoch for the current epoch with weights', async () => { + const prescribedObservers = await getPrescribedObservers({ + memory: sharedMemory, + timestamp: firstEpochStartTimestamp, + }); + assert.deepStrictEqual(prescribedObservers, [ + { + compositeWeight: 4, + gatewayAddress: STUB_OPERATOR_ADDRESS, + gatewayRewardRatioWeight: 1, + normalizedCompositeWeight: 1, + observerAddress: STUB_ADDRESS, + observerRewardRatioWeight: 1, + stakeWeight: 1, + stake: INITIAL_OPERATOR_STAKE, + startTimestamp: STUB_TIMESTAMP, + tenureWeight: 4, + }, + ]); + }); + }); + + describe('Prescribed Names', () => { + it('should return the correct epoch for the first epoch', async () => { + const prescribedNames = await getPrescribedNames({ + memory: sharedMemory, + timestamp: firstEpochStartTimestamp, + }); + assert.deepStrictEqual(prescribedNames, ['prescribed-name']); + }); + }); + + describe('Epoch-Settings', () => { + it('should return the correct epoch settings', async () => { + const epochSettings = await getEpochSettings({ + memory: sharedMemory, + timestamp: firstEpochStartTimestamp, + }); + assert.deepStrictEqual(epochSettings, { + maxObservers: 50, + epochZeroStartTimestamp: firstEpochStartTimestamp, + durationMs: epochLength, + distributionDelayMs: distributionDelay, + prescribedNameCount: 2, + pruneEpochsCount: 14, + }); + }); + }); +}); diff --git a/tests/gar.test.mjs b/tests/gar.test.mjs index 664161fc..380b687d 100644 --- a/tests/gar.test.mjs +++ b/tests/gar.test.mjs @@ -45,6 +45,7 @@ describe('GatewayRegistry', async () => { const STUB_ADDRESS_9 = ''.padEnd(43, '9'); let sharedMemory = startMemory; // memory we'll use across unique tests; + let lastTimestamp = STUB_TIMESTAMP; beforeEach(async () => { const { Memory: totalTokenSupplyMemory } = await totalTokenSupply({ @@ -56,11 +57,12 @@ describe('GatewayRegistry', async () => { }); // NOTE: all tests will start with this gateway joined to the network - use `sharedMemory` for the first interaction for each test to avoid having to join the network again sharedMemory = joinNetworkMemory; + lastTimestamp = STUB_TIMESTAMP + 1000 * 60; // Default 60s after the stubbed timestamp, some tests will override this }); afterEach(async () => { await assertNoInvariants({ - timestamp: STUB_TIMESTAMP, + timestamp: lastTimestamp, memory: sharedMemory, }); }); @@ -320,6 +322,7 @@ describe('GatewayRegistry', async () => { const gateway = await getGateway({ memory: sharedMemory, address: STUB_ADDRESS, + timestamp: STUB_TIMESTAMP, }); // leave at timestamp @@ -334,6 +337,7 @@ describe('GatewayRegistry', async () => { const leavingGateway = await getGateway({ memory: leaveNetworkMemory, address: STUB_ADDRESS, + timestamp: leavingTimestamp, }); assert.deepStrictEqual(leavingGateway, { ...gateway, @@ -347,6 +351,7 @@ describe('GatewayRegistry', async () => { await getGatewayVaultsItems({ memory: leaveNetworkMemory, gatewayAddress: STUB_ADDRESS, + timestamp: leavingTimestamp, }), [ { @@ -360,6 +365,7 @@ describe('GatewayRegistry', async () => { ); sharedMemory = leaveNetworkMemory; + lastTimestamp = leavingTimestamp; }); }); @@ -372,11 +378,13 @@ describe('GatewayRegistry', async () => { expectedDelegates, expectedAllowedDelegates, inputMemory = sharedMemory, + timestamp = STUB_TIMESTAMP, }) { // 
gateway before const gateway = await getGateway({ address: STUB_ADDRESS, memory: inputMemory, + timestamp, }); const { memory: updatedSettingsMemory } = await updateGatewaySettings({ @@ -386,12 +394,14 @@ describe('GatewayRegistry', async () => { { name: 'Action', value: 'Update-Gateway-Settings' }, ...settingsTags, ], + timestamp, }); // check the gateway record from contract const updatedGateway = await getGateway({ address: STUB_ADDRESS, memory: updatedSettingsMemory, + timestamp, }); // should match old gateway, with new settings @@ -409,10 +419,10 @@ describe('GatewayRegistry', async () => { for (const delegateAddress of delegateAddresses) { const maybeDelegateResult = await delegateStake({ memory: nextMemory, - timestamp: STUB_TIMESTAMP, delegatorAddress: delegateAddress, quantity: 10_000_000, gatewayAddress: STUB_ADDRESS, + timestamp, }).catch(() => {}); if (maybeDelegateResult?.memory) { nextMemory = maybeDelegateResult.memory; @@ -421,6 +431,7 @@ describe('GatewayRegistry', async () => { const updatedGatewayDelegates = await getDelegatesItems({ memory: nextMemory, gatewayAddress: STUB_ADDRESS, + timestamp, }); assert.deepStrictEqual( updatedGatewayDelegates @@ -432,7 +443,7 @@ describe('GatewayRegistry', async () => { await getAllowedDelegates({ memory: nextMemory, from: STUB_ADDRESS, - timestamp: STUB_TIMESTAMP, + timestamp, gatewayAddress: STUB_ADDRESS, }); const updatedAllowedDelegates = JSON.parse( @@ -535,6 +546,7 @@ describe('GatewayRegistry', async () => { gatewayAddress: STUB_ADDRESS, }); const updatedMemory = await updateGatewaySettingsTest({ + timestamp: STUB_TIMESTAMP + 1, inputMemory: stakedMemory, settingsTags: [ { name: 'Allow-Delegated-Staking', value: 'allowlist' }, @@ -561,11 +573,12 @@ describe('GatewayRegistry', async () => { Tags: [ { name: 'Action', value: 'Paginated-Delegations' }, { name: 'Limit', value: '100' }, - { name: 'Sort-Order', value: 'desc' }, + { name: 'Sort-Order', value: 'asc' }, { name: 'Sort-By', value: 'startTimestamp' }, ], }, memory: updatedMemory, + timestamp: STUB_TIMESTAMP + 5, }); assertNoResultError(delegationsResult); assert.deepStrictEqual( @@ -581,11 +594,11 @@ describe('GatewayRegistry', async () => { { type: 'vault', gatewayAddress: STUB_ADDRESS, - delegationId: `${STUB_ADDRESS}_${STUB_TIMESTAMP}`, + delegationId: `${STUB_ADDRESS}_${STUB_TIMESTAMP + 1}`, vaultId: 'mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm', balance: 10_000_000, - endTimestamp: 90 * 24 * 60 * 60 * 1000 + STUB_TIMESTAMP, - startTimestamp: STUB_TIMESTAMP, + endTimestamp: 90 * 24 * 60 * 60 * 1000 + STUB_TIMESTAMP + 1, + startTimestamp: STUB_TIMESTAMP + 1, }, ], JSON.parse(delegationsResult.Messages[0].Data).items, @@ -603,6 +616,7 @@ describe('GatewayRegistry', async () => { delegateAddresses: [STUB_ADDRESS_6], // not allowed to delegate expectedDelegates: [STUB_ADDRESS_7, STUB_ADDRESS_8, STUB_ADDRESS_9], // Leftover from previous test and being forced to exit expectedAllowedDelegates: [], + timestamp: STUB_TIMESTAMP + 3, }); }); @@ -625,11 +639,13 @@ describe('GatewayRegistry', async () => { delegateAddresses: [STUB_ADDRESS_9], // no one is allowed yet expectedDelegates: [STUB_ADDRESS_8], // 8 is exiting expectedAllowedDelegates: [], + timestamp: STUB_TIMESTAMP + 1, }); const delegateItems = await getDelegatesItems({ memory: updatedMemory, gatewayAddress: STUB_ADDRESS, + timestamp: STUB_TIMESTAMP + 1, }); assert.deepStrictEqual( [ @@ -645,7 +661,7 @@ describe('GatewayRegistry', async () => { const { result: getAllowedDelegatesResult } = await getAllowedDelegates({ 
memory: updatedMemory, from: STUB_ADDRESS, - timestamp: STUB_TIMESTAMP, + timestamp: STUB_TIMESTAMP + 1, gatewayAddress: STUB_ADDRESS, }); assert.deepStrictEqual( @@ -722,6 +738,7 @@ describe('GatewayRegistry', async () => { const updatedGateway = await getGateway({ address: STUB_ADDRESS, memory: decreaseStakeMemory, + timestamp: decreaseTimestamp, }); assert.deepStrictEqual(updatedGateway, { ...gatewayBefore, @@ -731,6 +748,7 @@ describe('GatewayRegistry', async () => { await getGatewayVaultsItems({ memory: decreaseStakeMemory, gatewayAddress: STUB_ADDRESS, + timestamp: decreaseTimestamp, }), [ { @@ -878,6 +896,7 @@ describe('GatewayRegistry', async () => { const gatewayAfter = await getGateway({ address: STUB_ADDRESS, memory: delegatedStakeMemory, + timestamp: delegationTimestamp, }); assert.deepStrictEqual(gatewayAfter, { ...gatewayBefore, @@ -886,6 +905,7 @@ describe('GatewayRegistry', async () => { const delegateItems = await getDelegatesItems({ memory: delegatedStakeMemory, gatewayAddress: STUB_ADDRESS, + timestamp: delegationTimestamp, }); assert.deepStrictEqual( [ @@ -917,6 +937,7 @@ describe('GatewayRegistry', async () => { const gatewayBefore = await getGateway({ address: STUB_ADDRESS, memory: delegatedStakeMemory, + timestamp: STUB_TIMESTAMP, }); const { memory: decreaseStakeMemory } = await decreaseDelegateStake({ memory: delegatedStakeMemory, @@ -930,6 +951,7 @@ describe('GatewayRegistry', async () => { const gatewayAfter = await getGateway({ address: STUB_ADDRESS, memory: decreaseStakeMemory, + timestamp: decreaseStakeTimestamp, }); assert.deepStrictEqual(gatewayAfter, { ...gatewayBefore, @@ -938,6 +960,7 @@ describe('GatewayRegistry', async () => { const delegateItems = await getDelegatesItems({ memory: decreaseStakeMemory, gatewayAddress: STUB_ADDRESS, + timestamp: decreaseStakeTimestamp, }); assert.deepStrictEqual(delegateItems, [ @@ -954,6 +977,7 @@ describe('GatewayRegistry', async () => { const delegationsForDelegator = await getDelegations({ memory: decreaseStakeMemory, address: delegatorAddress, + timestamp: decreaseStakeTimestamp, }); assert.deepStrictEqual(delegationsForDelegator.items, [ { @@ -974,6 +998,7 @@ describe('GatewayRegistry', async () => { }, ]); sharedMemory = decreaseStakeMemory; + lastTimestamp = decreaseStakeTimestamp; }); it('should fail to withdraw a delegated stake if below the minimum withdrawal limitation', async () => { @@ -993,6 +1018,7 @@ describe('GatewayRegistry', async () => { const gatewayBefore = await getGateway({ address: STUB_ADDRESS, memory: delegatedStakeMemory, + timestamp: STUB_TIMESTAMP, }); const { memory: decreaseStakeMemory, result } = await decreaseDelegateStake({ @@ -1018,9 +1044,11 @@ describe('GatewayRegistry', async () => { const gatewayAfter = await getGateway({ address: STUB_ADDRESS, memory: decreaseStakeMemory, + timestamp: decreaseStakeTimestamp, }); assert.deepStrictEqual(gatewayAfter, gatewayBefore); sharedMemory = decreaseStakeMemory; + lastTimestamp = decreaseStakeTimestamp; }); }); @@ -1062,11 +1090,14 @@ describe('GatewayRegistry', async () => { const gatewayAfter = await getGateway({ address: STUB_ADDRESS, memory: cancelWithdrawalMemory, + timestamp: decreaseStakeTimestamp, }); // no changes to the gateway after a withdrawal is cancelled assert.deepStrictEqual(gatewayAfter, gatewayBefore); sharedMemory = cancelWithdrawalMemory; + lastTimestamp = decreaseStakeTimestamp; }); + it('should allow cancelling an operator withdrawal', async () => { const decreaseStakeTimestamp = STUB_TIMESTAMP + 1000 * 60 * 15; // 15 
minutes after stubbedTimestamp const stakeQty = INITIAL_OPERATOR_STAKE; @@ -1105,6 +1136,7 @@ describe('GatewayRegistry', async () => { const gatewayAfter = await getGateway({ address: STUB_ADDRESS, memory: cancelWithdrawalMemory, + timestamp: decreaseStakeTimestamp, }); // no changes to the gateway after a withdrawal is cancelled assert.deepStrictEqual(gatewayAfter, { @@ -1112,6 +1144,7 @@ describe('GatewayRegistry', async () => { operatorStake: INITIAL_OPERATOR_STAKE + decreaseQty, // the decrease was cancelled and returned to the operator }); sharedMemory = cancelWithdrawalMemory; + lastTimestamp = decreaseStakeTimestamp; }); }); @@ -1196,7 +1229,7 @@ describe('GatewayRegistry', async () => { const { memory: addGatewayMemory2 } = await joinNetwork({ address: secondGatewayAddress, memory: sharedMemory, - timestamp: STUB_TIMESTAMP - 1, + timestamp: STUB_TIMESTAMP + 1, // join the network 1ms after the first gateway }); let cursor; let fetchedGateways = []; @@ -1207,9 +1240,12 @@ describe('GatewayRegistry', async () => { { name: 'Action', value: 'Paginated-Gateways' }, { name: 'Cursor', value: cursor }, { name: 'Limit', value: '1' }, + { name: 'Sort-By', value: 'startTimestamp' }, + { name: 'Sort-Order', value: 'asc' }, ], }, memory: addGatewayMemory2, + timestamp: STUB_TIMESTAMP + 1, }); // parse items, nextCursor const { items, nextCursor, hasMore, sortBy, sortOrder, totalItems } = @@ -1217,7 +1253,7 @@ describe('GatewayRegistry', async () => { assert.equal(totalItems, 2); assert.equal(items.length, 1); assert.equal(sortBy, 'startTimestamp'); - assert.equal(sortOrder, 'desc'); + assert.equal(sortOrder, 'asc'); // older gateways are first assert.equal(hasMore, !!nextCursor); cursor = nextCursor; fetchedGateways.push(...items); @@ -1283,10 +1319,11 @@ describe('GatewayRegistry', async () => { // Assert prescribed observers const prescribedObservers = JSON.parse(futureTick.Messages[0].Data) .maybeNewEpoch.prescribedObservers; - assert.equal(prescribedObservers.length, 2); - const prescribedObserverAddresses = prescribedObservers.map( - (o) => o.observerAddress, - ); + assert.deepEqual(prescribedObservers, { + [STUB_ADDRESS]: STUB_ADDRESS, + [observerAddress]: gatewayAddress, + }); + const prescribedObserverAddresses = Object.keys(prescribedObservers); assert.ok(prescribedObserverAddresses.includes(STUB_ADDRESS)); assert.ok(prescribedObserverAddresses.includes(observerAddress)); gatewayMemory = futureTick.Memory; @@ -1403,7 +1440,6 @@ describe('GatewayRegistry', async () => { const { memory: addGatewayMemory2 } = await joinNetwork({ address: secondGatewayAddress, memory: sharedMemory, - timestamp: STUB_TIMESTAMP - 1, }); // Stake to both gateways @@ -1447,6 +1483,7 @@ describe('GatewayRegistry', async () => { { name: 'Sort-Order', value: sortOrder }, ...(cursor ? 
[{ name: 'Cursor', value: `${cursor}` }] : []), ], + Timestamp: STUB_TIMESTAMP + 2, }, memory: decreaseStakeMemory, }); @@ -1604,6 +1641,7 @@ describe('GatewayRegistry', async () => { const delegateItems = await getDelegatesItems({ memory: delegatedStakeMemory, gatewayAddress: sourceAddress, + timestamp: STUB_TIMESTAMP, }); assert.deepStrictEqual( [ @@ -1728,6 +1766,7 @@ describe('GatewayRegistry', async () => { const delegateItems = await getDelegatesItems({ memory: delegatedStakeMemory, gatewayAddress: sourceAddress, + timestamp: STUB_TIMESTAMP, }); assert.deepStrictEqual( [ @@ -1770,6 +1809,7 @@ describe('GatewayRegistry', async () => { await getDelegatesItems({ memory: redelegateStakeMemory, gatewayAddress: targetAddress, + timestamp: STUB_TIMESTAMP + 2, }), [ { @@ -1790,6 +1830,7 @@ describe('GatewayRegistry', async () => { await getDelegatesItems({ memory: redelegateStakeMemory, gatewayAddress: sourceAddress, + timestamp: STUB_TIMESTAMP + 2, }), [], ); diff --git a/tests/handlers.test.mjs b/tests/handlers.test.mjs index 1b2492dc..d8b79957 100644 --- a/tests/handlers.test.mjs +++ b/tests/handlers.test.mjs @@ -1,10 +1,6 @@ -import { handle } from './helpers.mjs'; +import { ARIOToMARIO, handle } from './helpers.mjs'; import { describe, it } from 'node:test'; import assert from 'node:assert'; -import { - AO_LOADER_HANDLER_ENV, - DEFAULT_HANDLE_OPTIONS, -} from '../tools/constants.mjs'; describe('handlers', async () => { it('should maintain order of handlers, with _eval and _default first, followed by prune', async () => { @@ -21,15 +17,18 @@ describe('handlers', async () => { const { Handlers: handlersList } = JSON.parse(handlers.Messages[0].Data); assert.ok(handlersList.includes('_eval')); assert.ok(handlersList.includes('_default')); + assert.ok(handlersList.includes('sanitize')); assert.ok(handlersList.includes('prune')); const evalIndex = handlersList.indexOf('_eval'); const defaultIndex = handlersList.indexOf('_default'); + const sanitizeIndex = handlersList.indexOf('sanitize'); const pruneIndex = handlersList.indexOf('prune'); - const expectedHandlerCount = 71; // TODO: update this if more handlers are added + const expectedHandlerCount = 72; // TODO: update this if more handlers are added assert.ok(evalIndex === 0); assert.ok(defaultIndex === 1); - assert.ok(pruneIndex === 2); + assert.ok(sanitizeIndex === 2); + assert.ok(pruneIndex === 3); assert.ok( handlersList.length === expectedHandlerCount, 'should have ' + @@ -55,7 +54,7 @@ describe('handlers', async () => { const tokenSupplyData = JSON.parse( tokenSupplyResult.Messages?.[0]?.Data, ); - assert.ok(tokenSupplyData === 1000000000 * 1000000); + assert.ok(tokenSupplyData === ARIOToMARIO(1000000000)); }); }); @@ -84,11 +83,11 @@ describe('handlers', async () => { const supplyData = JSON.parse(supplyResult.Messages?.[0]?.Data); assert.ok( - supplyData.total === 1000000000 * 1000000, + supplyData.total === ARIOToMARIO(1000000000), 'total supply should be 1 billion ARIO but was ' + supplyData.total, ); assert.ok( - supplyData.circulating === 1000000000 * 1000000 - 50000000000000, + supplyData.circulating === ARIOToMARIO(1000000000) - 50000000000000, 'circulating supply should be 0.95 billion ARIO but was ' + supplyData.circulating, ); diff --git a/tests/helpers.mjs b/tests/helpers.mjs index 1319c11d..f5be73ae 100644 --- a/tests/helpers.mjs +++ b/tests/helpers.mjs @@ -9,6 +9,7 @@ import { STUB_TIMESTAMP, STUB_MESSAGE_ID, validGatewayTags, + STUB_PROCESS_ID, } from '../tools/constants.mjs'; const initialOperatorStake = 
100_000_000_000; @@ -23,6 +24,9 @@ export const genesisEpochTimestamp = 1719900000000; // Tuesday, July 2, 2024, 06 export const epochLength = 1000 * 60 * 60 * 24; // 24 hours export const distributionDelay = 1000 * 60 * 40; // 40 minutes +export const mARIOPerARIO = 1_000_000; +export const ARIOToMARIO = (amount) => amount * mARIOPerARIO; + const { handle: originalHandle, memory } = await createAosLoader(); export const startMemory = memory; @@ -39,7 +43,10 @@ export async function handle({ options = {}, memory = startMemory, shouldAssertNoResultError = true, + timestamp = STUB_TIMESTAMP, }) { + options.Timestamp ??= timestamp; + const result = await originalHandle( memory, { @@ -93,14 +100,20 @@ export function assertValidSupplyEventData(result) { } export const getBalances = async ({ memory, timestamp = STUB_TIMESTAMP }) => { + assert(memory, 'Memory is required'); const result = await handle({ options: { Tags: [{ name: 'Action', value: 'Balances' }], - Timestamp: timestamp, }, + timestamp, memory, }); + const balancesData = result.Messages?.[0]?.Data; + if (!balancesData) { + const { Memory, ...rest } = result; + assert(false, `Something went wrong: ${JSON.stringify(rest, null, 2)}`); + } const balances = JSON.parse(result.Messages?.[0]?.Data); return balances; }; @@ -119,6 +132,7 @@ export const transfer = async ({ quantity = initialOperatorStake, memory = startMemory, cast = false, + timestamp = STUB_TIMESTAMP, } = {}) => { if (quantity === 0) { // Nothing to do @@ -135,9 +149,11 @@ export const transfer = async ({ { name: 'Quantity', value: quantity }, { name: 'Cast', value: cast }, ], + Timestamp: timestamp, }, memory, }); + assertNoResultError(transferResult); return transferResult.Memory; }; @@ -149,11 +165,11 @@ export const joinNetwork = async ({ tags = validGatewayTags({ observerAddress }), quantity = 100_000_000_000, }) => { - // give them the join network token amount const transferMemory = await transfer({ recipient: address, quantity, memory, + timestamp, }); const joinNetworkResult = await handle({ options: { @@ -164,6 +180,7 @@ export const joinNetwork = async ({ }, memory: transferMemory, }); + assertNoResultError(joinNetworkResult); return { memory: joinNetworkResult.Memory, result: joinNetworkResult, @@ -186,6 +203,7 @@ export const setUpStake = async ({ quantity: transferQty, memory, cast: true, + timestamp, }); // Stake a gateway for the user to delegate to @@ -193,7 +211,7 @@ export const setUpStake = async ({ memory, address: gatewayAddress, tags: gatewayTags, - timestamp: timestamp - 1, + timestamp: timestamp, }); assertNoResultError(joinNetworkResult); memory = joinNetworkResult.memory; @@ -270,17 +288,21 @@ export const getDelegates = async ({ }; }; -export const getDelegatesItems = async ({ memory, gatewayAddress }) => { +export const getDelegatesItems = async ({ + memory, + gatewayAddress, + timestamp = STUB_TIMESTAMP, +}) => { const { result } = await getDelegates({ memory, from: STUB_ADDRESS, - timestamp: STUB_TIMESTAMP, + timestamp, gatewayAddress, }); return JSON.parse(result.Messages?.[0]?.Data).items; }; -export const getDelegations = async ({ memory, address }) => { +export const getDelegations = async ({ memory, address, timestamp }) => { const result = await handle({ options: { Tags: [ @@ -289,6 +311,7 @@ export const getDelegations = async ({ memory, address }) => { ], }, memory, + timestamp, }); return JSON.parse(result.Messages?.[0]?.Data); }; @@ -299,6 +322,7 @@ export const getVaults = async ({ limit, sortBy, sortOrder, + timestamp = 
STUB_TIMESTAMP, }) => { const { Memory, ...rest } = await handle({ options: { @@ -309,6 +333,7 @@ ...(sortBy ? [{ name: 'Sort-By', value: sortBy }] : []), ...(sortOrder ? [{ name: 'Sort-Order', value: sortOrder }] : []), ], + Timestamp: timestamp, }, memory, }); @@ -318,13 +343,18 @@ }; }; -export const getGatewayVaultsItems = async ({ memory, gatewayAddress }) => { +export const getGatewayVaultsItems = async ({ + memory, + gatewayAddress, + timestamp = STUB_TIMESTAMP, +}) => { const gatewayVaultsResult = await handle({ options: { Tags: [ { name: 'Action', value: 'Paginated-Gateway-Vaults' }, { name: 'Address', value: gatewayAddress }, ], + Timestamp: timestamp, }, memory, }); @@ -422,6 +452,7 @@ export const delegateStake = async ({ recipient: delegatorAddress, quantity, memory, + timestamp, }); const delegateResult = await handle({ @@ -455,8 +486,8 @@ export const getGateway = async ({ { name: 'Action', value: 'Gateway' }, { name: 'Address', value: address }, ], - Timestamp: timestamp, }, + timestamp, memory, }); const gateway = JSON.parse(gatewayResult.Messages?.[0]?.Data); @@ -689,7 +720,7 @@ export const buyRecord = async ({ memory, from, name, - processId, + processId = STUB_PROCESS_ID, type = 'lease', years = 1, timestamp = STUB_TIMESTAMP, @@ -705,10 +736,11 @@ { name: 'Process-Id', value: processId }, { name: 'Years', value: `${years}` }, ], - Timestamp: timestamp, }, + timestamp, memory, }); + assertNoResultError(buyRecordResult); return { result: buyRecordResult, memory: buyRecordResult.Memory, @@ -757,3 +789,100 @@ export const totalTokenSupply = async ({ memory, timestamp = 0 }) => { memory, }); }; + +export const tick = async ({ + memory, + timestamp = STUB_TIMESTAMP, + forcePrune = false, +}) => { + const tickResult = await handle({ + options: { + Tags: [ + { name: 'Action', value: 'Tick' }, + ...(forcePrune ? [{ name: 'Force-Prune', value: 'true' }] : []), + ], + Timestamp: timestamp, + }, + memory, + }); + return { + memory: tickResult.Memory, + result: tickResult, + }; +}; + +export const getEpoch = async ({ + memory, + timestamp = STUB_TIMESTAMP, + epochIndex, +}) => { + const epochResult = await handle({ + options: { + Tags: [ + { name: 'Action', value: 'Epoch' }, + ...(epochIndex !== undefined + ? [{ name: 'Epoch-Index', value: epochIndex }] + : []), + ], + Timestamp: timestamp, + }, + memory, + }); + assertNoResultError(epochResult); + return JSON.parse(epochResult.Messages[0].Data); +}; + +export const getPrescribedObservers = async ({ + memory, + timestamp = STUB_TIMESTAMP, + epochIndex, +}) => { + const prescribedObserversResult = await handle({ + options: { + Tags: [ + { name: 'Action', value: 'Epoch-Prescribed-Observers' }, + ...(epochIndex !== undefined + ? [{ name: 'Epoch-Index', value: epochIndex }] + : []), + ], + Timestamp: timestamp, + }, + memory, + }); + assertNoResultError(prescribedObserversResult); + return JSON.parse(prescribedObserversResult.Messages[0].Data); +}; + +export const getPrescribedNames = async ({ + memory, + timestamp = STUB_TIMESTAMP, + epochIndex, +}) => { + const prescribedNamesResult = await handle({ + options: { + Tags: [ + { name: 'Action', value: 'Epoch-Prescribed-Names' }, + ...(epochIndex !== undefined + ? [{ name: 'Epoch-Index', value: epochIndex }] + : []), + ], + Timestamp: timestamp, + }, + memory, + }); + assertNoResultError(prescribedNamesResult); + return JSON.parse(prescribedNamesResult.Messages[0].Data); +}; + +export const getEpochSettings = async ({ + memory, + timestamp = STUB_TIMESTAMP, +}) => { + const epochSettingsResult = await handle({ + options: { + Tags: [{ name: 'Action', value: 'Epoch-Settings' }], + Timestamp: timestamp, + }, + memory, + }); + assertNoResultError(epochSettingsResult); + return JSON.parse(epochSettingsResult.Messages[0].Data); +};
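// NOTE (editorial): a minimal usage sketch of the new epoch helpers above, assuming a
// test context where startMemory and genesisEpochTimestamp are imported from
// './helpers.mjs' and assert from 'node:assert'; the timestamps are illustrative and
// the flow mirrors the tick tests later in this changeset.
const { memory } = await tick({
  memory: startMemory,
  timestamp: genesisEpochTimestamp + 1, // 1ms into epoch zero, which creates the epoch
});
const epoch = await getEpoch({
  memory,
  timestamp: genesisEpochTimestamp + 1,
  epochIndex: 0,
});
assert.equal(epoch.epochIndex, 0);
// In the fixtures in this changeset, each prescribed observer's compositeWeight equals
// the product of its component weights (e.g. stakeWeight 3 * tenureWeight 4 * 1 * 1 = 12);
// a sketch of that assumed relation:
for (const o of epoch.prescribedObservers) {
  assert.equal(
    o.compositeWeight,
    o.stakeWeight *
      o.tenureWeight *
      o.gatewayRewardRatioWeight *
      o.observerRewardRatioWeight,
  );
}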
diff --git a/tests/invariants.mjs index ea7f22f0..85338804 100644 --- a/tests/invariants.mjs +++ b/tests/invariants.mjs @@ -1,5 +1,5 @@ import assert from 'node:assert'; -import { getBalances, getVaults, handle } from './helpers.mjs'; +import { ARIOToMARIO, getBalances, getVaults, handle } from './helpers.mjs'; function assertValidBalance(balance, expectedMin = 1) { assert( @@ -25,7 +25,7 @@ function assertValidTimestampsAtTimestamp({ ); assert( endTimestamp === null || endTimestamp > startTimestamp, - `Invariant violated: endTimestamp of ${endTimestamp} for vault ${address}`, + `Invariant violated: endTimestamp of ${endTimestamp} is not greater than startTimestamp ${startTimestamp}`, ); } @@ -45,6 +45,7 @@ async function assertNoBalanceVaultInvariants({ timestamp, memory }) { const { result } = await getVaults({ memory, limit: 1_000_000, // egregiously large limit to make sure we get them all + timestamp, }); for (const vault of JSON.parse(result.Messages?.[0]?.Data).items) { @@ -84,8 +85,8 @@ async function assertNoTotalSupplyInvariants({ timestamp, memory }) { const supplyData = JSON.parse(supplyResult.Messages?.[0]?.Data); assert.ok( - supplyData.total === 1000000000 * 1000000, - 'total supply should be 1,000,000,000,000,000 mIO but was ' + + supplyData.total === ARIOToMARIO(1000000000), + 'total supply should be 1,000,000,000,000,000 mARIO but was ' + supplyData.total, ); assertValidBalance(supplyData.circulating, 0); diff --git a/tests/monitor/monitor.test.mjs index 77af6e6e..0d4caa7f 100644 --- a/tests/monitor/monitor.test.mjs +++ b/tests/monitor/monitor.test.mjs @@ -1,11 +1,17 @@ -import { AOProcess, IO, IO_TESTNET_PROCESS_ID } from '@ar.io/sdk'; +import { AOProcess, ARIO, ARIO_DEVNET_PROCESS_ID, Logger } from '@ar.io/sdk'; import { connect } from '@permaweb/aoconnect'; import { strict as assert } from 'node:assert'; import { describe, it, before, after } from 'node:test'; import { DockerComposeEnvironment, Wait } from 'testcontainers'; -const processId = process.env.IO_PROCESS_ID || IO_TESTNET_PROCESS_ID; -const io = IO.init({ +// set the SDK log level to 'info' to get more detailed messages +Logger.default.setLogLevel('info'); + +export const mARIOPerARIO = 1_000_000; +export const ARIOToMARIO = (amount) => amount * mARIOPerARIO; + +const processId = process.env.ARIO_NETWORK_PROCESS_ID || ARIO_DEVNET_PROCESS_ID; +const io = ARIO.init({ process: new AOProcess({ processId, ao: connect({ @@ -65,10 +71,13 @@ describe('setup', () => { ); const evalIndex = handlersList.indexOf('_eval'); const defaultIndex = handlersList.indexOf('_default'); + const sanitizeIndex = handlersList.indexOf('sanitize'); const pruneIndex = handlersList.indexOf('prune'); assert( - pruneIndex > evalIndex && pruneIndex === defaultIndex + 1, - `Prune index (${pruneIndex}) is not the first handler after _default (${defaultIndex + 1})`, + pruneIndex === sanitizeIndex + 1 && + sanitizeIndex === defaultIndex + 1
&& + defaultIndex === evalIndex + 1, + `Prune index (${pruneIndex}) and sanitize index (${sanitizeIndex}) are not the first and second handlers after _default (${handlersList})`, ); }); }); @@ -132,7 +141,7 @@ describe('setup', () => { it('should always be 1 billion ARIO', async () => { const supplyData = await io.getTokenSupply(); assert( - supplyData.total === 1000000000 * 1000000, + supplyData.total === ARIOToMARIO(1000000000), `Total supply is not 1 billion ARIO: ${supplyData.total}`, ); assert( @@ -218,7 +227,7 @@ describe('setup', () => { supplyData.protocolBalance; assert( supplyData.total === computedTotal && - computedTotal === 1000000000 * 1000000, + computedTotal === ARIOToMARIO(1000000000), `Computed total supply (${computedTotal}) is not equal to the sum of protocol balance, circulating, locked, staked, and delegated and withdrawn provided by the contract (${supplyData.total}) and does not match the expected total of 1 billion ARIO`, ); @@ -353,7 +362,7 @@ describe('setup', () => { (Date.now() - epochZeroStartTimestamp) / durationMs, ); - let cursor = ''; + let cursor = undefined; let totalGateways = 0; const uniqueGateways = new Set(); do { @@ -532,7 +541,7 @@ describe('setup', () => { ); const testLogicPromise = (async () => { - let cursor = ''; + let cursor = undefined; let totalArns = 0; const uniqueNames = new Set(); do { diff --git a/tests/primary.test.mjs b/tests/primary.test.mjs index 35f3669f..6c2b9111 100644 --- a/tests/primary.test.mjs +++ b/tests/primary.test.mjs @@ -14,6 +14,7 @@ import { assertNoInvariants } from './invariants.mjs'; describe('primary names', function () { let sharedMemory; + let endingMemory; beforeEach(async () => { const { Memory: totalTokenSupplyMemory } = await totalTokenSupply({ memory: startMemory, @@ -23,8 +24,8 @@ describe('primary names', function () { afterEach(async () => { await assertNoInvariants({ - timestamp: STUB_TIMESTAMP, - memory: sharedMemory, + timestamp: STUB_TIMESTAMP + 1000 * 60 * 60 * 24 * 365, + memory: endingMemory, }); }); @@ -33,6 +34,7 @@ describe('primary names', function () { processId, type = 'permabuy', years = 1, + timestamp = STUB_TIMESTAMP, memory = sharedMemory, }) => { const buyRecordResult = await handle({ @@ -44,6 +46,7 @@ describe('primary names', function () { { name: 'Years', value: years }, { name: 'Process-Id', value: processId }, ], + Timestamp: timestamp, }, memory, }); @@ -66,6 +69,7 @@ describe('primary names', function () { recipient: caller, quantity: 100000000, // primary name cost memory, + timestamp, }); memory = transferMemory; } @@ -88,7 +92,7 @@ describe('primary names', function () { }; }; - const getPrimaryNameRequest = async ({ initiator, memory }) => { + const getPrimaryNameRequest = async ({ initiator, memory, timestamp }) => { const getPrimaryNameRequestResult = await handle({ options: { Tags: [ @@ -97,6 +101,7 @@ describe('primary names', function () { ], }, memory, + timestamp, }); return { result: getPrimaryNameRequestResult, @@ -130,11 +135,17 @@ describe('primary names', function () { }; }; - const removePrimaryNames = async ({ names, caller, memory }) => { + const removePrimaryNames = async ({ + names, + caller, + memory, + timestamp = STUB_TIMESTAMP, + }) => { const removePrimaryNamesResult = await handle({ options: { From: caller, Owner: caller, + Timestamp: timestamp, Tags: [ { name: 'Action', value: 'Remove-Primary-Names' }, { name: 'Names', value: names.join(',') }, @@ -151,6 +162,7 @@ describe('primary names', function () { const getPrimaryNameForAddress = async ({ 
address, memory, + timestamp = STUB_TIMESTAMP, shouldAssertNoResultError = true, }) => { const getPrimaryNameResult = await handle({ @@ -159,6 +171,7 @@ describe('primary names', function () { { name: 'Action', value: 'Primary-Name' }, { name: 'Address', value: address }, ], + Timestamp: timestamp, }, memory, shouldAssertNoResultError, @@ -169,13 +182,18 @@ describe('primary names', function () { }; }; - const getOwnerOfPrimaryName = async ({ name, memory }) => { + const getOwnerOfPrimaryName = async ({ + name, + memory, + timestamp = STUB_TIMESTAMP, + }) => { const getOwnerResult = await handle({ options: { Tags: [ { name: 'Action', value: 'Primary-Name' }, { name: 'Name', value: name }, ], + Timestamp: timestamp, }, memory, }); @@ -188,9 +206,11 @@ describe('primary names', function () { it('should allow creating and approving a primary name for an existing base name when the recipient is not the base name owner and is funding from stakes', async function () { const processId = ''.padEnd(43, 'a'); const recipient = ''.padEnd(43, 'b'); - const { memory: buyRecordMemory } = await buyRecord({ + const requestTimestamp = 1234567890; + const { memory: buyRecordMemory, result } = await buyRecord({ name: 'test-name', processId, + timestamp: requestTimestamp, }); const stakeResult = await setUpStake({ @@ -198,12 +218,13 @@ describe('primary names', function () { stakerAddress: recipient, transferQty: 550000000, stakeQty: 500000000, + timestamp: requestTimestamp, }); const { result: requestPrimaryNameResult } = await requestPrimaryName({ name: 'test-name', caller: recipient, - timestamp: 1234567890, + timestamp: requestTimestamp, memory: stakeResult.memory, fundFrom: 'stakes', }); @@ -235,6 +256,7 @@ describe('primary names', function () { { initiator: recipient, memory: requestPrimaryNameResult.Memory, + timestamp: requestTimestamp, }, ); @@ -315,6 +337,7 @@ describe('primary names', function () { await getPrimaryNameForAddress({ address: recipient, memory: approvePrimaryNameRequestResult.Memory, + timestamp: approvedTimestamp, }); const primaryNameLookupResult = JSON.parse( @@ -326,23 +349,28 @@ describe('primary names', function () { }); // reverse lookup the owner of the primary name - const { result: ownerOfPrimaryNameResult } = await getOwnerOfPrimaryName({ - name: 'test-name', - memory: approvePrimaryNameRequestResult.Memory, - }); + const { result: ownerOfPrimaryNameResult, memory } = + await getOwnerOfPrimaryName({ + name: 'test-name', + memory: approvePrimaryNameRequestResult.Memory, + timestamp: approvedTimestamp, + }); const ownerResult = JSON.parse(ownerOfPrimaryNameResult.Messages[0].Data); assert.deepStrictEqual(ownerResult, { ...expectedNewPrimaryName, processId, }); + endingMemory = memory; }); it('should immediately approve a primary name for an existing base name when the caller of the request is the base name owner', async function () { const processId = ''.padEnd(43, 'a'); + const requestTimestamp = 1234567890; const { memory: buyRecordMemory } = await buyRecord({ name: 'test-name', processId, + timestamp: requestTimestamp, }); const approvalTimestamp = 1234567899; @@ -421,6 +449,7 @@ describe('primary names', function () { await getPrimaryNameForAddress({ address: processId, memory: requestPrimaryNameResult.Memory, + timestamp: approvalTimestamp, }); const primaryNameLookupResult = JSON.parse( @@ -432,30 +461,35 @@ describe('primary names', function () { }); // reverse lookup the owner of the primary name - const { result: ownerOfPrimaryNameResult } = await 
getOwnerOfPrimaryName({ - name: 'test-name', - memory: requestPrimaryNameResult.Memory, - }); + const { result: ownerOfPrimaryNameResult, memory } = + await getOwnerOfPrimaryName({ + name: 'test-name', + memory: requestPrimaryNameResult.Memory, + timestamp: approvalTimestamp, + }); const ownerResult = JSON.parse(ownerOfPrimaryNameResult.Messages[0].Data); assert.deepStrictEqual(ownerResult, { ...expectedNewPrimaryName, processId, }); + endingMemory = memory; }); it('should allow removing a primary name by the owner or the owner of the base record', async function () { const processId = ''.padEnd(43, 'a'); const recipient = ''.padEnd(43, 'b'); + const requestTimestamp = 1234567890; const { memory: buyRecordMemory } = await buyRecord({ name: 'test-name', processId, + timestamp: requestTimestamp, }); // create a primary name claim const { result: requestPrimaryNameResult } = await requestPrimaryName({ name: 'test-name', caller: recipient, - timestamp: 1234567890, + timestamp: requestTimestamp, memory: buyRecordMemory, }); // claim the primary name @@ -464,7 +498,7 @@ name: 'test-name', caller: processId, recipient: recipient, - timestamp: 1234567890, + timestamp: requestTimestamp, memory: requestPrimaryNameResult.Memory, }); @@ -473,6 +507,7 @@ names: ['test-name'], caller: processId, memory: approvePrimaryNameRequestResult.Memory, + timestamp: requestTimestamp, }); // assert no error @@ -507,7 +542,7 @@ Action: 'Remove-Primary-Names', Cast: false, Cron: false, - 'Epoch-Index': -19657, + 'Epoch-Index': -5618, From: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'From-Formatted': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'Message-Id': 'mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm', @@ -515,15 +550,16 @@ 'Num-Removed-Primary-Names': 1, 'Removed-Primary-Names': ['test-name'], 'Removed-Primary-Name-Owners': [recipient], - Timestamp: 21600000, + Timestamp: requestTimestamp, 'Total-Primary-Name-Requests': 0, 'Total-Primary-Names': 0, }); // assert the primary name is no longer set - const { result: primaryNameForAddressResult } = + const { result: primaryNameForAddressResult, memory } = await getPrimaryNameForAddress({ address: recipient, memory: removePrimaryNameResult.Memory, + timestamp: requestTimestamp, shouldAssertNoResultError: false, // we expect an error here, don't throw }); @@ -531,6 +567,7 @@ (tag) => tag.name === 'Error', ).value; assert.ok(errorTag, 'Expected an error tag'); + endingMemory = memory; }); describe('getPaginatedPrimaryNames', function () { @@ -557,6 +594,7 @@ sortBy: 'owner', sortOrder: 'asc', }); + endingMemory = getPaginatedPrimaryNamesResult.Memory; }); }); @@ -584,6 +622,7 @@ sortBy: 'startTimestamp', sortOrder: 'asc', }); + endingMemory = getPaginatedPrimaryNameRequestsResult.Memory; }); }); });
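// NOTE (editorial): the primary-name lifecycle exercised by the tests above, condensed
// into one hedged sketch. It reuses this file's local helpers (requestPrimaryName,
// approvePrimaryNameRequest, getPrimaryNameForAddress); the addresses, memory, and
// timestamp values are illustrative assumptions, not fixtures from this file.
const requestTimestamp = 1234567890;
// 1) an address that does not own the base name requests it as its primary name
const { result: request } = await requestPrimaryName({
  name: 'test-name',
  caller: recipient,
  timestamp: requestTimestamp,
  memory: buyRecordMemory,
});
// 2) the base-name owner (the ANT process) approves, creating the primary name
const { result: approval } = await approvePrimaryNameRequest({
  name: 'test-name',
  caller: processId, // base-name owner
  recipient,
  timestamp: requestTimestamp,
  memory: request.Memory,
});
// 3) forward lookup now resolves for the recipient (and reverse lookup by name),
// until either party removes it via Remove-Primary-Names
const { result: lookup } = await getPrimaryNameForAddress({
  address: recipient,
  memory: approval.Memory,
  timestamp: requestTimestamp,
});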
diff --git a/tests/tick.test.mjs index 5ec35998..bc3a94dc 100644 --- a/tests/tick.test.mjs +++ b/tests/tick.test.mjs @@ -4,8 +4,6 @@ import { DEFAULT_HANDLE_OPTIONS, STUB_ADDRESS, - validGatewayTags, - PROCESS_OWNER, PROCESS_ID, STUB_TIMESTAMP, INITIAL_OPERATOR_STAKE, @@ -20,9 +18,15 @@ import { joinNetwork, buyRecord, handle, + transfer, startMemory, returnedNamesPeriod, totalTokenSupply, + getEpoch, + tick, + saveObservations, + getEpochSettings, + leaveNetwork, } from './helpers.mjs'; import { assertNoInvariants } from './invariants.mjs'; @@ -41,52 +45,23 @@ describe('Tick', async () => { afterEach(async () => { await assertNoInvariants({ - timestamp: STUB_TIMESTAMP, + timestamp: genesisEpochStart + 1000 * 60 * 60 * 24 * 365, memory: sharedMemory, }); }); - const transfer = async ({ - recipient = STUB_ADDRESS, - quantity = 100_000_000_000, - memory = sharedMemory, - } = {}) => { - const transferResult = await handle({ - options: { - From: PROCESS_OWNER, - Owner: PROCESS_OWNER, - Tags: [ - { name: 'Action', value: 'Transfer' }, - { name: 'Recipient', value: recipient }, - { name: 'Quantity', value: quantity }, - { name: 'Cast', value: false }, - ], - }, - memory, - }); - - // assert no error tag - const errorTag = transferResult.Messages?.[0]?.Tags?.find( - (tag) => tag.Name === 'Error', - ); - assert.strictEqual(errorTag, undefined); - - return transferResult.Memory; - }; - it('should prune records that are expired and after the grace period and create returned names for them', async () => { - let memory = sharedMemory; - const buyRecordResult = await handle({ - options: { - Tags: [ - { name: 'Action', value: 'Buy-Record' }, - { name: 'Name', value: 'test-name' }, - { name: 'Purchase-Type', value: 'lease' }, - { name: 'Years', value: '1' }, - { name: 'Process-Id', value: ''.padEnd(43, 'a') }, - ], - }, + const memory = await transfer({ + recipient: STUB_ADDRESS, + quantity: 100_000_000_000, + memory: sharedMemory, + }); + const buyRecordResult = await buyRecord({ memory, + name: 'test-name', + type: 'lease', + from: STUB_ADDRESS, + processId: ''.padEnd(43, 'a'), }); const realRecord = await handle({ options: { @@ -95,7 +70,7 @@ describe('Tick', async () => { { name: 'Name', value: 'test-name' }, ], }, - memory: buyRecordResult.Memory, + memory: buyRecordResult.memory, }); const buyRecordData = JSON.parse(realRecord.Messages[0].Data); assert.deepEqual(buyRecordData, { @@ -110,14 +85,9 @@ describe('Tick', async () => { // mock the passage of time and tick with a future timestamp const futureTimestamp = buyRecordData.endTimestamp + 1000 * 60 * 60 * 24 * 14 + 1; - const futureTickResult = await handle({ - options: { - Tags: [ - { name: 'Action', value: 'Tick' }, - { name: 'Timestamp', value: futureTimestamp.toString() }, - ], - }, - memory: buyRecordResult.Memory, + const { result: futureTickResult } = await tick({ + memory: buyRecordResult.memory, + timestamp: futureTimestamp, }); const tickEvent = JSON.parse( @@ -136,6 +106,7 @@ describe('Tick', async () => { { name: 'Action', value: 'Record' }, { name: 'Name', value: 'test-name' }, ], + Timestamp: futureTimestamp, }, memory: futureTickResult.Memory, }); @@ -164,46 +135,36 @@ describe('Tick', async () => { initiator: PROCESS_ID, premiumMultiplier: 50, }); + sharedMemory = returnedNameData.Memory; });
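// NOTE (editorial): the timing arithmetic the pruning test above relies on, stated as
// an assumed relation: a leased record becomes prunable (and a returned name is
// created) only strictly after its endTimestamp plus the two-week grace period, hence
// the `endTimestamp + 14 days + 1ms` tick in the test.
const gracePeriodMs = 1000 * 60 * 60 * 24 * 14; // 14 days, matching the test above
const firstPrunableTimestamp = (record) => record.endTimestamp + gracePeriodMs + 1;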
it('should prune gateways that are expired', async () => { const memory = await transfer({ recipient: STUB_ADDRESS, quantity: 100_000_000_000, + memory: sharedMemory, }); - const joinNetworkResult = await handle({ - options: { - Tags: validGatewayTags(), - From: STUB_ADDRESS, - Owner: STUB_ADDRESS, - }, + const joinNetworkResult = await joinNetwork({ memory, + address: STUB_ADDRESS, }); - // assert no error tag - assertNoResultError(joinNetworkResult); - // check the gateway record from contract const gateway = await getGateway({ - memory: joinNetworkResult.Memory, + memory: joinNetworkResult.memory, address: STUB_ADDRESS, }); assert.deepEqual(gateway.status, 'joined'); // leave the network - const leaveNetworkResult = await handle({ - options: { - From: STUB_ADDRESS, - Owner: STUB_ADDRESS, - Tags: [{ name: 'Action', value: 'Leave-Network' }], - }, - memory: joinNetworkResult.Memory, + const leaveNetworkResult = await leaveNetwork({ + memory: joinNetworkResult.memory, + address: STUB_ADDRESS, }); // check the gateways status is leaving const leavingGateway = await getGateway({ - memory: leaveNetworkResult.Memory, + memory: leaveNetworkResult.memory, address: STUB_ADDRESS, }); assert.deepEqual(leavingGateway.status, 'leaving'); @@ -211,23 +172,20 @@ describe('Tick', async () => { // expedite the timestamp to the future const futureTimestamp = leavingGateway.endTimestamp + 1; - const futureTick = await handle({ - options: { - Tags: [ - { name: 'Action', value: 'Tick' }, - { name: 'Timestamp', value: futureTimestamp.toString() }, - ], - }, - memory: leaveNetworkResult.Memory, + const futureTick = await tick({ + memory: leaveNetworkResult.memory, + timestamp: futureTimestamp, }); // check the gateway is pruned const prunedGateway = await getGateway({ - memory: futureTick.Memory, + memory: futureTick.memory, address: STUB_ADDRESS, + timestamp: futureTimestamp, }); assert.deepEqual(undefined, prunedGateway); + sharedMemory = futureTick.memory; }); // vaulting is not working as expected, need to fix before enabling this test @@ -315,18 +273,16 @@ describe('Tick', async () => { ); // mock the passage of time and tick with a future timestamp const futureTimestamp = vaultData.endTimestamp + 1; - const futureTick = await handle({ - options: { - Tags: [{ name: 'Action', value: 'Tick' }], - Timestamp: futureTimestamp, - }, + const futureTick = await tick({ memory: createVaultResult.Memory, + timestamp: futureTimestamp, }); // check the vault is pruned const prunedVault = await handle({ options: { Tags: [{ name: 'Action', value: 'Vault' }], + Timestamp: futureTimestamp, }, - memory: futureTick.Memory, + memory: futureTick.memory, shouldAssertNoResultError: false, @@ -344,11 +300,13 @@ describe('Tick', async () => { { name: 'Action', value: 'Balance' }, { name: 'Target', value: DEFAULT_HANDLE_OPTIONS.Owner }, ], + Timestamp: futureTimestamp, }, - memory: futureTick.Memory, + memory: futureTick.memory, }); const balanceData = JSON.parse(ownerBalance.Messages[0].Data); assert.equal(balanceData, balanceBeforeData); + sharedMemory = ownerBalance.Memory; }); /** @@ -368,6 +326,7 @@ describe('Tick', async () => { const initialMemory = await transfer({ recipient: STUB_ADDRESS, quantity: 100_000_000_000, + memory: sharedMemory, }); const delegateAddress = 'delegate-address-'.padEnd(43, '1'); @@ -401,36 +360,26 @@ describe('Tick', async () => { assertNoResultError(newDelegateResult); // fast forward to the start of the first epoch - const epochSettings = await handle({ - options: { - Tags: [{ name: 'Action', value: 'Epoch-Settings' }], - }, + const epochSettings = await getEpochSettings({ + memory: newDelegateResult.Memory, + timestamp: delegateTimestamp, }); - const epochSettingsData = JSON.parse(epochSettings.Messages?.[0]?.Data); - const genesisEpochTimestamp = epochSettingsData.epochZeroStartTimestamp; + const genesisEpochTimestamp = epochSettings.epochZeroStartTimestamp; // now tick to create the first epoch after the epoch start timestamp const createEpochTimestamp = genesisEpochTimestamp + 1; - const newEpochTick = await handle({ - options: { - Timestamp: createEpochTimestamp, // one millisecond after the epoch start timestamp, should create the epoch and set the prescribed observers and names - Tags: [ - { name:
'Action', value: 'Tick' }, - { name: 'Force-Prune', value: 'true' }, // simply exercise this though it's not critical to the test - ], - }, + const newEpochTick = await tick({ memory: newDelegateResult.Memory, + timestamp: createEpochTimestamp, + forcePrune: true, }); // assert no error tag assertNoResultError(newEpochTick); // assert the new epoch is created - const epoch = await handle({ - options: { - Timestamp: createEpochTimestamp, // one millisecond after the epoch start timestamp - Tags: [{ name: 'Action', value: 'Epoch' }], - }, - memory: newEpochTick.Memory, + const epochData = await getEpoch({ + memory: newEpochTick.memory, + timestamp: createEpochTimestamp, }); // get the epoch timestamp and assert it is in 24 hours @@ -442,7 +391,6 @@ describe('Tick', async () => { (totalGatewayRewards + totalObserverRewards) / 1; // only one gateway in the network const expectedGatewayOperatorReward = totalEligibleGatewayRewards * 0.75; // 75% of the eligible rewards go to the operator const expectedGatewayDelegateReward = totalEligibleGatewayRewards * 0.25; // 25% of the eligible rewards go to the delegates - const epochData = JSON.parse(epoch.Messages[0].Data); assert.deepStrictEqual(epochData, { epochIndex: 0, startHeight: 1, @@ -456,19 +404,18 @@ describe('Tick', async () => { }, prescribedObservers: [ { - // TODO: we could just return the addresses here observerAddress: STUB_ADDRESS, + gatewayAddress: STUB_ADDRESS, + stakeWeight: 3, + gatewayRewardRatioWeight: 1, observerRewardRatioWeight: 1, + compositeWeight: 12, normalizedCompositeWeight: 1, - gatewayRewardRatioWeight: 1, - gatewayAddress: STUB_ADDRESS, - stake: INITIAL_OPERATOR_STAKE * 3, tenureWeight: 4, - compositeWeight: 12, - startTimestamp: 21600000, - stakeWeight: 3, + stake: INITIAL_OPERATOR_STAKE, + startTimestamp: STUB_TIMESTAMP, }, - ], // the only gateway in the network + ], prescribedNames: [], // no names in the network distributions: { totalEligibleGateways: 1, @@ -491,20 +438,11 @@ describe('Tick', async () => { // have the gateway submit an observation const reportTxId = 'report-tx-id-'.padEnd(43, '1'); const observationTimestamp = createEpochTimestamp + 7 * 1000 * 60 * 60; // 7 hours after the epoch start timestamp - const observation = await handle({ - options: { - From: STUB_ADDRESS, - Owner: STUB_ADDRESS, - Timestamp: observationTimestamp, - Tags: [ - { name: 'Action', value: 'Save-Observations' }, - { - name: 'Report-Tx-Id', - value: reportTxId, - }, - ], - }, - memory: epoch.Memory, + const observation = await saveObservations({ + memory: newEpochTick.memory, + timestamp: observationTimestamp, + from: STUB_ADDRESS, + reportTxId, }); // assert no error tag @@ -512,33 +450,21 @@ describe('Tick', async () => { // now jump ahead to the epoch distribution timestamp const distributionTimestamp = epochData.distributionTimestamp; - const distributionTick = await handle({ - options: { - Tags: [{ name: 'Action', value: 'Tick' }], - Timestamp: distributionTimestamp, - }, - memory: observation.Memory, + const distributionTick = await tick({ + memory: observation.memory, + timestamp: distributionTimestamp, }); // assert no error tag assertNoResultError(distributionTick); - // check the rewards were distributed correctly - const rewards = await handle({ - options: { - Timestamp: distributionTimestamp, - Tags: [ - { name: 'Action', value: 'Epoch' }, - { - name: 'Epoch-Index', - value: '0', - }, - ], - }, - memory: distributionTick.Memory, + // check the rewards were distributed correctly and weights are updated + const 
distributedEpochData = await getEpoch({ + memory: distributionTick.memory, + timestamp: distributionTimestamp, + epochIndex: 0, }); - const distributedEpochData = JSON.parse(rewards.Messages[0].Data); assert.deepStrictEqual(distributedEpochData, { ...epochData, distributions: { @@ -559,21 +485,27 @@ describe('Tick', async () => { [STUB_ADDRESS]: reportTxId, }, }, + prescribedObservers: [ + { + ...epochData.prescribedObservers[0], + stake: INITIAL_OPERATOR_STAKE + expectedGatewayOperatorReward, + compositeWeight: 22, + stakeWeight: 5.5, + }, + ], }); // assert the new epoch was created - const newEpoch = await handle({ - options: { - Tags: [{ name: 'Action', value: 'Epoch' }], - Timestamp: distributionTimestamp, - }, - memory: distributionTick.Memory, + const newEpoch = await getEpoch({ + memory: distributionTick.memory, + timestamp: distributionTimestamp, + epochIndex: 1, }); - const newEpochData = JSON.parse(newEpoch.Messages[0].Data); - assert.equal(newEpochData.epochIndex, 1); + assert.equal(newEpoch.epochIndex, 1); // assert the gateway stakes were updated and match the distributed rewards const gateway = await getGateway({ - memory: distributionTick.Memory, + memory: distributionTick.memory, address: STUB_ADDRESS, + timestamp: distributionTimestamp, }); assert.deepStrictEqual(gateway, { status: 'joined', @@ -613,8 +545,9 @@ describe('Tick', async () => { }); const delegateItems = await getDelegatesItems({ - memory: distributionTick.Memory, + memory: distributionTick.memory, gatewayAddress: STUB_ADDRESS, + timestamp: distributionTimestamp, }); assert.deepEqual(delegateItems, [ { @@ -623,6 +556,7 @@ describe('Tick', async () => { address: delegateAddress, }, ]); + sharedMemory = distributionTick.memory; }); it('should not increase demandFactor and baseRegistrationFee when records are bought until the end of the epoch', async () => { @@ -651,6 +585,7 @@ describe('Tick', async () => { recipient: fundedUser, quantity: 100_000_000_000_000, memory: genesisEpochTick.Memory, + timestamp: genesisEpochStart, }); // Buy records in this epoch @@ -662,6 +597,7 @@ describe('Tick', async () => { name: `test-name-${i}`, purchaseType: 'permabuy', processId: processId, + timestamp: genesisEpochStart, }); buyRecordMemory = buyRecordResult.Memory; } @@ -709,6 +645,7 @@ describe('Tick', async () => { timestamp: firstEpochEndTimestamp + 1, }); assert.equal(firstEpochEndDemandFactorResult, 1.0500000000000000444); + sharedMemory = firstEpochEndTick.Memory; }); it('should reset to baseRegistrationFee when demandFactor is 0.5 for consecutive epochs', async () => { @@ -731,17 +668,11 @@ describe('Tick', async () => { // Tick to the epoch where demandFactor is 0.5 for (let i = 0; i <= 49; i++) { const epochTimestamp = genesisEpochStart + (epochDurationMs + 1) * i; - const { Memory } = await handle({ - options: { - Tags: [ - { name: 'Action', value: 'Tick' }, - { name: 'Timestamp', value: epochTimestamp.toString() }, - ], - Timestamp: epochTimestamp, - }, + const { result: tickResult } = await tick({ memory: tickMemory, + timestamp: epochTimestamp, }); - tickMemory = Memory; + tickMemory = tickResult.Memory; if (i === 45) { const demandFactor = await getDemandFactor({ @@ -773,5 +704,6 @@ describe('Tick', async () => { assert.equal(demandFactorAfterFeeAdjustment, 1); assert.equal(baseFeeAfterConsecutiveTicksWithNoPurchases, 300_000_000); + sharedMemory = tickMemory; }); }); diff --git a/tests/transfer.test.mjs b/tests/transfer.test.mjs index c558c8d6..19db3280 100644 --- a/tests/transfer.test.mjs +++ 
b/tests/transfer.test.mjs @@ -1,9 +1,19 @@ import { handle, startMemory } from './helpers.mjs'; -import { describe, it } from 'node:test'; +import { afterEach, describe, it } from 'node:test'; import assert from 'node:assert'; -import { STUB_ADDRESS, PROCESS_OWNER } from '../tools/constants.mjs'; +import { + STUB_ADDRESS, + PROCESS_OWNER, + STUB_TIMESTAMP, +} from '../tools/constants.mjs'; +import { assertNoInvariants } from './invariants.mjs'; describe('Transfers', async () => { + let endingMemory; + afterEach(async () => { + await assertNoInvariants({ memory: endingMemory, timestamp: STUB_TIMESTAMP }); + }); + it('should transfer tokens to another wallet', async () => { const checkTransfer = async (recipient, sender, quantity) => { let mem = startMemory; @@ -59,6 +69,7 @@ describe('Transfers', async () => { const balances = JSON.parse(result.Messages[0].Data); assert.equal(balances[recipient], quantity); assert.equal(balances[sender], senderBalanceData - quantity); + return result.Memory; }; const arweave1 = STUB_ADDRESS; @@ -69,7 +80,7 @@ describe('Transfers', async () => { await checkTransfer(arweave1, arweave2, 100000000); await checkTransfer(eth1, arweave2, 100000000); - await checkTransfer(eth2, eth1, 100000000); + endingMemory = await checkTransfer(eth2, eth1, 100000000); }); it('should not transfer tokens to another wallet if the sender does not have enough tokens', async () => { @@ -107,6 +118,7 @@ describe('Transfers', async () => { // the new balance won't be defined assert.equal(balances[recipient] || 0, 0); assert.equal(balances[sender], senderBalanceData); + endingMemory = result.Memory; }); for (const allowUnsafeAddresses of [false, undefined]) { @@ -151,6 +163,7 @@ describe('Transfers', async () => { const balances = JSON.parse(result.Messages[0].Data); assert.equal(balances[recipient] || 0, 0); assert.equal(balances[sender], senderBalanceData); + endingMemory = result.Memory; }); } @@ -196,6 +209,7 @@ describe('Transfers', async () => { const balances = JSON.parse(result.Messages[0].Data); assert.equal(balances[recipient] || 0, 100000000); assert.equal(balances[sender], senderBalanceData - 100000000); + endingMemory = result.Memory; }); it('should not transfer when an invalid quantity is provided', async () => { @@ -232,5 +246,6 @@ describe('Transfers', async () => { const balances = JSON.parse(result.Messages[0].Data); assert.equal(balances[recipient] || 0, 0); assert.equal(balances[sender], senderBalanceData); + endingMemory = result.Memory; }); });
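// NOTE (editorial): a minimal sketch of the Allow-Unsafe-Addresses path covered by the
// transfer tests above; the recipient string is an arbitrary non-Arweave address used
// purely for illustration. Without the Allow-Unsafe-Addresses tag the transfer is
// rejected and balances are unchanged; with it, the quantity moves to the unsafe address.
const unsafeTransferResult = await handle({
  options: {
    From: PROCESS_OWNER,
    Owner: PROCESS_OWNER,
    Tags: [
      { name: 'Action', value: 'Transfer' },
      { name: 'Recipient', value: 'not-a-real-arweave-address' },
      { name: 'Quantity', value: '100000000' },
      { name: 'Allow-Unsafe-Addresses', value: 'true' },
    ],
  },
  memory: startMemory,
});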
diff --git a/tests/vaults.test.mjs index 7f64658d..1340a0ae 100644 --- a/tests/vaults.test.mjs +++ b/tests/vaults.test.mjs @@ -18,6 +18,7 @@ import { assertNoInvariants } from './invariants.mjs'; describe('Vaults', async () => { let sharedMemory = startMemory; + let endingMemory; beforeEach(async () => { const { Memory: totalTokenSupplyMemory } = await totalTokenSupply({ memory: startMemory, @@ -28,7 +29,7 @@ describe('Vaults', async () => { afterEach(async () => { await assertNoInvariants({ timestamp: STUB_TIMESTAMP, - memory: sharedMemory, + memory: endingMemory, }); }); @@ -42,6 +43,7 @@ describe('Vaults', async () => { ], }, memory, + shouldAssertNoResultError: false, }); assertNoResultError(vault); // make sure it is a vault @@ -110,6 +112,7 @@ describe('Vaults', async () => { createVaultResultData.endTimestamp, createVaultResult.startTimestamp + lockLengthMs, ); + endingMemory = createVaultResult.Memory; }); it('should throw an error if vault size is too small', async () => { @@ -153,6 +156,7 @@ describe('Vaults', async () => { balanceAfterVault.Messages[0].Data, ); assert.deepEqual(balanceAfterVaultData, balanceBeforeData); + endingMemory = balanceAfterVault.Memory; }); }); @@ -211,6 +215,7 @@ describe('Vaults', async () => { createVaultResultData.balance, quantity, ); + endingMemory = extendVaultResult.Memory; }); }); @@ -272,6 +277,7 @@ describe('Vaults', async () => { increaseVaultBalanceResultData.balance, createVaultResultData.balance + quantity, ); + endingMemory = increaseVaultBalanceResult.Memory; }); }); @@ -329,6 +335,7 @@ describe('Vaults', async () => { createdVaultData.endTimestamp, STUB_TIMESTAMP + lockLengthMs, ); + endingMemory = createVaultedTransferResult.Memory; }); it('should fail if the vault size is too small', async () => { @@ -352,6 +359,7 @@ describe('Vaults', async () => { 'Invalid quantity. Must be integer greater than or equal to 100000000 mARIO', ), ); + endingMemory = createVaultedTransferResult.Memory; }); it('should fail if the recipient address is invalid and Allow-Unsafe-Addresses is not provided', async () => { @@ -372,6 +380,7 @@ describe('Vaults', async () => { ); assert.ok(errorTag); assert(errorTag.value.includes('Invalid recipient')); + endingMemory = createVaultedTransferResult.Memory; }); it('should create a vault for the recipient with an invalid address and Allow-Unsafe-Addresses is provided', async () => { @@ -400,6 +409,7 @@ describe('Vaults', async () => { createdVaultData.endTimestamp, STUB_TIMESTAMP + lockLengthMs, ); + endingMemory = createVaultedTransferResult.Memory; }); }); @@ -443,7 +453,7 @@ describe('Vaults', async () => { let cursor = ''; let fetchedVaults = []; while (true) { - const { result: paginatedVaultsResult } = await getVaults({ + const { result: paginatedVaultsResult, memory } = await getVaults({ memory: paginatedVaultMemory, cursor, limit: 1, @@ -460,6 +470,7 @@ describe('Vaults', async () => { assert.equal(hasMore, !!nextCursor); cursor = nextCursor; fetchedVaults.push(...items); + endingMemory = memory; if (!cursor) break; } @@ -485,7 +496,7 @@ describe('Vaults', async () => { let cursor = ''; let fetchedVaults = []; while (true) { - const { result: paginatedVaultsResult } = await getVaults({ + const { result: paginatedVaultsResult, memory } = await getVaults({ memory: paginatedVaultMemory, cursor, limit: 1, @@ -504,6 +515,7 @@ describe('Vaults', async () => { assert.equal(hasMore, !!nextCursor); cursor = nextCursor; fetchedVaults.push(...items); + endingMemory = memory; if (!cursor) break; } diff --git a/tools/constants.mjs index 8af14cc8..b6ed8039 100644 --- a/tools/constants.mjs +++ b/tools/constants.mjs @@ -14,6 +14,7 @@ export const INITIAL_OPERATOR_STAKE = 10_000_000_000; // 10K ARIO export const INITIAL_DELEGATE_STAKE = 10_000_000; // 10 ARIO export const INITIAL_OWNER_BALANCE = 950_000_000_000_000; // 950M ARIO export const STUB_TIMESTAMP = 21600000; // 01-01-1970 06:00:00 (UTC) +export const STUB_PROCESS_ID = 'process-id-stub-'.padEnd(43, '0'); export const STUB_MESSAGE_ID = ''.padEnd(43, 'm'); export const STUB_HASH_CHAIN = 'NGU1fq_ssL9m6kRbRU1bqiIDBht79ckvAwRMGElkSOg'; /* ao READ-ONLY Env Variables */ diff --git a/tools/evolve.mjs index f63970a2..a47d10f3 100644 --- a/tools/evolve.mjs +++ b/tools/evolve.mjs @@ -6,7 +6,7 @@ import { execSync } from 'child_process'; const wallet = JSON.parse(process.env.WALLET); const signer = createAoSigner(new ArweaveSigner(wallet)); const networkProcess = new AOProcess({ - processId: process.env.IO_NETWORK_PROCESS_ID, // TODO:
Update to ARIO_NETWORK_PROCESS_ID + processId: process.env.ARIO_NETWORK_PROCESS_ID, ao: connect({ CU_URL: process.env.AO_CU_URL, }), diff --git a/yarn.lock b/yarn.lock index ee0d9ec6..6c52fdb4 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2,18 +2,20 @@ # yarn lockfile v1 -"@ar.io/sdk@alpha": - version "2.5.3-alpha.1" - resolved "https://registry.yarnpkg.com/@ar.io/sdk/-/sdk-2.5.3-alpha.1.tgz#f8ecbfd592dbe73850ee3bcd04fb33a16c3cf156" - integrity sha512-xPSBYJ+N/qQCwyH3UmTiPmhuTFAJwp8os7qnLARdwy4X1b3SlstNp6YVORBQlQVBmFPYpGD98PCReuHbId5xjA== +"@ar.io/sdk@^3.1.0-alpha.9": + version "3.1.0-alpha.9" + resolved "https://registry.yarnpkg.com/@ar.io/sdk/-/sdk-3.1.0-alpha.9.tgz#d6c148ec494afaf9f27523ba5be82b4ee1fb29c4" + integrity sha512-VQhI9XVNqQAjylRFNy/20glEuLOfZvuEeKrVsMX6JRDrYV3BvWeJBVG1swManaikRdEzENmCH0MwBkr6fjSVEA== dependencies: "@dha-team/arbundles" "^1.0.1" "@permaweb/aoconnect" "^0.0.57" arweave "1.14.4" - axios "1.7.7" + axios "1.7.9" axios-retry "^4.3.0" + commander "^12.1.0" eventemitter3 "^5.0.1" plimit-lit "^3.0.1" + prompts "^2.4.2" winston "^3.13.0" zod "^3.23.8" @@ -650,10 +652,10 @@ axios-retry@^4.3.0: dependencies: is-retry-allowed "^2.2.0" -axios@1.7.7: - version "1.7.7" - resolved "https://registry.yarnpkg.com/axios/-/axios-1.7.7.tgz#2f554296f9892a72ac8d8e4c5b79c14a91d0a47f" - integrity sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q== +axios@1.7.9: + version "1.7.9" + resolved "https://registry.yarnpkg.com/axios/-/axios-1.7.9.tgz#d7d071380c132a24accda1b2cfc1535b79ec650a" + integrity sha512-LhLcE7Hbiryz8oMDdDptSrWowmB4Bl6RCt6sIJKpRB4XtVf0iEgewX3au/pJqm+Py1kCASkb/FFKjxQaLtxJvw== dependencies: follow-redirects "^1.15.6" form-data "^4.0.0" @@ -897,7 +899,7 @@ combined-stream@^1.0.8: dependencies: delayed-stream "~1.0.0" -commander@~12.1.0: +commander@^12.1.0, commander@~12.1.0: version "12.1.0" resolved "https://registry.yarnpkg.com/commander/-/commander-12.1.0.tgz#01423b36f501259fdaac4d0e4d60c96c991585d3" integrity sha512-Vw8qHK3bZM9y/P10u3Vib8o/DdkvA2OtPtZvD871QKjy74Wj1WSKFILMPRPSdUSx5RFK1arlJzEtA4PkFgnbuA== @@ -1311,6 +1313,11 @@ keccak@^3.0.2: node-gyp-build "^4.2.0" readable-stream "^3.6.0" +kleur@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" + integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== + kuler@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/kuler/-/kuler-2.0.0.tgz#e2c570a3800388fb44407e851531c1d670b061b3" @@ -1610,6 +1617,14 @@ process@^0.11.10: resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" integrity sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A== +prompts@^2.4.2: + version "2.4.2" + resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.4.2.tgz#7b57e73b3a48029ad10ebd44f74b01722a4cb069" + integrity sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q== + dependencies: + kleur "^3.0.3" + sisteransi "^1.0.5" + proper-lockfile@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/proper-lockfile/-/proper-lockfile-4.1.2.tgz#c8b9de2af6b2f1601067f98e01ac66baa223141f" @@ -1775,6 +1790,11 @@ simple-swizzle@^0.2.2: dependencies: is-arrayish "^0.3.1" +sisteransi@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed" + integrity 
sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg== + slice-ansi@^5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-5.0.0.tgz#b73063c57aa96f9cd881654b15294d95d285c42a"