From f9720f9acf8198ac4cb8a8e8813ffeb6693b52e2 Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Mon, 2 Dec 2024 21:17:24 -0800 Subject: [PATCH 01/76] feat(balances): assert a positive transfer quantity PE-7219 --- src/balances.lua | 1 + src/vaults.lua | 1 + 2 files changed, 2 insertions(+) diff --git a/src/balances.lua b/src/balances.lua index a3f43ec4..fb6b54de 100644 --- a/src/balances.lua +++ b/src/balances.lua @@ -17,6 +17,7 @@ function balances.transfer(recipient, from, qty) assert(type(from) == "string", "From is required!") assert(type(qty) == "number", "Quantity is required and must be a number!") assert(utils.isInteger(qty), "Quantity must be an integer: " .. qty) + assert(qty > 0, "Quantity must be greater than 0") balances.reduceBalance(from, qty) balances.increaseBalance(recipient, qty) diff --git a/src/vaults.lua b/src/vaults.lua index b9317615..dd6a809e 100644 --- a/src/vaults.lua +++ b/src/vaults.lua @@ -50,6 +50,7 @@ end --- @param vaultId string The vault id --- @return Vault The created vault function vaults.vaultedTransfer(from, recipient, qty, lockLengthMs, currentTimestamp, vaultId) + assert(qty > 0, "Quantity must be greater than 0") assert(balances.walletHasSufficientBalance(from, qty), "Insufficient balance") assert(not vaults.getVault(recipient, vaultId), "Vault with id " .. vaultId .. 
" already exists") assert( From be47f4b2da44530918242ac0680d8ac767f70fde Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Tue, 3 Dec 2024 16:28:08 -0800 Subject: [PATCH 02/76] feat(base64): harden our implementation of base64 decoding PE-7219 --- spec/epochs_spec.lua | 6 +- src/base64.lua | 95 +++++++++---- src/base64_spec.lua | 307 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 381 insertions(+), 27 deletions(-) create mode 100644 src/base64_spec.lua diff --git a/spec/epochs_spec.lua b/spec/epochs_spec.lua index 00f50c3c..a20a6ad5 100644 --- a/spec/epochs_spec.lua +++ b/spec/epochs_spec.lua @@ -137,7 +137,7 @@ describe("epochs", function() normalizedCompositeWeight = 1 / 3, }, { - gatewayAddress = "observer2", + gatewayAddress = "observer3", observerAddress = "observerAddress", stake = gar.getSettings().operators.minStake, startTimestamp = startTimestamp, @@ -159,7 +159,7 @@ describe("epochs", function() end) end) - describe("computePrescrbiedNamesForEpoch", function() + describe("computePrescribedNamesForEpoch", function() it("should return all eligible names if fewer than the maximum in name registry", function() _G.NameRegistry.records = { ["arns-name-1"] = { @@ -225,7 +225,7 @@ describe("epochs", function() undernameLimit = 10, }, } - local expectation = { "arns-name-1", "arns-name-2", "arns-name-3", "arns-name-4", "arns-name-5" } + local expectation = { "arns-name-1", "arns-name-3", "arns-name-4", "arns-name-5", "arns-name-6" } local status, result = pcall(epochs.computePrescribedNamesForEpoch, 0, hashchain) assert.is_true(status) assert.are.equal(5, #result) diff --git a/src/base64.lua b/src/base64.lua index b46bb87b..9650f8f2 100644 --- a/src/base64.lua +++ b/src/base64.lua @@ -200,45 +200,92 @@ function base64.decode(b64, decoder, usecaching) end pattern = ("[^%%w%%%s%%%s%%=]"):format(char(s62), char(s63)) end - b64 = b64:gsub(pattern, "") - local cache = usecaching and {} - local t, k = {}, 1 + + -- Remove whitespace and invalid 
characters + b64 = b64:gsub("[\n\r%s]", ""):gsub(pattern, "") + + -- Handle excessive padding + while #b64 % 4 ~= 0 do + if b64:sub(-1) == "=" then + b64 = b64:sub(1, -2) -- Remove the last character if it's '=' + else + break -- Stop if the last character is not '=' + end + end + + -- Truncate at invalid '=' characters + local eqPos = b64:find("=") + if eqPos and eqPos < #b64 then + -- Trim excessive '=' characters within the string + b64 = b64:sub(1, eqPos - 1) + end + + -- Ensure the length is a multiple of 4 local n = #b64 local padding = b64:sub(-2) == "==" and 2 or b64:sub(-1) == "=" and 1 or 0 - - -- Adjust length to be a multiple of 4 if n % 4 ~= 0 then b64 = b64 .. string.rep("=", 4 - (n % 4)) + padding = (4 - (n % 4)) % 4 -- Recalculate padding after adjustment n = #b64 end - for i = 1, padding > 0 and n - 4 or n, 4 do + local cache = usecaching and {} + local t, k = {}, 1 + + for i = 1, n - 4, 4 do local a, b, c, d = b64:byte(i, i + 3) local s - if usecaching then - local v0 = a * 0x1000000 + b * 0x10000 + c * 0x100 + d - s = cache[v0] - if not s then + + if a and b and c and d and decoder[a] and decoder[b] and decoder[c] and decoder[d] then + if usecaching then + local v0 = a * 0x1000000 + b * 0x10000 + c * 0x100 + d + s = cache[v0] + if not s then + local v = decoder[a] * 0x40000 + decoder[b] * 0x1000 + decoder[c] * 0x40 + decoder[d] + s = char(extract(v, 16, 8), extract(v, 8, 8), extract(v, 0, 8)) + cache[v0] = s + end + else local v = decoder[a] * 0x40000 + decoder[b] * 0x1000 + decoder[c] * 0x40 + decoder[d] s = char(extract(v, 16, 8), extract(v, 8, 8), extract(v, 0, 8)) - cache[v0] = s end - else - local v = decoder[a] * 0x40000 + decoder[b] * 0x1000 + decoder[c] * 0x40 + decoder[d] - s = char(extract(v, 16, 8), extract(v, 8, 8), extract(v, 0, 8)) + t[k] = s + k = k + 1 end - t[k] = s - k = k + 1 end - if padding == 1 then - local a, b, c = b64:byte(n - 3, n - 1) - local v = decoder[a] * 0x40000 + decoder[b] * 0x1000 + decoder[c] * 0x40 - t[k] = 
char(extract(v, 16, 8), extract(v, 8, 8)) - elseif padding == 2 then - local a, b = b64:byte(n - 3, n - 2) - local v = decoder[a] * 0x40000 + decoder[b] * 0x1000 - t[k] = char(extract(v, 16, 8)) + + -- Handle the final block (based on padding) + if padding > 0 then + local a, b, c = b64:byte(n - 3, n) + local v + if padding == 1 then + if a == 61 and b == 61 and c == 61 then + -- Invalid case: final block is entirely padding + return concat(t) + elseif decoder[a] and decoder[b] and decoder[c] then + v = decoder[a] * 0x40000 + decoder[b] * 0x1000 + decoder[c] * 0x40 + t[k] = char(extract(v, 16, 8), extract(v, 8, 8)) + end + elseif padding == 2 then + if a == 61 and b == 61 then + -- Invalid case: final block is entirely padding + return concat(t) + elseif decoder[a] and decoder[b] then + v = decoder[a] * 0x40000 + decoder[b] * 0x1000 + t[k] = char(extract(v, 16, 8)) + end + end + else + local a, b, c, d = b64:byte(n - 3, n) + if decoder[a] and decoder[b] and decoder[c] and decoder[d] then + local v = decoder[a] * 0x40000 + decoder[b] * 0x1000 + decoder[c] * 0x40 + decoder[d] + t[k] = char(extract(v, 16, 8), extract(v, 8, 8), extract(v, 0, 8)) + elseif decoder[a] and decoder[b] and decoder[c] then + local v = decoder[a] * 0x40000 + decoder[b] * 0x1000 + decoder[c] * 0x40 + t[k] = char(extract(v, 16, 8), extract(v, 8, 8)) + end end + return concat(t) end diff --git a/src/base64_spec.lua b/src/base64_spec.lua new file mode 100644 index 00000000..e4be219b --- /dev/null +++ b/src/base64_spec.lua @@ -0,0 +1,307 @@ +local base64 = require("base64") + +describe("base64", function() + describe("decode", function() + describe("with the default base64 decoder", function() + it("should decode a standard base64 string without padding", function() + local input = "SGVsbG8gV29ybGQh" -- "Hello World!" in Base64 + local expected = "Hello World!" 
+ local result = base64.decode(input) + assert.are.equal(expected, result) + end) + + it("should decode a standard base64 string with single padding", function() + local input = "U29mdHdhcmU=" -- "Software" in Base64 + local expected = "Software" + local result = base64.decode(input) + assert.are.equal(expected, result) + end) + + it("should decode a standard base64 string with double padding", function() + local input = "QQ==" -- "A" in Base64 + local expected = "A" + local result = base64.decode(input) + assert.are.equal(expected, result) + end) + + it("should decode using a custom decoder", function() + local customDecoder = {} + for i = 0, 25 do + customDecoder[string.byte("A") + i] = i + customDecoder[string.byte("a") + i] = 26 + i + end + for i = 0, 9 do + customDecoder[string.byte("0") + i] = 52 + i + end + customDecoder[string.byte("-")] = 62 + customDecoder[string.byte("_")] = 63 -- Custom Base64 URL-safe alphabet + + local input = "SGVsbG8gV29ybGQh" + local expected = "Hello World!" + local result = base64.decode(input, customDecoder, false) + assert.are.equal(expected, result) + end) + + it("should handle invalid characters by stripping them out", function() + local input = "SGVsbG8g@@#%V29ybGQh" -- Invalid characters mixed in + local expected = "Hello World!" + local result = base64.decode(input) + assert.are.equal(expected, result) + end) + + it("should work correctly with caching enabled", function() + local input = "SGVsbG8gV29ybGQh" -- "Hello World!" + local expected = "Hello World!" 
+ local resultWithCaching = base64.decode(input, nil, true) + local resultWithoutCaching = base64.decode(input) + assert.are.equal(expected, resultWithCaching) + assert.are.equal(resultWithCaching, resultWithoutCaching) + end) + + it("should decode a long base64 string efficiently with caching", function() + local input = ("SGVsbG8g"):rep(1000) -- "Hello " repeated 1000 times + local expected = ("Hello "):rep(1000) + local result = base64.decode(input, nil, true) + assert.are.equal(expected, result) + end) + + it("should handle an empty string input", function() + local input = "" + local expected = "" + local result = base64.decode(input) + assert.are.equal(expected, result) + end) + + it("should handle invalid Base64 padding gracefully", function() + local input = "SGVsbG8g===" -- Invalid triple padding + local expected = "Hello " -- Trims invalid padding and decodes valid part + local result = base64.decode(input) + assert.are.equal(expected, result) + end) + + it("should handle invalid '=' characters mid-string by stopping at invalid segments", function() + local input = "SGVsb=G8gV29ybGQh" -- Invalid '=' mid-string + local expected = "Hel" -- Stops decoding at the invalid segment + local result = base64.decode(input) + assert.are.equal(expected, result) + end) + + it("should handle trailing invalid characters gracefully", function() + local input = "SGVsbG8g@" + local expected = "Hello " -- Stops decoding at invalid character + local result = base64.decode(input) + assert.are.equal(expected, result) + end) + + it("should handle Base64 strings without padding", function() + local input = "SGVsbG8" + local expected = "Hello" -- Decodes correctly without padding + local result = base64.decode(input) + assert.are.equal(expected, result) + end) + + it("should handle Base64 strings with embedded whitespace", function() + local input = "SGVs\nbG8gV29y bGQh" + local expected = "Hello World!" 
-- Ignores whitespace and decodes correctly + local result = base64.decode(input) + assert.are.equal(expected, result) + end) + end) + + describe("with base64url decoder", function() + local URL_DECODER = base64.URL_DECODER + + it("should decode a standard base64url string without padding", function() + local input = "SGVsbG8gV29ybGQh" -- "Hello World!" in Base64url + local expected = "Hello World!" + local result = base64.decode(input, URL_DECODER, false) + assert.are.equal(expected, result) + end) + + it("should decode a standard base64url string with single padding", function() + local input = "U29mdHdhcmU=" -- "Software" in Base64url + local expected = "Software" + local result = base64.decode(input, URL_DECODER, false) + assert.are.equal(expected, result) + end) + + it("should decode a standard base64url string with double padding", function() + local input = "QQ==" -- "A" in Base64url + local expected = "A" + local result = base64.decode(input, URL_DECODER, false) + assert.are.equal(expected, result) + end) + + it("should decode using a custom decoder with base64url-specific characters", function() + local input = "U29mLXdhX3N0YWtl" -- Example using '-' and '_' + local expected = "Sof-wa_stake" + local result = base64.decode(input, URL_DECODER, false) + assert.are.equal(expected, result) + end) + + it("should handle invalid characters by stripping them out in base64url", function() + local input = "SGVsbG8g@@#%V29ybGQh" -- Invalid characters mixed in + local expected = "Hello World!" + local result = base64.decode(input, URL_DECODER, false) + assert.are.equal(expected, result) + end) + + it("should work correctly with caching enabled for base64url", function() + local input = "SGVsbG8gV29ybGQh" -- "Hello World!" in Base64url + local expected = "Hello World!" 
+ local resultWithCaching = base64.decode(input, URL_DECODER, true) + local resultWithoutCaching = base64.decode(input, URL_DECODER, false) + assert.are.equal(expected, resultWithCaching) + assert.are.equal(resultWithCaching, resultWithoutCaching) + end) + + it("should decode a long base64url string efficiently with caching", function() + local input = ("SGVsbG8g"):rep(1000) -- "Hello " repeated 1000 times + local expected = ("Hello "):rep(1000) + local result = base64.decode(input, URL_DECODER, true) + assert.are.equal(expected, result) + end) + + it("should handle an empty string input for base64url", function() + local input = "" + local expected = "" + local result = base64.decode(input, URL_DECODER, false) + assert.are.equal(expected, result) + end) + + it("should handle invalid base64url padding gracefully", function() + local input = "SGVsbG8g===" -- Invalid triple padding + local expected = "Hello " -- Trims invalid padding and decodes valid part + local result = base64.decode(input, URL_DECODER, false) + assert.are.equal(expected, result) + end) + + it( + "should handle invalid '=' characters mid-string by stopping at invalid segments in base64url", + function() + local input = "SGVsb=G8gV29ybGQh" -- Invalid '=' mid-string + local expected = "Hel" -- Stops decoding at the invalid segment + local result = base64.decode(input, URL_DECODER, false) + assert.are.equal(expected, result) + end + ) + + it("should handle trailing invalid characters gracefully in base64url", function() + local input = "SGVsbG8g@" + local expected = "Hello " -- Stops decoding at invalid character + local result = base64.decode(input, URL_DECODER, false) + assert.are.equal(expected, result) + end) + + it("should handle base64url strings without padding", function() + local input = "SGVsbG8" -- "Hello" without padding + local expected = "Hello" + local result = base64.decode(input, URL_DECODER, false) + assert.are.equal(expected, result) + end) + + it("should handle base64url strings 
with embedded whitespace", function() + local input = "SGVs\nbG8gV29y bGQh" + local expected = "Hello World!" -- Ignores whitespace and decodes correctly + local result = base64.decode(input, URL_DECODER, false) + assert.are.equal(expected, result) + end) + + it("should handle excessive padding in malformed Base64 strings", function() + -- Input string with excessive padding ('==') causing it to be malformed + local input = "observer1c29tZSBzYW1wbGUgaGFzaA==" + + -- Expected result after trimming excessive '=' and decoding + local expectedHex = "a1bb1eaef7abd5cdbdb59481cd85b5c1b19481a185cda0" + + -- Decode the input string using the Base64 decoder + local decoded = base64.decode(input, base64.URL_DECODER) + + -- Convert the decoded binary data to a hexadecimal string + local function toHex(str) + return ( + str:gsub(".", function(c) + return string.format("%02x", string.byte(c)) -- Convert each character to lowercase hex + end) + ) + end + + local resultHex = toHex(decoded) + assert.are.same(expectedHex, resultHex, "Hex representation should match the expected value") + end) + end) + end) + + describe("base64.encode/decode", function() + it("should correctly encode and decode empty string", function() + local input = "" + local encoded = base64.encode(input) + local decoded = base64.decode(encoded) + assert.are.equal(input, decoded) + end) + + it("should correctly encode and decode short strings", function() + local testCases = { + ["f"] = "Zg==", + ["fo"] = "Zm8=", + ["foo"] = "Zm9v", + ["foob"] = "Zm9vYg==", + ["fooba"] = "Zm9vYmE=", + ["foobar"] = "Zm9vYmFy", + } + for input, expected in pairs(testCases) do + local encoded = base64.encode(input) + assert.are.equal(encoded, expected) + local decoded = base64.decode(encoded) + assert.are.equal(decoded, input) + end + end) + end) + + describe("base64.encode/decode with random data", function() + -- Generating random characters slows down the unit tests a bunch. Memoize all the chars we need. 
+ local function memoizeRandomBytes(maxLength) + -- Generate a single long random string + local fullRandom = {} + for _ = 1, maxLength do + fullRandom[#fullRandom + 1] = string.char(math.random(0, 255)) + end + local fullRandomStr = table.concat(fullRandom) + + return function(length) + assert(length <= maxLength, "Requested length exceeds maximum memoized length") + -- Optionally use a random offset for randomness + local offset = math.random(0, maxLength - length) + return fullRandomStr:sub(offset + 1, offset + length) + end + end + + -- Memoized random generator for up to 64 bytes + local getRandomBytes = memoizeRandomBytes(128) + + it("should correctly handle random byte sequences", function() + -- Representative lengths for coverage + local lengths = { 2, 20, 128 } + for _, length in ipairs(lengths) do + local input = getRandomBytes(length) + local encoded = base64.encode(input) + local decoded = base64.decode(encoded) + assert.are.equal(input, decoded) + end + end) + end) + + describe("base64.encode/decode with invalid inputs", function() + local invalidInputs = { nil, 123, {}, true, function() end } + for _, input in ipairs(invalidInputs) do + it("should throw error for invalid input", function() + assert.has_error(function() + base64.encode(input) + end) + assert.has_error(function() + base64.decode(input) + end) + end) + end + end) +end) From 03bb6768320a28e4cdb364268777e6c797ae9fc3 Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Tue, 3 Dec 2024 16:42:13 -0800 Subject: [PATCH 03/76] feat(primary names): remove redundant assert PE-7219 --- src/primary_names.lua | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/primary_names.lua b/src/primary_names.lua index debad165..1ac45d0b 100644 --- a/src/primary_names.lua +++ b/src/primary_names.lua @@ -144,9 +144,6 @@ function primaryNames.approvePrimaryNameRequest(recipient, name, from, timestamp assert(record, "ArNS record '" .. baseName .. 
"' does not exist") assert(record.processId == from, "Primary name request must be approved by the owner of the base name") - -- assert the name matches the request - assert(request.name == name, "Provided name does not match the primary name request") - -- set the primary name local newPrimaryName = primaryNames.setPrimaryNameFromRequest(recipient, request, timestamp) return { From 98359c66b58e7ff0dbadac3ea8324563cc2cb90f Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Tue, 3 Dec 2024 19:54:27 -0800 Subject: [PATCH 04/76] test(epochs): update test values to provide for stable sorting PE-7219 --- spec/epochs_spec.lua | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/spec/epochs_spec.lua b/spec/epochs_spec.lua index a20a6ad5..d456d1b1 100644 --- a/spec/epochs_spec.lua +++ b/spec/epochs_spec.lua @@ -160,23 +160,26 @@ describe("epochs", function() end) describe("computePrescribedNamesForEpoch", function() + -- NOTE: Record names in the tests below use spelled out numbers because without that + -- there's insufficient base64url information encoded in the final encoded block to + -- disambiguate the decoded vallues. 
it("should return all eligible names if fewer than the maximum in name registry", function() _G.NameRegistry.records = { - ["arns-name-1"] = { + ["arns-name-one"] = { startTimestamp = startTimestamp, endTimestamp = startTimestamp + 60 * 1000 * 60 * 24 * 365, -- add a year type = "lease", purchasePrice = 0, undernameLimit = 10, }, - ["arns-name-2"] = { + ["arns-name-two"] = { startTimestamp = startTimestamp, type = "permabuy", purchasePrice = 0, undernameLimit = 10, }, } - local expectation = { "arns-name-1", "arns-name-2" } + local expectation = { "arns-name-two", "arns-name-one" } local status, result = pcall(epochs.computePrescribedNamesForEpoch, 0, hashchain) assert.is_true(status) assert.are.equal(2, #result) @@ -185,39 +188,39 @@ describe("epochs", function() it("should return a subset of eligible names if more than the maximum in the name registry", function() _G.NameRegistry.records = { - ["arns-name-1"] = { + ["arns-name-one"] = { startTimestamp = startTimestamp, endTimestamp = startTimestamp + 60 * 1000 * 60 * 24 * 365, -- add a year type = "lease", purchasePrice = 0, undernameLimit = 10, }, - ["arns-name-2"] = { + ["arns-name-two"] = { startTimestamp = startTimestamp, type = "permabuy", purchasePrice = 0, undernameLimit = 10, }, - ["arns-name-3"] = { + ["arns-name-three"] = { startTimestamp = startTimestamp, endTimestamp = startTimestamp + 60 * 1000 * 60 * 24 * 365, -- add a year type = "lease", purchasePrice = 0, undernameLimit = 10, }, - ["arns-name-4"] = { + ["arns-name-four"] = { startTimestamp = startTimestamp, type = "permabuy", purchasePrice = 0, undernameLimit = 10, }, - ["arns-name-5"] = { + ["arns-name-five"] = { startTimestamp = startTimestamp, type = "permabuy", purchasePrice = 0, undernameLimit = 10, }, - ["arns-name-6"] = { + ["arns-name-six"] = { startTimestamp = startTimestamp, endTimestamp = startTimestamp + 60 * 1000 * 60 * 24 * 365, -- add a year type = "lease", @@ -225,7 +228,8 @@ describe("epochs", function() undernameLimit = 10, }, } 
- local expectation = { "arns-name-1", "arns-name-3", "arns-name-4", "arns-name-5", "arns-name-6" } + local expectation = + { "arns-name-five", "arns-name-four", "arns-name-one", "arns-name-three", "arns-name-two" } local status, result = pcall(epochs.computePrescribedNamesForEpoch, 0, hashchain) assert.is_true(status) assert.are.equal(5, #result) From 185226b50522e01b91404f0ef373697137151aa8 Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Tue, 3 Dec 2024 19:54:51 -0800 Subject: [PATCH 05/76] feat(stream): basic asserts for stream functions PE-7219 --- src/crypto/util/stream.lua | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/crypto/util/stream.lua b/src/crypto/util/stream.lua index 938205a9..bae35528 100644 --- a/src/crypto/util/stream.lua +++ b/src/crypto/util/stream.lua @@ -3,6 +3,7 @@ local Queue = require(".crypto.util.queue") local Stream = {} Stream.fromString = function(string) + assert(type(string) == "string", "expected string, got " .. type(string)) local i = 0 return function() i = i + 1 @@ -25,6 +26,7 @@ Stream.toString = function(stream) end Stream.fromArray = function(array) + assert(type(array) == "table", "expected table, got " .. type(array)) local queue = Queue() local i = 1 From 3c12ce8183b621333bd11b1e9c18864bc16367c3 Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Tue, 3 Dec 2024 19:58:38 -0800 Subject: [PATCH 06/76] feat(stream): basic asserts for fromHex function PE-7219 --- src/crypto/util/stream.lua | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/crypto/util/stream.lua b/src/crypto/util/stream.lua index bae35528..a3f10259 100644 --- a/src/crypto/util/stream.lua +++ b/src/crypto/util/stream.lua @@ -61,6 +61,8 @@ for i = 0, 255 do end Stream.fromHex = function(hex) + assert(type(hex) == "string", "expected string, got " .. 
type(hex)) + assert(string.len(hex) % 2 == 0, "expected even-length string") local queue = Queue() for i = 1, string.len(hex) / 2 do From 850f91e9ded5f78405fc4ae7a6ccc69bda30c494 Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Tue, 3 Dec 2024 20:52:13 -0800 Subject: [PATCH 07/76] feat(auctions): basic assertions for new auctions PE-7219 --- src/auctions.lua | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/auctions.lua b/src/auctions.lua index 21057d82..5a20dc59 100644 --- a/src/auctions.lua +++ b/src/auctions.lua @@ -1,4 +1,5 @@ local Auction = {} +local utils = require("utils") -- Default Auction Settings AuctionSettings = { @@ -32,6 +33,13 @@ AuctionSettings = { --- @param registrationFeeCalculator function Function to calculate registration fee that supports type, baseFee, years, demandFactor --- @return Auction The new Auction instance function Auction:new(name, startTimestamp, demandFactor, baseFee, initiator, registrationFeeCalculator) + assert(type(name) == "string" and #name > 0, "name must be a string with length > 0") + -- TODO: Use new assertions for valid addresses from latest codebase on initiator + assert(utils.isInteger(startTimestamp) and startTimestamp > 0, "startTimestamp must be a positive integer") + assert(demandFactor > 0, "demandFactor must be a positive number") + assert(utils.isInteger(baseFee) and baseFee > 0, "baseFee must be a positive integer") + assert(type(registrationFeeCalculator) == "function", "registrationFeeCalculator must be a function") + local auction = { name = name, initiator = initiator, From 18815f800ae510e7961fbe7e342cb2e825fe5d7e Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Tue, 3 Dec 2024 21:03:52 -0800 Subject: [PATCH 08/76] feat(auctions): assert minimum price interval ms PE-7219 --- src/auctions.lua | 5 +++++ src/constants.lua | 1 + 2 files changed, 6 insertions(+) diff --git a/src/auctions.lua b/src/auctions.lua index 5a20dc59..6fd2bb48 100644 --- a/src/auctions.lua +++ b/src/auctions.lua @@ 
-1,5 +1,6 @@ local Auction = {} local utils = require("utils") +local constants = require("constants") -- Default Auction Settings AuctionSettings = { @@ -66,6 +67,10 @@ end --- @param intervalMs number The interval in milliseconds, must be at least 15 minutes --- @return table A table of prices indexed by timestamp function Auction:computePricesForAuction(type, years, intervalMs) + assert( + utils.isInteger(intervalMs) and intervalMs >= constants.MIN_PRICE_INTERVAL_MS, + "intervalMs must be an integer >= " .. constants.MIN_PRICE_INTERVAL_MS .. "ms (15 minutes)" + ) local prices = {} for i = self.startTimestamp, self.endTimestamp, intervalMs do local priceAtTimestamp = self:getPriceForAuctionAtTimestamp(i, type, years) diff --git a/src/constants.lua b/src/constants.lua index 8bc63873..dca2a77a 100644 --- a/src/constants.lua +++ b/src/constants.lua @@ -101,5 +101,6 @@ constants.genesisFees = { [50] = 400000000, [51] = 400000000, } +constants.MIN_PRICE_INTERVAL_MS = 15 * 60 * 1000 -- 15 minutes return constants From c23dbbe3d913261898a060a90ba15c5ad989c7ae Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Tue, 3 Dec 2024 21:08:21 -0800 Subject: [PATCH 09/76] feat(arns): basic assertion for name length PE-7219 --- src/demand.lua | 1 + 1 file changed, 1 insertion(+) diff --git a/src/demand.lua b/src/demand.lua index 873b6ba7..d3b92d07 100644 --- a/src/demand.lua +++ b/src/demand.lua @@ -57,6 +57,7 @@ end --- @param nameLength number The length of the name --- @return number #The base fee for the name length function demand.baseFeeForNameLength(nameLength) + assert(utils.isInteger(nameLength) and nameLength > 0, "nameLength must be a positive integer") return demand.getFees()[nameLength] end From 56194f9296c5a67a413d9f56f632963a29818b39 Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Tue, 3 Dec 2024 21:11:14 -0800 Subject: [PATCH 10/76] feat(demand factoring): remove redundant check PE-7219 --- src/demand.lua | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/src/demand.lua b/src/demand.lua index d3b92d07..91baa6ab 100644 --- a/src/demand.lua +++ b/src/demand.lua @@ -140,7 +140,7 @@ function demand.updateDemandFactor(timestamp) local settings = demand.getSettings() -- check that we have settings - if not demand.shouldUpdateDemandFactor(timestamp) or not settings then + if not settings then print("No settings found") return demand.getDemandFactor() end From b852c2b8f7da5893392eb6b192e53bc444521dda Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Thu, 5 Dec 2024 13:20:33 -0800 Subject: [PATCH 11/76] feat(epochs): save cycles when names count is equal to prescribed number PE-7238 --- src/epochs.lua | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/epochs.lua b/src/epochs.lua index be81a2d6..c37f6297 100644 --- a/src/epochs.lua +++ b/src/epochs.lua @@ -181,7 +181,7 @@ function epochs.computePrescribedNamesForEpoch(epochIndex, hashchain) return nameAString < nameBString end) - if #activeArNSNames < epochs.getSettings().prescribedNameCount then + if #activeArNSNames <= epochs.getSettings().prescribedNameCount then return activeArNSNames end From 0e356790064f1a8b42615537237a74e80937a68d Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Thu, 5 Dec 2024 13:31:33 -0800 Subject: [PATCH 12/76] feat(epochs): remove redundant sort PE-7238 --- src/epochs.lua | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/epochs.lua b/src/epochs.lua index c37f6297..ac8dfe58 100644 --- a/src/epochs.lua +++ b/src/epochs.lua @@ -284,9 +284,6 @@ function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) end, {}) for address, _ in pairs(prescribedObserversAddressesLookup) do table.insert(prescribedObservers, filteredObserversAddressMap[address]) - table.sort(prescribedObservers, function(a, b) - return a.normalizedCompositeWeight > b.normalizedCompositeWeight - end) end -- sort them in place From 890e14610327b6c1ba3ce65dcff5341c9352ba92 Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Thu, 5 
Dec 2024 14:40:01 -0800 Subject: [PATCH 13/76] feat(utils): harden isInteger PE-7238 --- spec/utils_spec.lua | 117 ++++++++++++++++++++++++++++++++++++++------ src/utils.lua | 2 +- 2 files changed, 102 insertions(+), 17 deletions(-) diff --git a/spec/utils_spec.lua b/spec/utils_spec.lua index d0d38eb6..16fb85ef 100644 --- a/spec/utils_spec.lua +++ b/spec/utils_spec.lua @@ -775,22 +775,107 @@ describe("utils", function() end) describe("isInteger", function() - it("should return true for an integer", function() - local input = 1 - local result = utils.isInteger(input) - assert.is_true(result) - end) - - it("should return false for a non-integer", function() - local input = 1.234 - local result = utils.isInteger(input) - assert.is_false(result) - end) - - it("should convert string to integer", function() - local input = "1" - local result = utils.isInteger(input) - assert.is_true(result) + it("should return true for valid integers", function() + assert.is_true(utils.isInteger(0)) + assert.is_true(utils.isInteger(-1)) + assert.is_true(utils.isInteger(123456789)) + assert.is_true(utils.isInteger("0")) + assert.is_true(utils.isInteger("-1")) + assert.is_true(utils.isInteger("123456789")) + end) + + it("should return false for non-integer floating-point numbers", function() + assert.is_false(utils.isInteger(1.23)) + assert.is_false(utils.isInteger(-0.456)) + assert.is_false(utils.isInteger("1.23")) + assert.is_false(utils.isInteger("-0.456")) + end) + + it("should return true for integer floating-point numbers", function() + assert.is_true(utils.isInteger(1.0)) + assert.is_true(utils.isInteger(1.)) + assert.is_true(utils.isInteger(-100.0)) + assert.is_true(utils.isInteger(0.0)) + assert.is_true(utils.isInteger(-0.0)) + assert.is_true(utils.isInteger("1.0")) + assert.is_true(utils.isInteger("-100.0")) + assert.is_true(utils.isInteger("1.")) + end) + + it("should return true for integers in scientific notation", function() + assert.is_true(utils.isInteger("1e3")) -- 1000 + 
assert.is_true(utils.isInteger("-1e3")) -- -1000 + assert.is_true(utils.isInteger("1.0e3")) -- 1000 + assert.is_true(utils.isInteger("-1.0e3")) -- -1000 + assert.is_true(utils.isInteger("1.23e3")) -- 1230 + assert.is_true(utils.isInteger("-1.23e3")) -- -1230 + end) + + it("should return false for non-integers in scientific notation", function() + assert.is_false(utils.isInteger("1.23e-3")) -- 0.00123 + assert.is_false(utils.isInteger("-1.23e-3")) -- -0.00123 + end) + + it("should return true for hexadecimal integers and hexadecimal integer floats", function() + assert.is_true(utils.isInteger("0x1F")) -- 31 + assert.is_true(utils.isInteger("0xABC")) -- 2748 + assert.is_true(utils.isInteger("-0x10")) -- -16 + assert.is_true(utils.isInteger("0x1.8p3")) -- 12.0 + end) + + it("should return false for hexadecimal floats", function() + assert.is_false(utils.isInteger("-0x1.921fbp+1")) -- ~3.14 + end) + + it("should return false for invalid strings", function() + assert.is_false(utils.isInteger("123abc")) + assert.is_false(utils.isInteger("1.2.3")) + assert.is_false(utils.isInteger("1.0e--2")) + assert.is_false(utils.isInteger("abc")) + assert.is_false(utils.isInteger("")) + end) + + it("should handle edge cases for `inf` and `nan`", function() + assert.is_false(utils.isInteger(math.huge)) -- Infinity + assert.is_false(utils.isInteger(-math.huge)) -- -Infinity + assert.is_false(utils.isInteger(0 / 0)) -- NaN + assert.is_false(utils.isInteger("inf")) + assert.is_false(utils.isInteger("-inf")) + assert.is_false(utils.isInteger("nan")) + end) + + it("should handle large and small numbers", function() + assert.is_true(utils.isInteger("1.7976931348623157e+308")) -- Max finite value, treated as integer + assert.is_false(utils.isInteger("4.9406564584124654e-324")) -- Min positive subnormal value, not an integer + assert.is_false(utils.isInteger("-4.9406564584124654e-324")) + end) + + it("should handle negative zero", function() + assert.is_true(utils.isInteger(-0.0)) + 
assert.is_true(utils.isInteger("0.0")) + assert.is_true(utils.isInteger("-0.0")) + end) + + it("should handle numbers with leading zeros", function() + assert.is_true(utils.isInteger("000123")) + assert.is_true(utils.isInteger("000000")) + assert.is_true(utils.isInteger("-000456")) + end) + + it("should return false for non-numbers and non-integer strings", function() + assert.is_false(utils.isInteger({})) + assert.is_false(utils.isInteger(nil)) + assert.is_false(utils.isInteger(true)) + assert.is_false(utils.isInteger(false)) + assert.is_false(utils.isInteger(function() end)) + assert.is_false(utils.isInteger("true")) + assert.is_false(utils.isInteger("false")) + assert.is_false(utils.isInteger("foo")) + assert.is_false(utils.isInteger("1.234")) + assert.is_false(utils.isInteger("1.0e-10")) + assert.is_false(utils.isInteger("1.0e")) -- not a valid lua number + assert.is_false(utils.isInteger("1.0e-")) -- not a valid lua number + assert.is_false(utils.isInteger("1.0e+")) -- not a valid lua number end) end) end) diff --git a/src/utils.lua b/src/utils.lua index 12fadadf..67a862a0 100644 --- a/src/utils.lua +++ b/src/utils.lua @@ -14,7 +14,7 @@ function utils.isInteger(value) if type(value) == "string" then value = tonumber(value) end - return value % 1 == 0 + return type(value) == "number" and value % 1 == 0 end --- Rounds a number to a given precision From e359dbf700f0cd92eb0c6a58fe308a3f3183ff45 Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Thu, 5 Dec 2024 14:44:40 -0800 Subject: [PATCH 14/76] feat(utils): harden sumTableValues PE-7238 --- src/utils.lua | 1 + 1 file changed, 1 insertion(+) diff --git a/src/utils.lua b/src/utils.lua index 67a862a0..47c1e7d5 100644 --- a/src/utils.lua +++ b/src/utils.lua @@ -31,6 +31,7 @@ end function utils.sumTableValues(tbl) local sum = 0 for _, value in pairs(tbl) do + assert(type(value) == "number", "Table values must be numbers. Found: " .. 
type(value)) sum = sum + value end return sum From 747d22800e824e1ae0ce21beb95c41899bb8c041 Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Thu, 5 Dec 2024 14:51:49 -0800 Subject: [PATCH 15/76] feat(utils): harden splitString PE-7238 --- spec/utils_spec.lua | 6 ++++++ src/utils.lua | 12 ++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/spec/utils_spec.lua b/spec/utils_spec.lua index 16fb85ef..9d97e071 100644 --- a/spec/utils_spec.lua +++ b/spec/utils_spec.lua @@ -377,6 +377,12 @@ describe("utils", function() local result = utils.splitAndTrimString(input) assert.are.same({ "apple", "banana", "cherry" }, result) end) + + it("should handle regex characters as delimiters", function() + local input = "apple|banana.cherry[date]eggplant" + local result = utils.splitAndTrimString(input, "[|.]") + assert.are.same({ "apple", "banana", "cherry", "date", "eggplant" }, result) + end) end) describe("createLookupTable", function() diff --git a/src/utils.lua b/src/utils.lua index 47c1e7d5..679c4e75 100644 --- a/src/utils.lua +++ b/src/utils.lua @@ -411,12 +411,20 @@ function utils.getHashFromBase64URL(str) return crypto.digest.sha2_256(hashStream).asBytes() end +--- Escapes Lua pattern characters in a string +--- @param str string The string to escape +--- @return string # The escaped string +local function escapePattern(str) + return (str:gsub("([%^%$%(%)%%%.%[%]%*%+%-%?])", "%%%1")) +end + --- Splits a string by a delimiter --- @param input string The string to split ---- @param delimiter string The delimiter to split by ---- @return table The split string +--- @param delimiter string|nil The delimiter to split by +--- @return table # The split string function utils.splitString(input, delimiter) delimiter = delimiter or "," + delimiter = escapePattern(delimiter) local result = {} for token in (input or ""):gmatch(string.format("([^%s]+)", delimiter)) do table.insert(result, token) From c315d720b7eef323ce7052245294619624baa8fd Mon Sep 17 00:00:00 
2001 From: Ariel Melendez Date: Thu, 5 Dec 2024 14:54:40 -0800 Subject: [PATCH 16/76] feat(vaults): ensure quantities are positive values PE-7238 --- src/vaults.lua | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/vaults.lua b/src/vaults.lua index dd6a809e..f2d7f510 100644 --- a/src/vaults.lua +++ b/src/vaults.lua @@ -21,6 +21,7 @@ local constants = require("constants") --- @param vaultId string The vault id --- @return Vault The created vault function vaults.createVault(from, qty, lockLengthMs, currentTimestamp, vaultId) + assert(qty > 0, "Quantity must be greater than 0") assert(not vaults.getVault(from, vaultId), "Vault with id " .. vaultId .. " already exists") assert(balances.walletHasSufficientBalance(from, qty), "Insufficient balance") assert( @@ -102,6 +103,7 @@ end --- @param currentTimestamp number The current timestamp --- @return Vault The increased vault function vaults.increaseVault(from, qty, vaultId, currentTimestamp) + assert(qty > 0, "Quantity must be greater than 0") assert(balances.walletHasSufficientBalance(from, qty), "Insufficient balance") local vault = vaults.getVault(from, vaultId) From f3b2b3175c81378b52a8816b955124f3abc03366 Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Thu, 5 Dec 2024 14:59:03 -0800 Subject: [PATCH 17/76] feat(funding plans): ensure funding plans applied fully PE-7238 --- src/arns.lua | 3 +++ src/primary_names.lua | 1 + 2 files changed, 4 insertions(+) diff --git a/src/arns.lua b/src/arns.lua index 77546b0f..c03d0335 100644 --- a/src/arns.lua +++ b/src/arns.lua @@ -286,6 +286,7 @@ function arns.increaseundernameLimit(from, name, qty, currentTimestamp, msgId, f local fundingPlan = gar.getFundingPlan(from, additionalUndernameCost, fundFrom) assert(fundingPlan and fundingPlan.shortfall == 0 or false, "Insufficient balances") local fundingResult = gar.applyFundingPlan(fundingPlan, msgId, currentTimestamp) + assert(fundingResult.totalFunded == additionalUndernameCost, "Funding plan application failed") -- update 
the record with the new undername count arns.modifyRecordundernameLimit(name, qty) @@ -738,6 +739,7 @@ function arns.upgradeRecord(from, name, currentTimestamp, msgId, fundFrom) local fundingPlan = gar.getFundingPlan(from, upgradeCost, fundFrom) assert(fundingPlan and fundingPlan.shortfall == 0 or false, "Insufficient balances") local fundingResult = gar.applyFundingPlan(fundingPlan, msgId, currentTimestamp) + assert(fundingResult.totalFunded == upgradeCost, "Funding plan application failed") balances.increaseBalance(ao.id, upgradeCost) demand.tallyNamePurchase(upgradeCost) @@ -883,6 +885,7 @@ function arns.submitAuctionBid(name, bidAmount, bidder, timestamp, processId, ty -- apply the funding plan local fundingResult = gar.applyFundingPlan(fundingPlan, msgId, timestamp) + assert(fundingResult.totalFunded == finalBidAmount, "Funding plan application failed") local record = { processId = processId, diff --git a/src/primary_names.lua b/src/primary_names.lua index 1ac45d0b..beaa8d96 100644 --- a/src/primary_names.lua +++ b/src/primary_names.lua @@ -72,6 +72,7 @@ function primaryNames.createPrimaryNameRequest(name, initiator, timestamp, msgId local fundingPlan = gar.getFundingPlan(initiator, PRIMARY_NAME_COST, fundFrom) assert(fundingPlan and fundingPlan.shortfall == 0, "Insufficient balances") local fundingResult = gar.applyFundingPlan(fundingPlan, msgId, timestamp) + assert(fundingResult.totalFunded == PRIMARY_NAME_COST, "Funding plan application failed") --- transfer the primary name cost from the initiator to the protocol balance balances.increaseBalance(ao.id, PRIMARY_NAME_COST) From 537243f4459b18c464b5710e235bbe7bcf535ec9 Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Thu, 12 Dec 2024 15:31:00 -0600 Subject: [PATCH 18/76] refactor: ensure no vaultedTransfer/transfer to ones self PE-7289 --- src/balances.lua | 1 + src/vaults.lua | 1 + 2 files changed, 2 insertions(+) diff --git a/src/balances.lua b/src/balances.lua index fb6b54de..fa8c7e1c 100644 --- 
a/src/balances.lua +++ b/src/balances.lua @@ -16,6 +16,7 @@ function balances.transfer(recipient, from, qty) assert(type(recipient) == "string", "Recipient is required!") assert(type(from) == "string", "From is required!") assert(type(qty) == "number", "Quantity is required and must be a number!") + assert(recipient ~= from, "Cannot transfer to self") assert(utils.isInteger(qty), "Quantity must be an integer: " .. qty) assert(qty > 0, "Quantity must be greater than 0") diff --git a/src/vaults.lua b/src/vaults.lua index f2d7f510..bfe445b4 100644 --- a/src/vaults.lua +++ b/src/vaults.lua @@ -52,6 +52,7 @@ end --- @return Vault The created vault function vaults.vaultedTransfer(from, recipient, qty, lockLengthMs, currentTimestamp, vaultId) assert(qty > 0, "Quantity must be greater than 0") + assert(recipient ~= from, "Cannot transfer to self") assert(balances.walletHasSufficientBalance(from, qty), "Insufficient balance") assert(not vaults.getVault(recipient, vaultId), "Vault with id " .. vaultId .. " already exists") assert( From acfe43f449fc6ad6d675a1fa883bf0f102bdd94c Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Thu, 12 Dec 2024 15:54:44 -0600 Subject: [PATCH 19/76] refactor: assert a fee was found for the name length PE-7289 --- src/demand.lua | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/demand.lua b/src/demand.lua index 91baa6ab..cb2ec0ab 100644 --- a/src/demand.lua +++ b/src/demand.lua @@ -58,7 +58,9 @@ end --- @return number #The base fee for the name length function demand.baseFeeForNameLength(nameLength) assert(utils.isInteger(nameLength) and nameLength > 0, "nameLength must be a positive integer") - return demand.getFees()[nameLength] + local fee = demand.getFees()[nameLength] + assert(fee, "No fee found for name length: " .. 
nameLength) + return fee end --- Gets the moving average of trailing purchase counts From 5a1f454c7fbfe328db3aa39311c8efb920562f91 Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Thu, 12 Dec 2024 16:24:55 -0600 Subject: [PATCH 20/76] refactor: assert token cost years and quantity values as described PE-7289 --- src/main.lua | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/main.lua b/src/main.lua index ea7f4e80..77bca213 100644 --- a/src/main.lua +++ b/src/main.lua @@ -870,12 +870,14 @@ function assertTokenCostTags(msg) assert(msg.Tags.Name, "Name is required") -- if years is provided, assert it is a number and integer between 1 and 5 if msg.Tags.Years then - assert(utils.isInteger(msg.Tags.Years), "Invalid years. Must be integer between 1 and 5") + assert(utils.isInteger(msg.Tags.Years), "Invalid years. Must be integer") + assert(msg.Tags.Years > 0 and msg.Tags.Years < 6, "Invalid years. Must be between 1 and 5") end -- if quantity provided must be a number and integer greater than 0 if msg.Tags.Quantity then - assert(utils.isInteger(msg.Tags.Quantity), "Invalid quantity. Must be integer greater than 0") + assert(utils.isInteger(msg.Tags.Quantity), "Invalid quantity. Must be integer") + assert(msg.Tags.Quantity > 0, "Invalid quantity. 
Must be greater than 0") end end From f79c8f7c617ab7213528cc80eef1bd9f319903a1 Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Thu, 12 Dec 2024 16:58:34 -0600 Subject: [PATCH 21/76] refactor: handle value is nil in isInteger PE-7289 --- src/utils.lua | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/utils.lua b/src/utils.lua index 679c4e75..f7897c4b 100644 --- a/src/utils.lua +++ b/src/utils.lua @@ -11,6 +11,9 @@ end --- @param value any The value to check --- @return boolean isInteger - whether the value is an integer function utils.isInteger(value) + if value == nil then + return false + end if type(value) == "string" then value = tonumber(value) end From b193bc64f2087f040cc1def6c953c6b64e27cf3b Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Fri, 13 Dec 2024 12:05:40 -0600 Subject: [PATCH 22/76] refactor: use vault helpers checking for existing vaults to prevent loss of funds PE-7289 --- src/gar.lua | 186 ++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 138 insertions(+), 48 deletions(-) diff --git a/src/gar.lua b/src/gar.lua index f217fdcd..675e9662 100644 --- a/src/gar.lua +++ b/src/gar.lua @@ -187,21 +187,19 @@ function gar.leaveNetwork(from, currentTimestamp, msgId) -- if the slash happens to be 100% we do not need to vault anything if minimumStakedTokens > 0 then - gateway.vaults[from] = { - balance = minimumStakedTokens, - startTimestamp = currentTimestamp, - endTimestamp = gatewayEndTimestamp, - } + gar.createGatewayWithdrawVault(gateway, from, minimumStakedTokens, currentTimestamp, gatewayEndTimestamp) -- if there is more than the minimum staked tokens, we need to vault the rest but on shorter term local remainingStake = gateway.operatorStake - gar.getSettings().operators.minStake if remainingStake > 0 then - gateway.vaults[msgId] = { - balance = remainingStake, - startTimestamp = currentTimestamp, - endTimestamp = gatewayStakeWithdrawTimestamp, - } + gar.createGatewayWithdrawVault( + gateway, + msgId, + 
remainingStake, + currentTimestamp, + gatewayStakeWithdrawTimestamp + ) end end @@ -299,11 +297,13 @@ function gar.decreaseOperatorStake(from, qty, currentTimestamp, msgId, instantWi -- Calculate the penalty and withdraw using the utility function expeditedWithdrawalFee, amountToWithdraw, penaltyRate = processInstantWithdrawal(qty, 0, 0, from) else - gateway.vaults[msgId] = { - balance = qty, - startTimestamp = currentTimestamp, - endTimestamp = currentTimestamp + gar.getSettings().operators.withdrawLengthMs, - } + gar.createGatewayWithdrawVault( + gateway, + msgId, + qty, + currentTimestamp, + currentTimestamp + gar.getSettings().operators.withdrawLengthMs + ) end -- Update the gateway @@ -607,11 +607,7 @@ function gar.decreaseDelegateStake(gatewayAddress, delegator, qty, currentTimest -- Calculate the penalty and withdraw using the utility function and move the balances expeditedWithdrawalFee, amountToWithdraw, penaltyRate = processInstantWithdrawal(qty, 0, 0, delegator) else - -- Withdraw the delegate's stake - local newDelegateVault = gar.createDelegateVault(qty, currentTimestamp) - - -- Lock the qty in a vault to be unlocked after withdrawal period and decrease the gateway's total delegated stake - gateway.delegates[delegator].vaults[messageId] = newDelegateVault + gar.createDelegateWithdrawVault(gateway, delegator, messageId, qty, currentTimestamp) end decreaseDelegateStakeAtGateway(delegator, gateway, qty) @@ -906,18 +902,17 @@ function gar.pruneGateways(currentTimestamp, msgId) -- first, return any expired vaults regardless of the gateway status for vaultId, vault in pairs(gateway.vaults) do if vault.endTimestamp <= currentTimestamp then - balances.increaseBalance(address, vault.balance) + gar.fulfillGatewayWithdrawVault(gateway, address, vaultId) + result.gatewayStakeReturned = result.gatewayStakeReturned + vault.balance - gateway.vaults[vaultId] = nil end end -- return any delegated vaults and return the stake to the delegate for delegateAddress, delegate 
in pairs(gateway.delegates) do for vaultId, vault in pairs(delegate.vaults) do if vault.endTimestamp <= currentTimestamp then - balances.increaseBalance(delegateAddress, vault.balance) + gar.fulfillGatewayDelegateVault(gateway, delegateAddress, vaultId) result.delegateStakeReturned = result.delegateStakeReturned + vault.balance - delegate.vaults[vaultId] = nil end end end @@ -1068,12 +1063,9 @@ function gar.cancelGatewayWithdrawal(from, gatewayAddress, vaultId) local previousTotalDelegatedStake = gateway.totalDelegatedStake local vaultBalance = existingVault.balance if isGatewayWithdrawal then - gateway.vaults[vaultId] = nil - gateway.operatorStake = gateway.operatorStake + vaultBalance + gar.cancelGatewayWithdrawVault(gateway, vaultId) else - assert(gar.delegateAllowedToStake(from, gateway), "This Gateway does not allow this delegate to stake.") - delegate.vaults[vaultId] = nil - increaseDelegateStakeAtGateway(delegate, gateway, vaultBalance) + gar.cancelGatewayDelegateVault(gateway, from, vaultId) end GatewayRegistry[gatewayAddress] = gateway return { @@ -1263,7 +1255,7 @@ function gar.kickDelegateFromGateway(delegateAddress, gateway, msgId, currentTim local remainingStake = delegate.delegatedStake if remainingStake > 0 then - delegate.vaults[msgId] = gar.createDelegateVault(delegate.delegatedStake, currentTimestamp) + gar.createDelegateWithdrawVault(gateway, delegateAddress, msgId, remainingStake, currentTimestamp) end decreaseDelegateStakeAtGateway(delegateAddress, gateway, remainingStake, ban) end @@ -1555,8 +1547,13 @@ function gar.applyFundingPlan(fundingPlan, msgId, currentTimestamp) -- create an exit vault for the remaining stake if less than the gateway's minimum if delegate.delegatedStake > 0 and delegate.delegatedStake < gateway.settings.minDelegatedStake then - -- create a vault for the remaining stake - delegate.vaults[msgId] = gar.createDelegateVault(delegate.delegatedStake, currentTimestamp) + gar.createDelegateWithdrawVault( + gateway, + 
fundingPlan.address, + msgId, + delegate.delegatedStake, + currentTimestamp + ) decreaseDelegateStakeAtGateway(fundingPlan.address, gateway, delegate.delegatedStake) appliedPlan.newWithdrawVaults[gatewayAddress] = { [msgId] = utils.deepCopy(delegate.vaults[msgId]), @@ -1748,14 +1745,7 @@ function gar.redelegateStake(params) "Quantity must be less than or equal to the vaulted stake amount." ) - if existingVault.balance == stakeToTakeFromSource then - -- The operator vault has been emptied - sourceGateway.vaults[vaultId] = nil - else - -- The operator vault has been partially emptied - sourceGateway.vaults[delegateAddress][vaultId].balance = sourceGateway.vaults[delegateAddress][vaultId].balance - - stakeToTakeFromSource - end + gar.reduceStakeFromGatewayVault(sourceGateway, stakeToTakeFromSource, vaultId) else -- Get the redelegation amount from the operator stakes local maxWithdraw = sourceGateway.operatorStake - gar.getSettings().operators.minStake @@ -1780,14 +1770,7 @@ function gar.redelegateStake(params) "Quantity must be less than or equal to the vaulted stake amount." 
) - if existingVault.balance == stakeToTakeFromSource then - -- The vault has been emptied - sourceGateway.delegates[delegateAddress].vaults[vaultId] = nil - gar.pruneDelegateFromGatewayIfNecessary(delegateAddress, sourceGateway) - else - -- The vault has been partially emptied - existingVault.balance = existingVault.balance - stakeToTakeFromSource - end + gar.reduceStakeFromDelegateVault(sourceGateway, delegateAddress, stakeToTakeFromSource, vaultId) else -- Check if the delegate has enough stake to redelegate assert( @@ -1905,4 +1888,111 @@ function gar.getPaginatedVaultsForGateway(gatewayAddress, cursor, limit, sortBy, ) end +--- @param gateway Gateway +--- @param vaultId WalletAddress | MessageId +--- @param qty mIO +--- @param currentTimestamp Timestamp +--- @param endTimestamp Timestamp +function gar.createGatewayWithdrawVault(gateway, vaultId, qty, currentTimestamp, endTimestamp) + assert(not gateway.vaults[vaultId], "Vault already exists") + + gateway.vaults[vaultId] = { + balance = qty, + startTimestamp = currentTimestamp, + endTimestamp = endTimestamp, + } +end + +--- @param gateway Gateway +--- @param delegateAddress WalletAddress +--- @param vaultId MessageId +--- @param qty mIO +--- @param currentTimestamp Timestamp +function gar.createDelegateWithdrawVault(gateway, delegateAddress, vaultId, qty, currentTimestamp) + local delegate = gateway.delegates[delegateAddress] + assert(delegate, "Delegate not found") + assert(not delegate.vaults[vaultId], "Vault already exists") + + -- Lock the qty in a vault to be unlocked after withdrawal period and decrease the gateway's total delegated stake + gateway.delegates[delegateAddress].vaults[vaultId] = gar.createDelegateVault(qty, currentTimestamp) +end + +---@param gateway Gateway +---@param vaultId MessageId +function gar.cancelGatewayWithdrawVault(gateway, vaultId) + local vault = gateway.vaults[vaultId] + assert(vault, "Vault not found") + gateway.vaults[vaultId] = nil + gateway.operatorStake = 
gateway.operatorStake + vault.balance +end + +---@param gateway Gateway +---@param gatewayAddress WalletAddress +---@param vaultId MessageId +function gar.fulfillGatewayWithdrawVault(gateway, gatewayAddress, vaultId) + local vault = gateway.vaults[vaultId] + assert(vault, "Vault not found") + balances.increaseBalance(gatewayAddress, vault.balance) + gateway.vaults[vaultId] = nil +end + +---@param gateway Gateway +---@param delegateAddress WalletAddress +function gar.cancelGatewayDelegateVault(gateway, delegateAddress, vaultId) + local delegate = gateway.delegates[delegateAddress] + assert(delegate, "Delegate not found") + local vault = delegate.vaults[vaultId] + assert(vault, "Vault not found") + assert(gar.delegateAllowedToStake(delegateAddress, gateway), "This Gateway does not allow this delegate to stake.") + + gateway.delegates[delegateAddress].vaults[vaultId] = nil + increaseDelegateStakeAtGateway(delegate, gateway, vault.balance) +end + +---@param gateway Gateway +---@param delegateAddress WalletAddress +function gar.fulfillGatewayDelegateVault(gateway, delegateAddress, vaultId) + local delegate = gateway.delegates[delegateAddress] + assert(delegate, "Delegate not found") + local vault = delegate.vaults[vaultId] + assert(vault, "Vault not found") + + balances.increaseBalance(delegateAddress, vault.balance) + gateway.delegates[delegateAddress] = nil + decreaseDelegateStakeAtGateway(delegateAddress, gateway, vault.balance) +end + +--- @param gateway Gateway +--- @param qty mIO +--- @param vaultId MessageId +function gar.reduceStakeFromGatewayVault(gateway, qty, vaultId) + local vault = gateway.vaults[vaultId] + assert(vault, "Vault not found") + assert(qty <= vault.balance, "Insufficient balance in vault") + + if qty == vault.balance then + gateway.vaults[vaultId] = nil + else + gateway.vaults[vaultId].balance = vault.balance - qty + end +end + +--- @param gateway Gateway +--- @param delegateAddress WalletAddress +--- @param vaultId MessageId +function 
gar.reduceStakeFromDelegateVault(gateway, delegateAddress, qty, vaultId) + local delegate = gateway.delegates[delegateAddress] + assert(delegate, "Delegate not found") + local vault = delegate.vaults[vaultId] + assert(vault, "Vault not found") + assert(qty <= vault.balance, "Insufficient balance in vault") + + if qty == vault.balance then + gateway.delegates[delegateAddress].vaults[vaultId] = nil + gar.pruneDelegateFromGatewayIfNecessary(delegateAddress, gateway) + else + gateway.delegates[delegateAddress].vaults[vaultId].balance = vault.balance - qty + end +end + return gar From f19231f21d2929b46271c7efcb41b7c9b549fb63 Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Fri, 13 Dec 2024 12:47:35 -0600 Subject: [PATCH 23/76] test: re-locate base64 unit test file PE-7289 --- {src => spec}/base64_spec.lua | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {src => spec}/base64_spec.lua (100%) diff --git a/src/base64_spec.lua b/spec/base64_spec.lua similarity index 100% rename from src/base64_spec.lua rename to spec/base64_spec.lua From 86e82a544bb22d134260d359a90f86e8c6626c3d Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Fri, 13 Dec 2024 13:20:13 -0600 Subject: [PATCH 24/76] refactor: remove unnecessary constant in reintegration PE-7289 --- src/constants.lua | 1 - 1 file changed, 1 deletion(-) diff --git a/src/constants.lua b/src/constants.lua index cf0f425b..1b9f7f64 100644 --- a/src/constants.lua +++ b/src/constants.lua @@ -121,7 +121,6 @@ constants.genesisFees = { [50] = 400000000, [51] = 400000000, } -constants.MIN_PRICE_INTERVAL_MS = 15 * 60 * 1000 -- 15 minutes -- General constants.MIN_UNSAFE_ADDRESS_LENGTH = 1 From 349dcfbe37e321eaee82c917a8fdaea1ee6774fb Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Mon, 16 Dec 2024 15:53:25 -0600 Subject: [PATCH 25/76] refactor: assert reduceBalance > 0 PE-7289 --- src/balances.lua | 2 ++ src/epochs.lua | 4 +++- src/gar.lua | 6 ++++-- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git 
a/src/balances.lua b/src/balances.lua index fa8c7e1c..47867be4 100644 --- a/src/balances.lua +++ b/src/balances.lua @@ -48,6 +48,8 @@ end ---@throws error If target has insufficient balance function balances.reduceBalance(target, qty) assert(balances.walletHasSufficientBalance(target, qty), "Insufficient balance") + assert(qty > 0, "Quantity must be greater than 0") + local prevBalance = balances.getBalance(target) Balances[target] = prevBalance - qty end diff --git a/src/epochs.lua b/src/epochs.lua index ac8dfe58..e2661109 100644 --- a/src/epochs.lua +++ b/src/epochs.lua @@ -672,7 +672,9 @@ function epochs.distributeRewardsForEpoch(currentTimestamp) delegateAddress, actualDelegateReward ) - balances.reduceBalance(ao.id, actualDelegateReward) + if actualDelegateReward > 0 then + balances.reduceBalance(ao.id, actualDelegateReward) + end end -- increment the total distributed totalDistributed = math.floor(totalDistributed + actualDelegateReward) diff --git a/src/gar.lua b/src/gar.lua index 675e9662..e5ffdb6e 100644 --- a/src/gar.lua +++ b/src/gar.lua @@ -1506,8 +1506,10 @@ function gar.applyFundingPlan(fundingPlan, msgId, currentTimestamp) } -- draw down balance first - balances.reduceBalance(fundingPlan.address, fundingPlan.balance) - appliedPlan.totalFunded = appliedPlan.totalFunded + fundingPlan.balance + if fundingPlan.balance > 0 then + balances.reduceBalance(fundingPlan.address, fundingPlan.balance) + appliedPlan.totalFunded = appliedPlan.totalFunded + fundingPlan.balance + end --draw down stakes and vaults, creating withdraw vaults if necessary for gatewayAddress, delegationPlan in pairs(fundingPlan.stakes) do From 0db2adbb4e34397353726a43eb7bb96af9bc9738 Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Mon, 16 Dec 2024 16:23:02 -0600 Subject: [PATCH 26/76] refactor: use local functions for vault helpers PE-7289 --- src/gar.lua | 52 ++++++++++++++++++++-------------------------------- 1 file changed, 20 insertions(+), 32 deletions(-) diff --git 
a/src/gar.lua b/src/gar.lua index e5ffdb6e..3fad878d 100644 --- a/src/gar.lua +++ b/src/gar.lua @@ -187,19 +187,13 @@ function gar.leaveNetwork(from, currentTimestamp, msgId) -- if the slash happens to be 100% we do not need to vault anything if minimumStakedTokens > 0 then - gar.createGatewayWithdrawVault(gateway, from, minimumStakedTokens, currentTimestamp, gatewayEndTimestamp) + createGatewayWithdrawVault(gateway, from, minimumStakedTokens, currentTimestamp, gatewayEndTimestamp) -- if there is more than the minimum staked tokens, we need to vault the rest but on shorter term local remainingStake = gateway.operatorStake - gar.getSettings().operators.minStake if remainingStake > 0 then - gar.createGatewayWithdrawVault( - gateway, - msgId, - remainingStake, - currentTimestamp, - gatewayStakeWithdrawTimestamp - ) + createGatewayWithdrawVault(gateway, msgId, remainingStake, currentTimestamp, gatewayStakeWithdrawTimestamp) end end @@ -297,7 +291,7 @@ function gar.decreaseOperatorStake(from, qty, currentTimestamp, msgId, instantWi -- Calculate the penalty and withdraw using the utility function expeditedWithdrawalFee, amountToWithdraw, penaltyRate = processInstantWithdrawal(qty, 0, 0, from) else - gar.createGatewayWithdrawVault( + createGatewayWithdrawVault( gateway, msgId, qty, @@ -607,7 +601,7 @@ function gar.decreaseDelegateStake(gatewayAddress, delegator, qty, currentTimest -- Calculate the penalty and withdraw using the utility function and move the balances expeditedWithdrawalFee, amountToWithdraw, penaltyRate = processInstantWithdrawal(qty, 0, 0, delegator) else - gar.createDelegateWithdrawVault(gateway, delegator, messageId, qty, currentTimestamp) + createDelegateWithdrawVault(gateway, delegator, messageId, qty, currentTimestamp) end decreaseDelegateStakeAtGateway(delegator, gateway, qty) @@ -902,7 +896,7 @@ function gar.pruneGateways(currentTimestamp, msgId) -- first, return any expired vaults regardless of the gateway status for vaultId, vault in 
pairs(gateway.vaults) do if vault.endTimestamp <= currentTimestamp then - gar.fulfillGatewayWithdrawVault(gateway, address, vaultId) + fulfillGatewayWithdrawVault(gateway, address, vaultId) result.gatewayStakeReturned = result.gatewayStakeReturned + vault.balance end @@ -911,7 +905,7 @@ function gar.pruneGateways(currentTimestamp, msgId) for delegateAddress, delegate in pairs(gateway.delegates) do for vaultId, vault in pairs(delegate.vaults) do if vault.endTimestamp <= currentTimestamp then - gar.fulfillGatewayDelegateVault(gateway, delegateAddress, vaultId) + fulfillGatewayDelegateVault(gateway, delegateAddress, vaultId) result.delegateStakeReturned = result.delegateStakeReturned + vault.balance end end @@ -1063,9 +1057,9 @@ function gar.cancelGatewayWithdrawal(from, gatewayAddress, vaultId) local previousTotalDelegatedStake = gateway.totalDelegatedStake local vaultBalance = existingVault.balance if isGatewayWithdrawal then - gar.cancelGatewayWithdrawVault(gateway, vaultId) + cancelGatewayWithdrawVault(gateway, vaultId) else - gar.cancelGatewayDelegateVault(gateway, from, vaultId) + cancelGatewayDelegateVault(gateway, from, vaultId) end GatewayRegistry[gatewayAddress] = gateway return { @@ -1255,7 +1249,7 @@ function gar.kickDelegateFromGateway(delegateAddress, gateway, msgId, currentTim local remainingStake = delegate.delegatedStake if remainingStake > 0 then - gar.createDelegateWithdrawVault(gateway, delegateAddress, msgId, remainingStake, currentTimestamp) + createDelegateWithdrawVault(gateway, delegateAddress, msgId, remainingStake, currentTimestamp) end decreaseDelegateStakeAtGateway(delegateAddress, gateway, remainingStake, ban) end @@ -1549,13 +1543,7 @@ function gar.applyFundingPlan(fundingPlan, msgId, currentTimestamp) -- create an exit vault for the remaining stake if less than the gateway's minimum if delegate.delegatedStake > 0 and delegate.delegatedStake < gateway.settings.minDelegatedStake then - gar.createDelegateWithdrawVault( - gateway, - 
fundingPlan.address, - msgId, - delegate.delegatedStake, - currentTimestamp - ) + createDelegateWithdrawVault(gateway, fundingPlan.address, msgId, delegate.delegatedStake, currentTimestamp) decreaseDelegateStakeAtGateway(fundingPlan.address, gateway, delegate.delegatedStake) appliedPlan.newWithdrawVaults[gatewayAddress] = { [msgId] = utils.deepCopy(delegate.vaults[msgId]), @@ -1747,7 +1735,7 @@ function gar.redelegateStake(params) "Quantity must be less than or equal to the vaulted stake amount." ) - gar.reduceStakeFromGatewayVault(sourceGateway, stakeToTakeFromSource, vaultId) + reduceStakeFromGatewayVault(sourceGateway, stakeToTakeFromSource, vaultId) else -- Get the redelegation amount from the operator stakes local maxWithdraw = sourceGateway.operatorStake - gar.getSettings().operators.minStake @@ -1772,7 +1760,7 @@ function gar.redelegateStake(params) "Quantity must be less than or equal to the vaulted stake amount." ) - gar.reduceStakeFromDelegateVault(sourceGateway, delegateAddress, stakeToTakeFromSource, vaultId) + reduceStakeFromDelegateVault(sourceGateway, delegateAddress, stakeToTakeFromSource, vaultId) else -- Check if the delegate has enough stake to redelegate assert( @@ -1895,7 +1883,7 @@ end --- @param qty mIO --- @param currentTimestamp Timestamp --- @param endTimestamp Timestamp -function gar.createGatewayWithdrawVault(gateway, vaultId, qty, currentTimestamp, endTimestamp) +function createGatewayWithdrawVault(gateway, vaultId, qty, currentTimestamp, endTimestamp) assert(not gateway.vaults[vaultId], "Vault already exists") gateway.vaults[vaultId] = { @@ -1910,7 +1898,7 @@ end --- @param vaultId MessageId --- @param qty mIO --- @param currentTimestamp Timestamp -function gar.createDelegateWithdrawVault(gateway, delegateAddress, vaultId, qty, currentTimestamp) +function createDelegateWithdrawVault(gateway, delegateAddress, vaultId, qty, currentTimestamp) local delegate = gateway.delegates[delegateAddress] assert(delegate, "Delegate not found") 
assert(not delegate.vaults[vaultId], "Vault already exists") @@ -1921,7 +1909,7 @@ end ---@param gateway Gateway ---@param vaultId MessageId -function gar.cancelGatewayWithdrawVault(gateway, vaultId) +function cancelGatewayWithdrawVault(gateway, vaultId) local vault = gateway.vaults[vaultId] assert(vault, "Vault not found") gateway.vaults[vaultId] = nil @@ -1931,7 +1919,7 @@ end ---@param gateway Gateway ---@param gatewayAddress WalletAddress ---@param vaultId MessageId -function gar.fulfillGatewayWithdrawVault(gateway, gatewayAddress, vaultId) +function fulfillGatewayWithdrawVault(gateway, gatewayAddress, vaultId) local vault = gateway.vaults[vaultId] assert(vault, "Vault not found") balances.increaseBalance(gatewayAddress, vault.balance) @@ -1940,7 +1928,7 @@ end ---@param gateway Gateway ---@param delegateAddress WalletAddress -function gar.cancelGatewayDelegateVault(gateway, delegateAddress, vaultId) +function cancelGatewayDelegateVault(gateway, delegateAddress, vaultId) local delegate = gateway.delegates[delegateAddress] assert(delegate, "Delegate not found") local vault = delegate.vaults[vaultId] @@ -1953,7 +1941,7 @@ end ---@param gateway Gateway ---@param delegateAddress WalletAddress -function gar.fulfillGatewayDelegateVault(gateway, delegateAddress, vaultId) +function fulfillGatewayDelegateVault(gateway, delegateAddress, vaultId) local delegate = gateway.delegates[delegateAddress] assert(delegate, "Delegate not found") local vault = delegate.vaults[vaultId] @@ -1967,7 +1955,7 @@ end --- @param gateway Gateway --- @param qty mIO --- @param vaultId MessageId -function gar.reduceStakeFromGatewayVault(gateway, qty, vaultId) +function reduceStakeFromGatewayVault(gateway, qty, vaultId) local vault = gateway.vaults[vaultId] assert(vault, "Vault not found") assert(qty <= vault.balance, "Insufficient balance in vault") @@ -1982,7 +1970,7 @@ end --- @param gateway Gateway --- @param delegateAddress WalletAddress --- @param vaultId MessageId -function 
gar.reduceStakeFromDelegateVault(gateway, delegateAddress, qty, vaultId) +function reduceStakeFromDelegateVault(gateway, delegateAddress, qty, vaultId) local delegate = gateway.delegates[delegateAddress] assert(delegate, "Delegate not found") local vault = delegate.vaults[vaultId] From 7ff44c458ca916a481396b573ca7942ffb8cc3d8 Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Mon, 16 Dec 2024 16:31:02 -0600 Subject: [PATCH 27/76] refactor: add exitVault helper, use unlock over fulfill PE-7289 --- src/gar.lua | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/src/gar.lua b/src/gar.lua index 3fad878d..497f360b 100644 --- a/src/gar.lua +++ b/src/gar.lua @@ -181,19 +181,17 @@ function gar.leaveNetwork(from, currentTimestamp, msgId) assert(gar.isGatewayEligibleToLeave(gateway, currentTimestamp), "The gateway is not eligible to leave the network.") local gatewayEndTimestamp = currentTimestamp + gar.getSettings().operators.leaveLengthMs - local gatewayStakeWithdrawTimestamp = currentTimestamp + gar.getSettings().operators.withdrawLengthMs - local minimumStakedTokens = math.min(gar.getSettings().operators.minStake, gateway.operatorStake) -- if the slash happens to be 100% we do not need to vault anything if minimumStakedTokens > 0 then - createGatewayWithdrawVault(gateway, from, minimumStakedTokens, currentTimestamp, gatewayEndTimestamp) + createGatewayExitVault(gateway, minimumStakedTokens, currentTimestamp, from) -- if there is more than the minimum staked tokens, we need to vault the rest but on shorter term local remainingStake = gateway.operatorStake - gar.getSettings().operators.minStake if remainingStake > 0 then - createGatewayWithdrawVault(gateway, msgId, remainingStake, currentTimestamp, gatewayStakeWithdrawTimestamp) + createGatewayWithdrawVault(gateway, msgId, remainingStake, currentTimestamp) end end @@ -896,7 +894,7 @@ function gar.pruneGateways(currentTimestamp, msgId) -- first, return any expired vaults 
regardless of the gateway status for vaultId, vault in pairs(gateway.vaults) do if vault.endTimestamp <= currentTimestamp then - fulfillGatewayWithdrawVault(gateway, address, vaultId) + unlockGatewayWithdrawVault(gateway, address, vaultId) result.gatewayStakeReturned = result.gatewayStakeReturned + vault.balance end @@ -905,7 +903,7 @@ function gar.pruneGateways(currentTimestamp, msgId) for delegateAddress, delegate in pairs(gateway.delegates) do for vaultId, vault in pairs(delegate.vaults) do if vault.endTimestamp <= currentTimestamp then - fulfillGatewayDelegateVault(gateway, delegateAddress, vaultId) + unlockGatewayDelegateVault(gateway, delegateAddress, vaultId) result.delegateStakeReturned = result.delegateStakeReturned + vault.balance end end @@ -1882,14 +1880,26 @@ end --- @param vaultId WalletAddress | MessageId --- @param qty mIO --- @param currentTimestamp Timestamp ---- @param endTimestamp Timestamp -function createGatewayWithdrawVault(gateway, vaultId, qty, currentTimestamp, endTimestamp) +function createGatewayWithdrawVault(gateway, vaultId, qty, currentTimestamp) assert(not gateway.vaults[vaultId], "Vault already exists") gateway.vaults[vaultId] = { balance = qty, startTimestamp = currentTimestamp, - endTimestamp = endTimestamp, + endTimestamp = currentTimestamp + gar.getSettings().operators.withdrawLengthMs, + } +end + +--- @param gateway Gateway +--- @param qty mIO +--- @param currentTimestamp Timestamp +--- @param gatewayAddress WalletAddress +function createGatewayExitVault(gateway, qty, currentTimestamp, gatewayAddress) + assert(not gateway.vaults[gatewayAddress], "Exit Vault already exists") + gateway.vaults[gatewayAddress] = { + balance = qty, + startTimestamp = currentTimestamp, + endTimestamp = currentTimestamp + gar.getSettings().operators.leaveLengthMs, } end @@ -1919,7 +1929,7 @@ end ---@param gateway Gateway ---@param gatewayAddress WalletAddress ---@param vaultId MessageId -function fulfillGatewayWithdrawVault(gateway, gatewayAddress, 
vaultId) +function unlockGatewayWithdrawVault(gateway, gatewayAddress, vaultId) local vault = gateway.vaults[vaultId] assert(vault, "Vault not found") balances.increaseBalance(gatewayAddress, vault.balance) @@ -1941,7 +1951,7 @@ end ---@param gateway Gateway ---@param delegateAddress WalletAddress -function fulfillGatewayDelegateVault(gateway, delegateAddress, vaultId) +function unlockGatewayDelegateVault(gateway, delegateAddress, vaultId) local delegate = gateway.delegates[delegateAddress] assert(delegate, "Delegate not found") local vault = delegate.vaults[vaultId] From df876d5b2c230ff9871eb930f4ebb3fb62e78696 Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Mon, 16 Dec 2024 16:33:30 -0600 Subject: [PATCH 28/76] refactor: remove unnecessary param PE-7289 --- src/gar.lua | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/gar.lua b/src/gar.lua index 497f360b..153a7d9c 100644 --- a/src/gar.lua +++ b/src/gar.lua @@ -289,13 +289,7 @@ function gar.decreaseOperatorStake(from, qty, currentTimestamp, msgId, instantWi -- Calculate the penalty and withdraw using the utility function expeditedWithdrawalFee, amountToWithdraw, penaltyRate = processInstantWithdrawal(qty, 0, 0, from) else - createGatewayWithdrawVault( - gateway, - msgId, - qty, - currentTimestamp, - currentTimestamp + gar.getSettings().operators.withdrawLengthMs - ) + createGatewayWithdrawVault(gateway, msgId, qty, currentTimestamp) end -- Update the gateway From 652486feecfbc042fadb4a7331b3331275dcf747 Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Mon, 16 Dec 2024 17:12:49 -0600 Subject: [PATCH 29/76] fix(PE-7289): adds checksum of eth address when validating formatted ETH address --- spec/utils_spec.lua | 14 ++++++++++++++ src/utils.lua | 13 ++++++++++--- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/spec/utils_spec.lua b/spec/utils_spec.lua index 9d97e071..79e2abfd 100644 --- a/spec/utils_spec.lua +++ b/spec/utils_spec.lua @@ -23,6 +23,20 @@ describe("utils", 
function() end) end) + describe("isValidUnformattedEthAddress", function() + it("should return true on a valid unformatted ETH address", function() + assert.is_true(utils.isValidUnformattedEthAddress(testEthAddress)) + end) + + it("should return false on a non-string value", function() + assert.is_false(utils.isValidUnformattedEthAddress(3)) + end) + + it("should return false on an invalid unformatted ETH address", function() + assert.is_false(utils.isValidUnformattedEthAddress("ZxFCAd0B19bB29D4674531d6f115237E16AfCE377C")) + end) + end) + describe("formatAddress", function() it("should format ETH address to lowercase", function() assert.is.equal(testEthAddress, utils.formatAddress(testEthAddress)) diff --git a/src/utils.lua b/src/utils.lua index f7897c4b..e5c29e61 100644 --- a/src/utils.lua +++ b/src/utils.lua @@ -255,11 +255,18 @@ function utils.isValidArweaveAddress(address) return type(address) == "string" and #address == 43 and string.match(address, "^[%w-_]+$") ~= nil end ---- Checks if an address is a valid Ethereum address +--- Checks if an address looks like an unformatted Ethereum address +--- @param address string The address to check +--- @return boolean isValidUnformattedEthAddress - whether the address is a valid unformatted Ethereum address +function utils.isValidUnformattedEthAddress(address) + return type(address) == "string" and #address == 42 and string.match(address, "^0x[%x]+$") ~= nil +end + +--- Checks if an address is a valid Ethereum address and is in EIP-55 checksum format --- @param address string The address to check --- @return boolean isValidEthAddress - whether the address is a valid Ethereum address function utils.isValidEthAddress(address) - return type(address) == "string" and #address == 42 and string.match(address, "^0x[%x]+$") ~= nil + return utils.isValidUnformattedEthAddress(address) and address == utils.formatEIP55Address(address) end --- Checks if an address is a valid AO address @@ -299,7 +306,7 @@ end --- @param address 
string The address to format --- @return string formattedAddress - the EIP-55 checksum formatted address function utils.formatAddress(address) - if utils.isValidEthAddress(address) then + if utils.isValidUnformattedEthAddress(address) then return utils.formatEIP55Address(address) end return address From 380728f3515f551e4b1a7a877bbd8f13c8da6cc4 Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Mon, 16 Dec 2024 20:11:03 -0600 Subject: [PATCH 30/76] fix(main): add global handler to sanitize and validate inputs --- src/main.lua | 133 ++++++++++++++++++++++++---------------- src/utils.lua | 27 ++++++++ tests/arns.test.mjs | 28 +++++++-- tests/gar.test.mjs | 25 ++++++-- tests/handlers.test.mjs | 7 ++- tests/helpers.mjs | 22 +++++-- tests/primary.test.mjs | 39 ++++++++++-- tests/tick.test.mjs | 30 +++++---- 8 files changed, 222 insertions(+), 89 deletions(-) diff --git a/src/main.lua b/src/main.lua index 77bca213..5384576e 100644 --- a/src/main.lua +++ b/src/main.lua @@ -21,7 +21,6 @@ Vaults = Vaults or {} GatewayRegistry = GatewayRegistry or {} NameRegistry = NameRegistry or {} Epochs = Epochs or {} -LastTickedEpochIndex = LastTickedEpochIndex or -1 local utils = require("utils") local json = require("json") @@ -120,6 +119,9 @@ LastKnownStakedSupply = LastKnownStakedSupply or 0 -- total operator stake acros LastKnownDelegatedSupply = LastKnownDelegatedSupply or 0 -- total delegated stake across all gateways LastKnownWithdrawSupply = LastKnownWithdrawSupply or 0 -- total withdraw supply across all gateways (gateways and delegates) LastKnownPnpRequestSupply = LastKnownPnpRequestSupply or 0 -- total supply stashed in outstanding Primary Name Protocol requests +LastTickedEpochIndex = LastTickedEpochIndex or -1 +LastKnownMessageTimestamp = LastKnownMessageTimestamp or 0 +LastKnownMessageId = LastKnownMessageId or "" local function lastKnownTotalTokenSupply() return LastKnownCirculatingSupply + LastKnownLockedSupply @@ -331,8 +333,71 @@ local function 
assertValidFundFrom(fundFrom) assert(validFundFrom[fundFrom], "Invalid fund from type. Must be one of: any, balance, stake") end -local function addEventingHandler(handlerName, pattern, handleFn, critical) +-- Sanitize inputs before every interaction +local function assertAndSanitizeInputs(msg) + assert( + msg.Timestamp and msg.Timestamp >= LastKnownMessageTimestamp, + "Timestamp must be greater than or equal to the last known message timestamp of " + .. LastKnownMessageTimestamp + .. " but was " + .. msg.Timestamp + ) + assert(msg.From, "From is required") + assert(msg.Id, "Id is required") + assert(msg.Tags and type(msg.Tags) == "table", "Tags are required") + + msg.Tags = utils.validateAndSanitizeInputs(msg.Tags) + msg.From = utils.formatAddress(msg.From) + msg.Timestamp = msg.Timestamp and tonumber(msg.Timestamp) or tonumber(msg.Tags.Timestamp) or nil + + local knownAddressTags = { + "Recipient", + "Initiator", + "Target", + "Source", + "Address", + "Vault-Id", + "Process-Id", + "Observer-Address", + } + + for _, tagName in ipairs(knownAddressTags) do + -- Format all incoming addresses + msg.Tags[tagName] = msg.Tags[tagName] and utils.formatAddress(msg.Tags[tagName]) or nil + end + + local knownNumberTags = { + "Quantity", + "Lock-Length", + "Operator-Stake", + "Delegated-Stake", + "Withdraw-Stake", + "Timestamp", + "Years", + "Min-Delegated-Stake", + "Port", + "Extend-Length", + "Delegate-Reward-Share-Ratio", + "Epoch-Index", + "Price-Interval-Ms", + "Block-Height", + } + for _, tagName in ipairs(knownNumberTags) do + -- Format all incoming numbers + msg.Tags[tagName] = msg.Tags[tagName] and tonumber(msg.Tags[tagName]) or nil + end +end + +local function updateLastKnownMessage(msg) + if msg.Timestamp >= LastKnownMessageTimestamp then + LastKnownMessageTimestamp = msg.Timestamp + LastKnownMessageId = msg.Id + end +end + +local function addEventingHandler(handlerName, pattern, handleFn, critical, printEvent) critical = critical or false + printEvent = printEvent 
== nil and true or printEvent Handlers.add(handlerName, pattern, function(msg) -- add an IOEvent to the message if it doesn't exist msg.ioEvent = msg.ioEvent or IOEvent(msg) @@ -356,18 +421,24 @@ local function addEventingHandler(handlerName, pattern, handleFn, critical) local errorWithEvent = tostring(resultOrError) .. "\n" .. errorEvent:toJSON() error(errorWithEvent, 0) -- 0 ensures not to include this line number in the error message end - msg.ioEvent:printEvent() + if printEvent then + msg.ioEvent:printEvent() + end end) end --- prune state before every interaction +addEventingHandler("sanitize", function() + return "continue" +end, function(msg) + assertAndSanitizeInputs(msg) + updateLastKnownMessage(msg) +end, CRITICAL, false) + -- NOTE: THIS IS A CRITICAL HANDLER AND WILL DISCARD THE MEMORY ON ERROR addEventingHandler("prune", function() return "continue" -- continue is a pattern that matches every message and continues to the next handler that matches the tags end, function(msg) - local msgTimestamp = tonumber(msg.Timestamp or msg.Tags.Timestamp) - assert(msgTimestamp, "Timestamp is required for a tick interaction") - local epochIndex = epochs.getEpochIndexForTimestamp(msgTimestamp) + local epochIndex = epochs.getEpochIndexForTimestamp(msg.Timestamp) msg.ioEvent:addField("epochIndex", epochIndex) local previousStateSupplies = { @@ -380,50 +451,8 @@ end, function(msg) lastKnownRequestSupply = LastKnownPnpRequestSupply, lastKnownTotalSupply = lastKnownTotalTokenSupply(), } - - msg.From = utils.formatAddress(msg.From) - msg.Timestamp = msg.Timestamp and tonumber(msg.Timestamp) or nil - - local knownAddressTags = { - "Recipient", - "Initiator", - "Target", - "Source", - "Address", - "Vault-Id", - "Process-Id", - "Observer-Address", - } - - for _, tagName in ipairs(knownAddressTags) do - -- Format all incoming addresses - msg.Tags[tagName] = msg.Tags[tagName] and utils.formatAddress(msg.Tags[tagName]) or nil - end - - local knownNumberTags = { - "Quantity", - 
"Lock-Length", - "Operator-Stake", - "Delegated-Stake", - "Withdraw-Stake", - "Timestamp", - "Years", - "Min-Delegated-Stake", - "Port", - "Extend-Length", - "Delegate-Reward-Share-Ratio", - "Epoch-Index", - "Price-Interval-Ms", - "Block-Height", - } - for _, tagName in ipairs(knownNumberTags) do - -- Format all incoming numbers - msg.Tags[tagName] = msg.Tags[tagName] and tonumber(msg.Tags[tagName]) or nil - end - - local msgId = msg.Id - print("Pruning state at timestamp: " .. msgTimestamp) - local prunedStateResult = prune.pruneState(msgTimestamp, msgId, LastGracePeriodEntryEndTimestamp) + print("Pruning state at timestamp: " .. msg.Timestamp) + local prunedStateResult = prune.pruneState(msg.Timestamp, msg.Id, LastGracePeriodEntryEndTimestamp) if prunedStateResult then local prunedRecordsCount = utils.lengthOfTable(prunedStateResult.prunedRecords or {}) @@ -501,7 +530,7 @@ end, function(msg) end return prunedStateResult -end, CRITICAL) +end, CRITICAL, false) -- Write handlers addEventingHandler(ActionMap.Transfer, utils.hasMatchingTag("Action", ActionMap.Transfer), function(msg) diff --git a/src/utils.lua b/src/utils.lua index e5c29e61..959c1ce4 100644 --- a/src/utils.lua +++ b/src/utils.lua @@ -568,4 +568,31 @@ function utils.filterDictionary(tbl, predicate) return filtered end +--- Sanitizes inputs to ensure they are valid strings +--- @param table table The table to sanitize +--- @return table sanitizedTable - the sanitized table +function utils.validateAndSanitizeInputs(table) + assert(type(table) == "table", "Table must be a table") + for key, value in pairs(table) do + assert(type(key) == "string", "Key must be a string") + assert( + type(value) == "string" or type(value) == "number" or type(value) == "boolean", + "Value must be a string, integer, or boolean" + ) + if type(value) == "string" then + assert(#key > 0, "Key cannot be empty") + assert(#value > 0, "Value cannot be empty") + assert(not string.match(key, "^%s+$"), "Key cannot be only whitespace") + 
assert(not string.match(value, "^%s+$"), "Value cannot be only whitespace") + end + if type(value) == "boolean" then + assert(value == true or value == false, "Boolean value must be true or false") + end + if type(value) == "number" then + assert(utils.isInteger(value), "Number must be an integer") + end + end + return table +end + return utils diff --git a/tests/arns.test.mjs b/tests/arns.test.mjs index 80668b18..993a8b99 100644 --- a/tests/arns.test.mjs +++ b/tests/arns.test.mjs @@ -328,7 +328,6 @@ describe('ArNS', async () => { { name: 'Quantity', value: `${650000000}` }, // delegate all of their balance { name: 'Address', value: STUB_OPERATOR_ADDRESS }, // our gateway address ], - Timestamp: STUB_TIMESTAMP + 1, }, memory, ); @@ -596,6 +595,7 @@ describe('ArNS', async () => { memory, transferQty: 700000000, // 600000000 for name purchase + 100000000 for extending the lease stakeQty: 650000000, // delegate most of their balance so that name purchase uses balance and stakes + timestamp: STUB_TIMESTAMP, }); memory = stakeResult.memory; @@ -612,6 +612,7 @@ describe('ArNS', async () => { { name: 'Process-Id', value: ''.padEnd(43, 'a') }, { name: 'Fund-From', value: 'any' }, ], + Timestamp: STUB_TIMESTAMP + 1, }, memory, ); @@ -625,6 +626,7 @@ describe('ArNS', async () => { { name: 'Action', value: 'Record' }, { name: 'Name', value: 'test-name' }, ], + Timestamp: STUB_TIMESTAMP + 1, }, buyRecordResult.Memory, ); @@ -641,6 +643,7 @@ describe('ArNS', async () => { { name: 'Years', value: '1' }, { name: 'Fund-From', value: 'any' }, ], + Timestamp: STUB_TIMESTAMP + 1, }, buyRecordResult.Memory, ); @@ -651,6 +654,7 @@ describe('ArNS', async () => { { name: 'Action', value: 'Record' }, { name: 'Name', value: 'test-name' }, ], + Timestamp: STUB_TIMESTAMP + 1, }, extendResult.Memory, ); @@ -727,6 +731,7 @@ describe('ArNS', async () => { transferQty: 3_100_000_000, // 60,000,0000 for name purchase + 2,500,000,000 for upgrading the name stakeQty: 3_100_000_000 - 50_000_000, // 
delegate most of their balance so that name purchase uses balance and stakes stakerAddress: STUB_ADDRESS, + timestamp: STUB_TIMESTAMP, }); memory = stakeResult.memory; @@ -833,6 +838,7 @@ describe('ArNS', async () => { }, releaseNameResult.Memory, ); + // assert no error tag const auctionErrorTag = auctionResult.Messages?.[0]?.Tags?.find( (tag) => tag.name === 'Error', @@ -999,6 +1005,7 @@ describe('ArNS', async () => { const balancesResult = await handle( { Tags: [{ name: 'Action', value: 'Balances' }], + Timestamp: bidTimestamp, }, submitBidResult.Memory, ); @@ -1008,6 +1015,7 @@ describe('ArNS', async () => { initialRecord.purchasePrice + expectedRewardForProtocol; const balances = JSON.parse(balancesResult.Messages[0].Data); + assert.equal(balances[initiator], expectedRewardForInitiator); assert.equal(balances[PROCESS_ID], expectedProtocolBalance); assert.equal(balances[bidderAddress], 0); @@ -1019,6 +1027,7 @@ describe('ArNS', async () => { processId: ''.padEnd(43, 'a'), type: 'lease', years: 1, + timestamp: STUB_TIMESTAMP, }); // tick the contract after the lease leaves its grace period @@ -1039,6 +1048,7 @@ describe('ArNS', async () => { { name: 'Action', value: 'Auction-Info' }, { name: 'Name', value: 'test-name' }, ], + Timestamp: futureTimestamp, }, tickResult.Memory, ); @@ -1089,7 +1099,7 @@ describe('ArNS', async () => { { name: 'Quantity', value: `${expectedPurchasePrice}` }, { name: 'Cast', value: true }, ], - Timestamp: bidTimestamp - 1, + Timestamp: bidTimestamp, }, tickResult.Memory, ); @@ -1109,6 +1119,7 @@ describe('ArNS', async () => { transferQty: 0, stakeQty: expectedPurchasePrice, stakerAddress: bidderAddress, + timestamp: bidTimestamp, }); memoryToUse = stakeResult.memory; } @@ -1230,6 +1241,7 @@ describe('ArNS', async () => { const balancesResult = await handle( { Tags: [{ name: 'Action', value: 'Balances' }], + Timestamp: bidTimestamp, }, submitBidResult.Memory, ); @@ -1499,6 +1511,7 @@ describe('ArNS', async () => { it('should paginate 
records correctly', async () => { // buy 3 records let buyRecordsMemory; // updated after each purchase + let lastTimestamp = STUB_TIMESTAMP; const recordsCount = 3; for (let i = 0; i < recordsCount; i++) { const buyRecordsResult = await handle( @@ -1508,11 +1521,12 @@ describe('ArNS', async () => { { name: 'Name', value: `test-name-${i}` }, { name: 'Process-Id', value: ''.padEnd(43, `${i}`) }, ], - Timestamp: STUB_TIMESTAMP + i * 1000, // order of names is based on timestamp + Timestamp: lastTimestamp + i * 1000, // order of names is based on timestamp }, buyRecordsMemory, ); buyRecordsMemory = buyRecordsResult.Memory; + lastTimestamp = lastTimestamp + i * 1000; } // call the paginated records handler repeatedly until all records are fetched @@ -1526,6 +1540,7 @@ describe('ArNS', async () => { { name: 'Cursor', value: cursor }, { name: 'Limit', value: 1 }, ], + Timestamp: lastTimestamp, }, buyRecordsMemory, ); @@ -1599,6 +1614,7 @@ describe('ArNS', async () => { { name: 'Action', value: 'Gateway' }, { name: 'Address', value: joinedGateway }, ], + Timestamp: afterDistributionTimestamp, }, firstTickAndDistribution.Memory, ); @@ -1618,7 +1634,7 @@ describe('ArNS', async () => { const transferMemory = await transfer({ recipient: nonEligibleAddress, quantity: 200_000_000_000, - timestamp: afterDistributionTimestamp - 1, + timestamp: afterDistributionTimestamp, memory: firstTickAndDistribution.Memory, }); arnsDiscountMemory = transferMemory; @@ -1663,6 +1679,7 @@ describe('ArNS', async () => { { name: 'Intent', value: 'Buy-Record' }, { name: 'Name', value: 'test-name' }, ], + Timestamp: afterDistributionTimestamp, }, arnsDiscountMemory, ); @@ -1856,6 +1873,7 @@ describe('ArNS', async () => { From: nonEligibleAddress, Owner: nonEligibleAddress, Tags: upgradeToPermabuyTags, + Timestamp: upgradeToPermabuyTimestamp, }, buyRecordResult.Memory, ); @@ -2005,7 +2023,7 @@ describe('ArNS', async () => { it('should not apply the discount on submit bid for a non-eligible gateway', 
async () => { const balanceBefore = await getBalance({ memory: expiredRecordMemory, - timestamp: submitBidTimestamp - 1, + timestamp: submitBidTimestamp, address: nonEligibleAddress, }); const result = await handle( diff --git a/tests/gar.test.mjs b/tests/gar.test.mjs index 633eaff9..d2c48360 100644 --- a/tests/gar.test.mjs +++ b/tests/gar.test.mjs @@ -30,7 +30,7 @@ describe('GatewayRegistry', async () => { const delegateStake = async ({ memory, - timestamp, + timestamp = STUB_TIMESTAMP, delegatorAddress, quantity, gatewayAddress, @@ -41,6 +41,7 @@ describe('GatewayRegistry', async () => { recipient: delegatorAddress, quantity, memory, + timestamp, }); const delegateResult = await handle( @@ -578,6 +579,7 @@ describe('GatewayRegistry', async () => { const gateway = await getGateway({ memory: sharedMemory, address: STUB_ADDRESS, + timestamp: STUB_TIMESTAMP, }); // leave at timestamp @@ -592,6 +594,7 @@ describe('GatewayRegistry', async () => { const leavingGateway = await getGateway({ memory: leaveNetworkMemory, address: STUB_ADDRESS, + timestamp: leavingTimestamp, }); assert.deepStrictEqual(leavingGateway, { ...gateway, @@ -605,6 +608,7 @@ describe('GatewayRegistry', async () => { await getGatewayVaultsItems({ memory: leaveNetworkMemory, gatewayAddress: STUB_ADDRESS, + timestamp: leavingTimestamp, }), [ { @@ -672,7 +676,6 @@ describe('GatewayRegistry', async () => { for (const delegateAddress of delegateAddresses) { const maybeDelegateResult = await delegateStake({ memory: nextMemory, - timestamp: STUB_TIMESTAMP, delegatorAddress: delegateAddress, quantity: 500_000_000, gatewayAddress: STUB_ADDRESS, @@ -976,6 +979,7 @@ describe('GatewayRegistry', async () => { const updatedGateway = await getGateway({ address: STUB_ADDRESS, memory: decreaseStakeMemory, + timestamp: decreaseTimestamp, }); assert.deepStrictEqual(updatedGateway, { ...gatewayBefore, @@ -985,6 +989,7 @@ describe('GatewayRegistry', async () => { await getGatewayVaultsItems({ memory: decreaseStakeMemory, 
gatewayAddress: STUB_ADDRESS, + timestamp: decreaseTimestamp, }), [ { @@ -1122,6 +1127,7 @@ describe('GatewayRegistry', async () => { const gatewayAfter = await getGateway({ address: STUB_ADDRESS, memory: delegatedStakeMemory, + timestamp: delegationTimestamp, }); assert.deepStrictEqual(gatewayAfter, { ...gatewayBefore, @@ -1130,6 +1136,7 @@ describe('GatewayRegistry', async () => { const delegateItems = await getDelegatesItems({ memory: delegatedStakeMemory, gatewayAddress: STUB_ADDRESS, + timestamp: delegationTimestamp, }); assert.deepEqual( [ @@ -1161,6 +1168,7 @@ describe('GatewayRegistry', async () => { const gatewayBefore = await getGateway({ address: STUB_ADDRESS, memory: delegatedStakeMemory, + timestamp: STUB_TIMESTAMP, }); const { memory: decreaseStakeMemory } = await decreaseDelegateStake({ memory: delegatedStakeMemory, @@ -1174,6 +1182,7 @@ describe('GatewayRegistry', async () => { const gatewayAfter = await getGateway({ address: STUB_ADDRESS, memory: decreaseStakeMemory, + timestamp: decreaseStakeTimestamp, }); assert.deepStrictEqual(gatewayAfter, { ...gatewayBefore, @@ -1182,6 +1191,7 @@ describe('GatewayRegistry', async () => { const delegateItems = await getDelegatesItems({ memory: decreaseStakeMemory, gatewayAddress: STUB_ADDRESS, + timestamp: decreaseStakeTimestamp, }); assert.deepEqual( [ @@ -1219,6 +1229,7 @@ describe('GatewayRegistry', async () => { const gatewayBefore = await getGateway({ address: STUB_ADDRESS, memory: delegatedStakeMemory, + timestamp: STUB_TIMESTAMP, }); const { memory: decreaseStakeMemory, result } = await decreaseDelegateStake({ @@ -1244,6 +1255,7 @@ describe('GatewayRegistry', async () => { const gatewayAfter = await getGateway({ address: STUB_ADDRESS, memory: decreaseStakeMemory, + timestamp: decreaseStakeTimestamp, }); assert.deepStrictEqual(gatewayAfter, gatewayBefore); }); @@ -1287,6 +1299,7 @@ describe('GatewayRegistry', async () => { const gatewayAfter = await getGateway({ address: STUB_ADDRESS, memory: 
cancelWithdrawalMemory, + timestamp: decreaseStakeTimestamp, }); // no changes to the gateway after a withdrawal is cancelled assert.deepStrictEqual(gatewayAfter, gatewayBefore); @@ -1320,6 +1333,7 @@ describe('GatewayRegistry', async () => { const gatewayAfter = await getGateway({ address: STUB_ADDRESS, memory: cancelWithdrawalMemory, + timestamp: decreaseStakeTimestamp, }); // no changes to the gateway after a withdrawal is cancelled assert.deepStrictEqual(gatewayAfter, gatewayBefore); @@ -1405,7 +1419,6 @@ describe('GatewayRegistry', async () => { const { memory: addGatewayMemory2 } = await joinNetwork({ address: secondGatewayAddress, memory: sharedMemory, - timestamp: STUB_TIMESTAMP - 1, }); let cursor; let fetchedGateways = []; @@ -1459,7 +1472,6 @@ describe('GatewayRegistry', async () => { const { memory: addGatewayMemory2 } = await joinNetwork({ address: secondGatewayAddress, memory: sharedMemory, - timestamp: STUB_TIMESTAMP - 1, }); // Stake to both gateways @@ -1503,6 +1515,7 @@ describe('GatewayRegistry', async () => { { name: 'Sort-Order', value: sortOrder }, ...(cursor ? 
[{ name: 'Cursor', value: `${cursor}` }] : []), ], + Timestamp: STUB_TIMESTAMP + 2, }, decreaseStakeMemory, ); @@ -1659,6 +1672,7 @@ describe('GatewayRegistry', async () => { const delegateItems = await getDelegatesItems({ memory: delegatedStakeMemory, gatewayAddress: sourceAddress, + timestamp: STUB_TIMESTAMP, }); assert.deepEqual( [ @@ -1784,6 +1798,7 @@ describe('GatewayRegistry', async () => { const delegateItems = await getDelegatesItems({ memory: delegatedStakeMemory, gatewayAddress: sourceAddress, + timestamp: STUB_TIMESTAMP, }); assert.deepStrictEqual( [ @@ -1827,6 +1842,7 @@ describe('GatewayRegistry', async () => { await getDelegatesItems({ memory: redelegateStakeMemory, gatewayAddress: targetAddress, + timestamp: STUB_TIMESTAMP + 2, }), [ { @@ -1848,6 +1864,7 @@ describe('GatewayRegistry', async () => { await getDelegatesItems({ memory: redelegateStakeMemory, gatewayAddress: sourceAddress, + timestamp: STUB_TIMESTAMP + 2, }), [], ); diff --git a/tests/handlers.test.mjs b/tests/handlers.test.mjs index f052ca22..49eaa280 100644 --- a/tests/handlers.test.mjs +++ b/tests/handlers.test.mjs @@ -33,15 +33,18 @@ describe('handlers', async () => { const { Handlers: handlersList } = JSON.parse(handlers.Messages[0].Data); assert.ok(handlersList.includes('_eval')); assert.ok(handlersList.includes('_default')); + assert.ok(handlersList.includes('sanitize')); assert.ok(handlersList.includes('prune')); const evalIndex = handlersList.indexOf('_eval'); const defaultIndex = handlersList.indexOf('_default'); + const sanitizeIndex = handlersList.indexOf('sanitize'); const pruneIndex = handlersList.indexOf('prune'); - const expectedHandlerCount = 70; // TODO: update this if more handlers are added + const expectedHandlerCount = 71; // TODO: update this if more handlers are added assert.ok(evalIndex === 0); assert.ok(defaultIndex === 1); - assert.ok(pruneIndex === 2); + assert.ok(sanitizeIndex === 2); + assert.ok(pruneIndex === 3); assert.ok( handlersList.length === 
expectedHandlerCount, 'should have ' + diff --git a/tests/helpers.mjs b/tests/helpers.mjs index 4dd40b6c..70e053e3 100644 --- a/tests/helpers.mjs +++ b/tests/helpers.mjs @@ -63,6 +63,7 @@ export const transfer = async ({ quantity = initialOperatorStake, memory = startMemory, cast = false, + timestamp = STUB_TIMESTAMP, } = {}) => { if (quantity === 0) { // Nothing to do @@ -79,6 +80,7 @@ export const transfer = async ({ { name: 'Quantity', value: quantity }, { name: 'Cast', value: cast }, ], + Timestamp: timestamp, }, memory, ); @@ -93,11 +95,11 @@ export const joinNetwork = async ({ tags = validGatewayTags, quantity = 100_000_000_000, }) => { - // give them the join network token amount const transferMemory = await transfer({ recipient: address, quantity, memory, + timestamp, }); const joinNetworkResult = await handle( { @@ -131,6 +133,7 @@ export const setUpStake = async ({ quantity: transferQty, memory, cast: true, + timestamp, }); // Stake a gateway for the user to delegate to @@ -138,7 +141,7 @@ export const setUpStake = async ({ memory, address: gatewayAddress, tags: gatewayTags, - timestamp: timestamp - 1, + timestamp: timestamp, }); assertNoResultError(joinNetworkResult); memory = joinNetworkResult.memory; @@ -219,11 +222,15 @@ export const getDelegates = async ({ }; }; -export const getDelegatesItems = async ({ memory, gatewayAddress }) => { +export const getDelegatesItems = async ({ + memory, + gatewayAddress, + timestamp = STUB_TIMESTAMP, +}) => { const { result } = await getDelegates({ memory, from: STUB_ADDRESS, - timestamp: STUB_TIMESTAMP, + timestamp, gatewayAddress, }); return JSON.parse(result.Messages?.[0]?.Data).items; @@ -254,13 +261,18 @@ export const getVaults = async ({ }; }; -export const getGatewayVaultsItems = async ({ memory, gatewayAddress }) => { +export const getGatewayVaultsItems = async ({ + memory, + gatewayAddress, + timestamp = STUB_TIMESTAMP, +}) => { const gatewayVaultsResult = await handle( { Tags: [ { name: 'Action', value: 
'Paginated-Gateway-Vaults' }, { name: 'Address', value: gatewayAddress }, ], + Timestamp: timestamp, }, memory, ); diff --git a/tests/primary.test.mjs b/tests/primary.test.mjs index 8786f71d..cff53d70 100644 --- a/tests/primary.test.mjs +++ b/tests/primary.test.mjs @@ -15,6 +15,7 @@ describe('primary names', function () { type = 'permabuy', years = 1, memory, + timestamp = STUB_TIMESTAMP, }) => { const buyRecordResult = await handle( { @@ -25,6 +26,7 @@ describe('primary names', function () { { name: 'Years', value: years }, { name: 'Process-Id', value: processId }, ], + Timestamp: timestamp, }, memory, ); @@ -48,6 +50,7 @@ describe('primary names', function () { recipient: caller, quantity: 100000000, // primary name cost memory, + timestamp, }); memory = transferMemory; } @@ -98,11 +101,17 @@ describe('primary names', function () { }; }; - const removePrimaryNames = async ({ names, caller, memory }) => { + const removePrimaryNames = async ({ + names, + caller, + memory, + timestamp = STUB_TIMESTAMP, + }) => { const removePrimaryNamesResult = await handle( { From: caller, Owner: caller, + Timestamp: timestamp, Tags: [ { name: 'Action', value: 'Remove-Primary-Names' }, { name: 'Names', value: names.join(',') }, @@ -121,6 +130,7 @@ describe('primary names', function () { address, memory, assert = true, + timestamp = STUB_TIMESTAMP, }) => { const getPrimaryNameResult = await handle( { @@ -128,6 +138,7 @@ describe('primary names', function () { { name: 'Action', value: 'Primary-Name' }, { name: 'Address', value: address }, ], + Timestamp: timestamp, }, memory, ); @@ -140,13 +151,18 @@ describe('primary names', function () { }; }; - const getOwnerOfPrimaryName = async ({ name, memory }) => { + const getOwnerOfPrimaryName = async ({ + name, + memory, + timestamp = STUB_TIMESTAMP, + }) => { const getOwnerResult = await handle( { Tags: [ { name: 'Action', value: 'Primary-Name' }, { name: 'Name', value: name }, ], + Timestamp: timestamp, }, memory, ); @@ -160,9 +176,11 @@ 
describe('primary names', function () { it('should allow creating and approving a primary name for an existing base name when the recipient is not the base name owner and is funding from stakes', async function () { const processId = ''.padEnd(43, 'a'); const recipient = ''.padEnd(43, 'b'); + const requestTimestamp = 1234567890; const { memory: buyRecordMemory } = await buyRecord({ name: 'test-name', processId, + timestamp: requestTimestamp, }); const stakeResult = await setUpStake({ @@ -170,12 +188,13 @@ describe('primary names', function () { stakerAddress: recipient, transferQty: 550000000, stakeQty: 500000000, + timestamp: requestTimestamp, }); const { result: requestPrimaryNameResult } = await requestPrimaryName({ name: 'test-name', caller: recipient, - timestamp: 1234567890, + timestamp: requestTimestamp, memory: stakeResult.memory, fundFrom: 'stakes', }); @@ -225,6 +244,7 @@ describe('primary names', function () { await getPrimaryNameForAddress({ address: recipient, memory: approvePrimaryNameRequestResult.Memory, + timestamp: approvedTimestamp, }); const primaryNameLookupResult = JSON.parse( @@ -236,6 +256,7 @@ describe('primary names', function () { const { result: ownerOfPrimaryNameResult } = await getOwnerOfPrimaryName({ name: 'test-name', memory: approvePrimaryNameRequestResult.Memory, + timestamp: approvedTimestamp, }); const ownerResult = JSON.parse(ownerOfPrimaryNameResult.Messages[0].Data); @@ -244,9 +265,11 @@ describe('primary names', function () { it('should immediately approve a primary name for an existing base name when the caller of the request is the base name owner', async function () { const processId = ''.padEnd(43, 'a'); + const requestTimestamp = 1234567890; const { memory: buyRecordMemory } = await buyRecord({ name: 'test-name', processId, + timestamp: requestTimestamp, }); const approvalTimestamp = 1234567899; @@ -303,6 +326,7 @@ describe('primary names', function () { await getPrimaryNameForAddress({ address: processId, memory: 
requestPrimaryNameResult.Memory, + timestamp: approvalTimestamp, }); const primaryNameLookupResult = JSON.parse( @@ -314,6 +338,7 @@ describe('primary names', function () { const { result: ownerOfPrimaryNameResult } = await getOwnerOfPrimaryName({ name: 'test-name', memory: requestPrimaryNameResult.Memory, + timestamp: approvalTimestamp, }); const ownerResult = JSON.parse(ownerOfPrimaryNameResult.Messages[0].Data); @@ -323,15 +348,17 @@ describe('primary names', function () { it('should allow removing a primary named by the owner or the owner of the base record', async function () { const processId = ''.padEnd(43, 'a'); const recipient = ''.padEnd(43, 'b'); + const requestTimestamp = 1234567890; const { memory: buyRecordMemory } = await buyRecord({ name: 'test-name', processId, + timestamp: requestTimestamp, }); // create a primary name claim const { result: requestPrimaryNameResult } = await requestPrimaryName({ name: 'test-name', caller: recipient, - timestamp: 1234567890, + timestamp: requestTimestamp, memory: buyRecordMemory, }); // claim the primary name @@ -340,7 +367,7 @@ describe('primary names', function () { name: 'test-name', caller: processId, recipient: recipient, - timestamp: 1234567890, + timestamp: requestTimestamp, memory: requestPrimaryNameResult.Memory, }); @@ -349,6 +376,7 @@ describe('primary names', function () { names: ['test-name'], caller: processId, memory: approvePrimaryNameRequestResult.Memory, + timestamp: requestTimestamp, }); // assert no error @@ -379,6 +407,7 @@ describe('primary names', function () { await getPrimaryNameForAddress({ address: recipient, memory: removePrimaryNameResult.Memory, + timestamp: requestTimestamp, assert: false, // we expect an error here, don't throw }); diff --git a/tests/tick.test.mjs b/tests/tick.test.mjs index 8e2d9ac9..f8a47da4 100644 --- a/tests/tick.test.mjs +++ b/tests/tick.test.mjs @@ -101,10 +101,8 @@ describe('Tick', async () => { buyRecordData.endTimestamp + 1000 * 60 * 60 * 24 * 14 + 1; const 
futureTickResult = await handle( { - Tags: [ - { name: 'Action', value: 'Tick' }, - { name: 'Timestamp', value: futureTimestamp.toString() }, - ], + Tags: [{ name: 'Action', value: 'Tick' }], + Timestamp: futureTimestamp, }, buyRecordResult.Memory, ); @@ -125,6 +123,7 @@ describe('Tick', async () => { { name: 'Action', value: 'Record' }, { name: 'Name', value: 'test-name' }, ], + Timestamp: futureTimestamp, }, futureTickResult.Memory, ); @@ -140,6 +139,7 @@ describe('Tick', async () => { { name: 'Action', value: 'Auction-Info' }, { name: 'Name', value: 'test-name' }, ], + Timestamp: futureTimestamp, }, futureTickResult.Memory, ); @@ -176,10 +176,7 @@ describe('Tick', async () => { ); // assert no error tag - const errorTag = joinNetworkResult.Messages?.[0]?.Tags?.find( - (tag) => tag.name === 'Error', - ); - assert.strictEqual(errorTag, undefined); + assertNoResultError(joinNetworkResult); // check the gateway record from contract const gateway = await handle( @@ -223,10 +220,8 @@ describe('Tick', async () => { const futureTimestamp = leavingGatewayData.endTimestamp + 1; const futureTick = await handle( { - Tags: [ - { name: 'Action', value: 'Tick' }, - { name: 'Timestamp', value: futureTimestamp.toString() }, - ], + Tags: [{ name: 'Action', value: 'Tick' }], + Timestamp: futureTimestamp, }, leaveNetworkResult.Memory, ); @@ -238,6 +233,7 @@ describe('Tick', async () => { { name: 'Action', value: 'Gateway' }, { name: 'Address', value: STUB_ADDRESS }, ], + Timestamp: futureTimestamp, }, futureTick.Memory, ); @@ -337,6 +333,7 @@ describe('Tick', async () => { const prunedVault = await handle( { Tags: [{ name: 'Action', value: 'Vault' }], + Timestamp: futureTimestamp, }, futureTick.Memory, ); @@ -353,6 +350,7 @@ describe('Tick', async () => { { name: 'Action', value: 'Balance' }, { name: 'Target', value: DEFAULT_HANDLE_OPTIONS.Owner }, ], + Timestamp: futureTimestamp, }, futureTick.Memory, ); @@ -655,6 +653,7 @@ describe('Tick', async () => { const delegateItems = 
await getDelegatesItems({ memory: distributionTick.Memory, gatewayAddress: STUB_ADDRESS, + timestamp: distributionTimestamp, }); assert.deepEqual(delegateItems, [ { @@ -691,6 +690,7 @@ describe('Tick', async () => { recipient: fundedUser, quantity: 100_000_000_000_000, memory: genesisEpochTick.Memory, + timestamp: genesisEpochStart, }); // Buy records in this epoch @@ -703,6 +703,7 @@ describe('Tick', async () => { { name: 'Name', value: 'test-name-' + i }, { name: 'Purchase-Type', value: 'permabuy' }, ], + Timestamp: genesisEpochStart, }, buyRecordMemory, ); @@ -776,10 +777,7 @@ describe('Tick', async () => { const epochTimestamp = genesisEpochStart + (epochDurationMs + 1) * i; const { Memory } = await handle( { - Tags: [ - { name: 'Action', value: 'Tick' }, - { name: 'Timestamp', value: epochTimestamp.toString() }, - ], + Tags: [{ name: 'Action', value: 'Tick' }], Timestamp: epochTimestamp, }, tickMemory, From a741ebff6d4c270c5fbc928bd84b0efcfc644987 Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Mon, 16 Dec 2024 20:26:47 -0600 Subject: [PATCH 31/76] fix(utils): move function to utils, ad sanitize --- src/main.lua | 37 ------------------------------------- src/utils.lua | 41 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 38 deletions(-) diff --git a/src/main.lua b/src/main.lua index 5384576e..26a30fdb 100644 --- a/src/main.lua +++ b/src/main.lua @@ -349,43 +349,6 @@ local function assertAndSanitizeInputs(msg) msg.Tags = utils.validateAndSanitizeInputs(msg.Tags) msg.From = utils.formatAddress(msg.From) msg.Timestamp = msg.Timestamp and tonumber(msg.Timestamp) or tonumber(msg.Tags.Timestamp) or nil - - local knownAddressTags = { - "Recipient", - "Initiator", - "Target", - "Source", - "Address", - "Vault-Id", - "Process-Id", - "Observer-Address", - } - - for _, tagName in ipairs(knownAddressTags) do - -- Format all incoming addresses - msg.Tags[tagName] = msg.Tags[tagName] and utils.formatAddress(msg.Tags[tagName]) or nil - end - - 
local knownNumberTags = { - "Quantity", - "Lock-Length", - "Operator-Stake", - "Delegated-Stake", - "Withdraw-Stake", - "Timestamp", - "Years", - "Min-Delegated-Stake", - "Port", - "Extend-Length", - "Delegate-Reward-Share-Ratio", - "Epoch-Index", - "Price-Interval-Ms", - "Block-Height", - } - for _, tagName in ipairs(knownNumberTags) do - -- Format all incoming numbers - msg.Tags[tagName] = msg.Tags[tagName] and tonumber(msg.Tags[tagName]) or nil - end end local function updateLastKnownMessage(msg) diff --git a/src/utils.lua b/src/utils.lua index 959c1ce4..7e371e02 100644 --- a/src/utils.lua +++ b/src/utils.lua @@ -573,6 +573,7 @@ end --- @return table sanitizedTable - the sanitized table function utils.validateAndSanitizeInputs(table) assert(type(table) == "table", "Table must be a table") + local sanitizedTable = {} for key, value in pairs(table) do assert(type(key) == "string", "Key must be a string") assert( @@ -591,8 +592,46 @@ function utils.validateAndSanitizeInputs(table) if type(value) == "number" then assert(utils.isInteger(value), "Number must be an integer") end + sanitizedTable[key] = value + end + + local knownAddressTags = { + "Recipient", + "Initiator", + "Target", + "Source", + "Address", + "Vault-Id", + "Process-Id", + "Observer-Address", + } + + for _, tagName in ipairs(knownAddressTags) do + -- Format all incoming addresses + sanitizedTable[tagName] = sanitizedTable[tagName] and utils.formatAddress(sanitizedTable[tagName]) or nil + end + + local knownNumberTags = { + "Quantity", + "Lock-Length", + "Operator-Stake", + "Delegated-Stake", + "Withdraw-Stake", + "Timestamp", + "Years", + "Min-Delegated-Stake", + "Port", + "Extend-Length", + "Delegate-Reward-Share-Ratio", + "Epoch-Index", + "Price-Interval-Ms", + "Block-Height", + } + for _, tagName in ipairs(knownNumberTags) do + -- Format all incoming numbers + sanitizedTable[tagName] = sanitizedTable[tagName] and tonumber(sanitizedTable[tagName]) or nil end - return table + return sanitizedTable 
end return utils From ca76658d18961f250ffa943ec4bc2cd26c5d3025 Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Mon, 16 Dec 2024 20:29:28 -0600 Subject: [PATCH 32/76] chore(test): remove unused sanitze --- tests/arns.test.mjs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/arns.test.mjs b/tests/arns.test.mjs index 993a8b99..6e85c8f1 100644 --- a/tests/arns.test.mjs +++ b/tests/arns.test.mjs @@ -612,7 +612,6 @@ describe('ArNS', async () => { { name: 'Process-Id', value: ''.padEnd(43, 'a') }, { name: 'Fund-From', value: 'any' }, ], - Timestamp: STUB_TIMESTAMP + 1, }, memory, ); @@ -626,7 +625,6 @@ describe('ArNS', async () => { { name: 'Action', value: 'Record' }, { name: 'Name', value: 'test-name' }, ], - Timestamp: STUB_TIMESTAMP + 1, }, buyRecordResult.Memory, ); @@ -643,7 +641,6 @@ describe('ArNS', async () => { { name: 'Years', value: '1' }, { name: 'Fund-From', value: 'any' }, ], - Timestamp: STUB_TIMESTAMP + 1, }, buyRecordResult.Memory, ); @@ -654,7 +651,6 @@ describe('ArNS', async () => { { name: 'Action', value: 'Record' }, { name: 'Name', value: 'test-name' }, ], - Timestamp: STUB_TIMESTAMP + 1, }, extendResult.Memory, ); From 9a16da48c29ee4403fc26a4f18fc9eec60c10230 Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Mon, 16 Dec 2024 22:08:53 -0600 Subject: [PATCH 33/76] feat(gar): optimize prescribed observers, use lookup table and compute full array on read handlers --- spec/epochs_spec.lua | 94 ++++++++------------------------------------ src/epochs.lua | 66 ++++++++++--------------------- src/main.lua | 23 ++++++++++- 3 files changed, 59 insertions(+), 124 deletions(-) diff --git a/spec/epochs_spec.lua b/spec/epochs_spec.lua index 8df8d1f8..542df7d4 100644 --- a/spec/epochs_spec.lua +++ b/spec/epochs_spec.lua @@ -73,27 +73,15 @@ describe("epochs", function() observerAddress = "observerAddress", } local expectation = { - { - gatewayAddress = "test-this-is-valid-arweave-wallet-address-1", - observerAddress = "observerAddress", - stake = 
gar.getSettings().operators.minStake, - startTimestamp = startTimestamp, - stakeWeight = 1, - tenureWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - gatewayRewardRatioWeight = 1, - observerRewardRatioWeight = 1, - compositeWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - normalizedCompositeWeight = 1, - }, + ["observerAddress"] = "test-this-is-valid-arweave-wallet-address-1", } - local status, result = pcall(epochs.computePrescribedObserversForEpoch, 0, hashchain) - assert.is_true(status) - assert.are.equal(1, #result) - assert.are.same(expectation, result) + local prescribedObserverMap = epochs.computePrescribedObserversForEpoch(0, hashchain) + assert.are.same(expectation, prescribedObserverMap) end) it("should return the maximum number of gateways if more are enrolled in network", function() local testHashchain = "c29tZSBzYW1wbGUgaGFzaA==" -- base64 of "some sample hash" + print("testHashchain: " .. testHashchain) _G.EpochSettings = { maxObservers = 2, -- limit to 2 observers epochZeroStartTimestamp = startTimestamp, @@ -118,52 +106,25 @@ describe("epochs", function() }, settings = testSettings, status = "joined", - observerAddress = "observerAddress", + observerAddress = "observer-address-" .. i, } -- note - ordering of keys is not guaranteed when insert into maps - _G.GatewayRegistry["observer" .. i] = gateway + _G.GatewayRegistry["gateway-address-" .. 
i] = gateway end local expectation = { - { - gatewayAddress = "observer1", - observerAddress = "observerAddress", - stake = gar.getSettings().operators.minStake, - startTimestamp = startTimestamp, - stakeWeight = 1, - tenureWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - gatewayRewardRatioWeight = 1, - observerRewardRatioWeight = 1, - compositeWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - normalizedCompositeWeight = 1 / 3, - }, - { - gatewayAddress = "observer3", - observerAddress = "observerAddress", - stake = gar.getSettings().operators.minStake, - startTimestamp = startTimestamp, - stakeWeight = 1, - tenureWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - gatewayRewardRatioWeight = 1, - observerRewardRatioWeight = 1, - compositeWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - normalizedCompositeWeight = 1 / 3, - }, + ["observer-address-1"] = "gateway-address-1", + ["observer-address-3"] = "gateway-address-3", } - local status, result = pcall(epochs.computePrescribedObserversForEpoch, 0, testHashchain) - assert.is_true(status) - assert.are.equal(2, #result) - table.sort(result, function(a, b) - return a.gatewayAddress < b.gatewayAddress - end) - assert.are.same(expectation, result) + local prescribedObserverMap = epochs.computePrescribedObserversForEpoch(0, testHashchain) + assert.are.same(expectation, prescribedObserverMap) end) end) describe("computePrescribedNamesForEpoch", function() -- NOTE: Record names in the tests below use spelled out numbers because without that -- there's insufficient base64url information encoded in the final encoded block to - -- disambiguate the decoded vallues. + -- disambiguate the decoded values. 
it("should return all eligible names if fewer than the maximum in name registry", function() _G.NameRegistry.records = { ["arns-name-one"] = { @@ -513,18 +474,7 @@ describe("epochs", function() reports = {}, }, prescribedObservers = { - { - compositeWeight = 4.0, - gatewayAddress = "test-this-is-valid-arweave-wallet-address-1", - gatewayRewardRatioWeight = 1.0, - normalizedCompositeWeight = 1.0, - observerAddress = "test-this-is-valid-arweave-wallet-address-1", - observerRewardRatioWeight = 1.0, - stake = gar.getSettings().operators.minStake, - stakeWeight = 1.0, - startTimestamp = 0, - tenureWeight = 4, - }, + ["test-this-is-valid-arweave-wallet-address-1"] = "test-this-is-valid-arweave-wallet-address-1", }, prescribedNames = {}, distributions = { @@ -634,21 +584,11 @@ describe("epochs", function() }, prescribedNames = {}, prescribedObservers = { - { - observerAddress = "test-this-very-valid-observer-wallet-addr-1", - }, - { - observerAddress = "test-this-very-valid-observer-wallet-addr-2", - }, - { - observerAddress = "test-this-very-valid-observer-wallet-addr-3", - }, - { - observerAddress = "test-this-very-valid-observer-wallet-addr-4", - }, - { - observerAddress = "test-this-very-valid-observer-wallet-addr-5", - }, + ["test-this-very-valid-observer-wallet-addr-1"] = true, + ["test-this-very-valid-observer-wallet-addr-2"] = true, + ["test-this-very-valid-observer-wallet-addr-3"] = true, + ["test-this-very-valid-observer-wallet-addr-4"] = true, + ["test-this-very-valid-observer-wallet-addr-5"] = true, }, } diff --git a/src/epochs.lua b/src/epochs.lua index cb521c79..fa884f25 100644 --- a/src/epochs.lua +++ b/src/epochs.lua @@ -108,7 +108,7 @@ end --- Gets the prescribed observers for an epoch --- @param epochIndex number The epoch index ---- @return WeightedGateway[] # The prescribed observers for the epoch +--- @return table # The prescribed observers for the epoch function epochs.getPrescribedObserversForEpoch(epochIndex) return 
epochs.getEpoch(epochIndex).prescribedObservers or {} end @@ -228,7 +228,7 @@ end --- Computes the prescribed observers for an epoch --- @param epochIndex number The epoch index --- @param hashchain string The hashchain ---- @return WeightedGateway[], WeightedGateway[] # The prescribed observers for the epoch, and all the gateways with weights +--- @return table, WeightedGateway[] # The prescribed observers for the epoch, and all the gateways with weights function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) assert(epochIndex >= 0, "Epoch index must be greater than or equal to 0") assert(type(hashchain) == "string", "Hashchain must be a string") @@ -239,6 +239,7 @@ function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) -- Filter out any observers that could have a normalized composite weight of 0 local filteredObservers = {} + local prescribedObserversLookup = {} -- use ipairs as weightedObservers in array for _, observer in ipairs(weightedGateways) do if observer.normalizedCompositeWeight > 0 then @@ -246,7 +247,11 @@ function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) end end if #filteredObservers <= epochs.getSettings().maxObservers then - return filteredObservers, weightedGateways + -- Create lookup table mapping observer addresses to gateway addresses + for _, observer in ipairs(filteredObservers) do + prescribedObserversLookup[observer.observerAddress] = observer.gatewayAddress + end + return prescribedObserversLookup, weightedGateways end -- the hash we will use to create entropy for prescribed observers @@ -263,22 +268,20 @@ function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) -- get our prescribed observers, using the hashchain as entropy local hash = epochHash - local prescribedObserversAddressesLookup = {} - while utils.lengthOfTable(prescribedObserversAddressesLookup) < epochs.getSettings().maxObservers do + while utils.lengthOfTable(prescribedObserversLookup) < 
epochs.getSettings().maxObservers do local hashString = crypto.utils.array.toString(hash) local random = crypto.random(nil, nil, hashString) / 0xffffffff local cumulativeNormalizedCompositeWeight = 0 for _, observer in ipairs(filteredObservers) do - local alreadyPrescribed = prescribedObserversAddressesLookup[observer.gatewayAddress] - + local alreadyPrescribed = prescribedObserversLookup[observer.observerAddress] -- add only if observer has not already been prescribed - if not alreadyPrescribed then + if alreadyPrescribed == nil then -- add the observers normalized composite weight to the cumulative weight cumulativeNormalizedCompositeWeight = cumulativeNormalizedCompositeWeight + observer.normalizedCompositeWeight -- if the random value is less than the cumulative weight, we have found our observer if random <= cumulativeNormalizedCompositeWeight then - prescribedObserversAddressesLookup[observer.gatewayAddress] = true + prescribedObserversLookup[observer.observerAddress] = observer.gatewayAddress break end end @@ -287,22 +290,8 @@ function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) local newHash = crypto.utils.stream.fromArray(hash) hash = crypto.digest.sha2_256(newHash).asBytes() end - local prescribedObservers = {} - local filteredObserversAddressMap = utils.reduce(filteredObservers, function(acc, _, observer) - acc[observer.gatewayAddress] = observer - return acc - end, {}) - for address, _ in pairs(prescribedObserversAddressesLookup) do - table.insert(prescribedObservers, filteredObserversAddressMap[address]) - end - - -- sort them in place - table.sort(prescribedObservers, function(a, b) - return a.normalizedCompositeWeight > b.normalizedCompositeWeight -- sort by descending weight - end) - -- return the prescribed observers and the weighted observers - return prescribedObservers, weightedGateways + return prescribedObserversLookup, weightedGateways end --- Gets the epoch timestamps for an epoch index @@ -434,16 +423,12 @@ function 
epochs.saveObservations(observerAddress, reportTxId, failedGatewayAddre ) local prescribedObservers = epochs.getPrescribedObserversForEpoch(epochIndex) - assert(#prescribedObservers > 0, "No prescribed observers for the current epoch.") + assert(utils.lengthOfTable(prescribedObservers) > 0, "No prescribed observers for the current epoch.") - local observerIndex = utils.findInArray(prescribedObservers, function(prescribedObserver) - return prescribedObserver.observerAddress == observerAddress - end) + local gatewayAddressForObserver = prescribedObservers[observerAddress] + assert(gatewayAddressForObserver, "Caller is not a prescribed observer for the current epoch.") - local observer = prescribedObservers[observerIndex] - assert(observer, "Caller is not a prescribed observer for the current epoch.") - - local observingGateway = gar.getGateway(observer.gatewayAddress) + local observingGateway = gar.getGateway(gatewayAddressForObserver) assert(observingGateway, "The associated gateway not found in the registry.") local epoch = epochs.getEpoch(epochIndex) @@ -503,7 +488,7 @@ end --- Computes the total eligible rewards for an epoch based on the protocol balance and the reward percentage and prescribed observers --- @param epochIndex number The epoch index ---- @param prescribedObservers WeightedGateway[] The prescribed observers for the epoch +--- @param prescribedObservers table The prescribed observers for the epoch --- @return ComputedRewards # The total eligible rewards function epochs.computeTotalEligibleRewardsForEpoch(epochIndex, prescribedObservers) local epochStartTimestamp = epochs.getEpochTimestampsForIndex(epochIndex) @@ -513,10 +498,6 @@ function epochs.computeTotalEligibleRewardsForEpoch(epochIndex, prescribedObserv local totalEligibleRewards = math.floor(protocolBalance * rewardRate) local eligibleGatewayReward = math.floor(totalEligibleRewards * 0.90 / #activeGatewayAddresses) -- TODO: make these setting variables local eligibleObserverReward = 
math.floor(totalEligibleRewards * 0.10 / #prescribedObservers) -- TODO: make these setting variables - local prescribedObserversLookup = utils.reduce(prescribedObservers, function(acc, _, observer) - acc[observer.observerAddress] = true - return acc - end, {}) -- compute for each gateway what their potential rewards are and for their delegates local potentialRewards = {} -- use ipairs as activeGatewayAddresses is an array @@ -525,7 +506,7 @@ function epochs.computeTotalEligibleRewardsForEpoch(epochIndex, prescribedObserv if gateway ~= nil then local potentialReward = eligibleGatewayReward -- start with the gateway reward -- it it is a prescribed observer for the epoch, it is eligible for the observer reward - if prescribedObserversLookup[gateway.observerAddress] then + if prescribedObservers[gateway.observerAddress] then potentialReward = potentialReward + eligibleObserverReward -- add observer reward if it is a prescribed observer end -- if any delegates are present, distribute the rewards to the delegates @@ -590,14 +571,7 @@ function epochs.distributeRewardsForEpoch(currentTimestamp) end local eligibleGatewaysForEpoch = epochs.getEligibleRewardsForEpoch(epochIndex) - local prescribedObserversLookup = utils.reduce( - epochs.getPrescribedObserversForEpoch(epochIndex), - function(acc, _, observer) - acc[observer.observerAddress] = true - return acc - end, - {} - ) + local prescribedObserversLookup = epochs.getPrescribedObserversForEpoch(epochIndex) local totalObservationsSubmitted = utils.lengthOfTable(epoch.observations.reports) or 0 -- get the eligible rewards for the epoch diff --git a/src/main.lua b/src/main.lua index 3e135166..919fa3f4 100644 --- a/src/main.lua +++ b/src/main.lua @@ -1946,10 +1946,31 @@ addEventingHandler( local epochIndex = providedEpochIndex or epochs.getEpochIndexForTimestamp(timestamp) local prescribedObservers = epochs.getPrescribedObserversForEpoch(epochIndex) + -- Iterate over prescribed observers and add gateway details + local 
prescribedObserversWithWeights = {} + for _, gatewayAddress in ipairs(prescribedObservers) do + local gateway = gar.getGateway(gatewayAddress) + if gateway then + table.insert(prescribedObserversWithWeights, { + observerAddress = gateway.observerAddress, + gatewayAddress = gatewayAddress, + normalizedCompositeWeight = gateway.weights.normalizedCompositeWeight, + stakeWeight = gateway.weights.stakeWeight, + tenureWeight = gateway.weights.tenureWeight, + gatewayRewardRatioWeight = gateway.weights.gatewayRewardRatioWeight, + observerRewardRatioWeight = gateway.weights.observerRewardRatioWeight, + compositeWeight = gateway.weights.compositeWeight, + }) + end + end + -- sort by normalizedCompositeWeight + table.sort(prescribedObserversWithWeights, function(a, b) + return a.normalizedCompositeWeight > b.normalizedCompositeWeight + end) Send(msg, { Target = msg.From, Action = "Prescribed-Observers-Notice", - Data = json.encode(prescribedObservers), + Data = json.encode(prescribedObserversWithWeights), }) end ) From 957b601725b6ae732798eff343c6c8945d6fdfaf Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Mon, 16 Dec 2024 22:11:21 -0600 Subject: [PATCH 34/76] chore(tests): remove print logs from tests --- spec/epochs_spec.lua | 1 - src/epochs.lua | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/spec/epochs_spec.lua b/spec/epochs_spec.lua index 542df7d4..03c8e45e 100644 --- a/spec/epochs_spec.lua +++ b/spec/epochs_spec.lua @@ -81,7 +81,6 @@ describe("epochs", function() it("should return the maximum number of gateways if more are enrolled in network", function() local testHashchain = "c29tZSBzYW1wbGUgaGFzaA==" -- base64 of "some sample hash" - print("testHashchain: " .. 
testHashchain) _G.EpochSettings = { maxObservers = 2, -- limit to 2 observers epochZeroStartTimestamp = startTimestamp, diff --git a/src/epochs.lua b/src/epochs.lua index fa884f25..9efe1b73 100644 --- a/src/epochs.lua +++ b/src/epochs.lua @@ -275,7 +275,7 @@ function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) for _, observer in ipairs(filteredObservers) do local alreadyPrescribed = prescribedObserversLookup[observer.observerAddress] -- add only if observer has not already been prescribed - if alreadyPrescribed == nil then + if not alreadyPrescribed then -- add the observers normalized composite weight to the cumulative weight cumulativeNormalizedCompositeWeight = cumulativeNormalizedCompositeWeight + observer.normalizedCompositeWeight From 353142d5a76848f1ec073dd8b1851a1cec5d1978 Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Mon, 16 Dec 2024 22:37:55 -0600 Subject: [PATCH 35/76] fix(epochs): update return types and fix breaking integration tests --- spec/epochs_spec.lua | 46 +++++++++++++++----------------------------- src/epochs.lua | 16 +++++++-------- tests/gar.test.mjs | 9 +++++---- tests/tick.test.mjs | 18 +++-------------- 4 files changed, 32 insertions(+), 57 deletions(-) diff --git a/spec/epochs_spec.lua b/spec/epochs_spec.lua index 03c8e45e..5e688817 100644 --- a/spec/epochs_spec.lua +++ b/spec/epochs_spec.lua @@ -106,6 +106,14 @@ describe("epochs", function() settings = testSettings, status = "joined", observerAddress = "observer-address-" .. i, + weights = { + stakeWeight = 1, + tenureWeight = 1, + gatewayRewardRatioWeight = 1, + observerRewardRatioWeight = 1, + compositeWeight = 1, + normalizedCompositeWeight = 1, + }, } -- note - ordering of keys is not guaranteed when insert into maps _G.GatewayRegistry["gateway-address-" .. 
i] = gateway @@ -212,7 +220,7 @@ describe("epochs", function() assert.match("Observations for the current epoch cannot be submitted before", error) end) it("should throw an error if the caller is not prescribed", function() - local observer = "test-this-is-valid-arweave-wallet-address-2" + local observer = "test-this-is-valid-arweave-observer-address-2" local reportTxId = "test-this-very-valid-observations-report-tx" local settings = epochs.getSettings() local timestamp = settings.epochZeroStartTimestamp + settings.distributionDelayMs + 1 @@ -220,18 +228,7 @@ describe("epochs", function() "test-this-is-valid-arweave-wallet-address-1", } _G.Epochs[0].prescribedObservers = { - { - gatewayAddress = "test-this-is-valid-arweave-wallet-address-1", - observerAddress = "test-this-is-valid-arweave-wallet-address-1", - stake = gar.getSettings().operators.minStake, - startTimestamp = startTimestamp, - stakeWeight = 1, - tenureWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - gatewayRewardRatioWeight = 1, - observerRewardRatioWeight = 1, - compositeWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - normalizedCompositeWeight = 1, - }, + ["test-this-is-valid-arweave-observer-address-1"] = "test-this-is-valid-arweave-gateway-address-1", } local status, error = pcall(epochs.saveObservations, observer, reportTxId, failedGateways, timestamp) assert.is_false(status) @@ -240,7 +237,7 @@ describe("epochs", function() it( "should save observation when the timestamp is after the distribution delay and only mark gateways around during the full epoch as failed", function() - local observer = "test-this-is-valid-arweave-wallet-address-2" + local observer = "test-this-is-valid-arweave-observer-address-2" local reportTxId = "test-this-very-valid-observations-report-tx" local settings = epochs.getSettings() local timestamp = settings.epochZeroStartTimestamp + settings.distributionDelayMs + 1 @@ -262,7 +259,7 @@ describe("epochs", function() }, settings = testSettings, 
status = "joined", - observerAddress = "test-this-is-valid-arweave-wallet-address-1", + observerAddress = "test-this-is-valid-arweave-observer-address-1", }, ["test-this-is-valid-arweave-wallet-address-2"] = { operatorStake = gar.getSettings().operators.minStake, @@ -281,7 +278,7 @@ describe("epochs", function() }, settings = testSettings, status = "joined", - observerAddress = "test-this-is-valid-arweave-wallet-address-2", + observerAddress = "test-this-is-valid-arweave-observer-address-2", }, ["test-this-is-valid-arweave-wallet-address-3"] = { operatorStake = gar.getSettings().operators.minStake, @@ -300,7 +297,7 @@ describe("epochs", function() }, settings = testSettings, status = "joined", - observerAddress = "test-this-is-valid-arweave-wallet-address-3", + observerAddress = "test-this-is-valid-arweave-observer-address-3", }, ["test-this-is-valid-arweave-wallet-address-4"] = { operatorStake = gar.getSettings().operators.minStake, @@ -320,22 +317,11 @@ describe("epochs", function() }, settings = testSettings, status = "leaving", -- leaving, so it is not eligible to receive stats from this epoch - observerAddress = "test-this-is-valid-arweave-wallet-address-4", + observerAddress = "test-this-is-valid-arweave-observer-address-4", }, } _G.Epochs[0].prescribedObservers = { - { - gatewayAddress = "test-this-is-valid-arweave-wallet-address-2", - observerAddress = "test-this-is-valid-arweave-wallet-address-2", - stake = gar.getSettings().operators.minStake, - startTimestamp = startTimestamp, - stakeWeight = 1, - tenureWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - gatewayRewardRatioWeight = 1, - observerRewardRatioWeight = 1, - compositeWeight = 1 / gar.getSettings().observers.tenureWeightPeriod, - normalizedCompositeWeight = 1, - }, + ["test-this-is-valid-arweave-observer-address-2"] = "test-this-is-valid-arweave-wallet-address-2", } local failedGateways = { "test-this-is-valid-arweave-wallet-address-1", diff --git a/src/epochs.lua b/src/epochs.lua 
index 9efe1b73..6dae811f 100644 --- a/src/epochs.lua +++ b/src/epochs.lua @@ -247,7 +247,6 @@ function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) end end if #filteredObservers <= epochs.getSettings().maxObservers then - -- Create lookup table mapping observer addresses to gateway addresses for _, observer in ipairs(filteredObservers) do prescribedObserversLookup[observer.observerAddress] = observer.gatewayAddress end @@ -422,10 +421,10 @@ function epochs.saveObservations(observerAddress, reportTxId, failedGatewayAddre "Observations for the current epoch cannot be submitted before: " .. epochDistributionTimestamp ) - local prescribedObservers = epochs.getPrescribedObserversForEpoch(epochIndex) - assert(utils.lengthOfTable(prescribedObservers) > 0, "No prescribed observers for the current epoch.") + local prescribedObserversLookup = epochs.getPrescribedObserversForEpoch(epochIndex) + assert(utils.lengthOfTable(prescribedObserversLookup) > 0, "No prescribed observers for the current epoch.") - local gatewayAddressForObserver = prescribedObservers[observerAddress] + local gatewayAddressForObserver = prescribedObserversLookup[observerAddress] assert(gatewayAddressForObserver, "Caller is not a prescribed observer for the current epoch.") local observingGateway = gar.getGateway(gatewayAddressForObserver) @@ -488,16 +487,17 @@ end --- Computes the total eligible rewards for an epoch based on the protocol balance and the reward percentage and prescribed observers --- @param epochIndex number The epoch index ---- @param prescribedObservers table The prescribed observers for the epoch +--- @param prescribedObserversLookup table The prescribed observers for the epoch --- @return ComputedRewards # The total eligible rewards -function epochs.computeTotalEligibleRewardsForEpoch(epochIndex, prescribedObservers) +function epochs.computeTotalEligibleRewardsForEpoch(epochIndex, prescribedObserversLookup) local epochStartTimestamp = 
epochs.getEpochTimestampsForIndex(epochIndex) local activeGatewayAddresses = gar.getActiveGatewaysBeforeTimestamp(epochStartTimestamp) local protocolBalance = balances.getBalance(ao.id) local rewardRate = epochs.getRewardRateForEpoch(epochIndex) local totalEligibleRewards = math.floor(protocolBalance * rewardRate) local eligibleGatewayReward = math.floor(totalEligibleRewards * 0.90 / #activeGatewayAddresses) -- TODO: make these setting variables - local eligibleObserverReward = math.floor(totalEligibleRewards * 0.10 / #prescribedObservers) -- TODO: make these setting variables + local eligibleObserverReward = + math.floor(totalEligibleRewards * 0.10 / utils.lengthOfTable(prescribedObserversLookup)) -- TODO: make these setting variables -- compute for each gateway what their potential rewards are and for their delegates local potentialRewards = {} -- use ipairs as activeGatewayAddresses is an array @@ -506,7 +506,7 @@ function epochs.computeTotalEligibleRewardsForEpoch(epochIndex, prescribedObserv if gateway ~= nil then local potentialReward = eligibleGatewayReward -- start with the gateway reward -- it it is a prescribed observer for the epoch, it is eligible for the observer reward - if prescribedObservers[gateway.observerAddress] then + if prescribedObserversLookup[gateway.observerAddress] then potentialReward = potentialReward + eligibleObserverReward -- add observer reward if it is a prescribed observer end -- if any delegates are present, distribute the rewards to the delegates diff --git a/tests/gar.test.mjs b/tests/gar.test.mjs index 664161fc..16b91faa 100644 --- a/tests/gar.test.mjs +++ b/tests/gar.test.mjs @@ -1283,10 +1283,11 @@ describe('GatewayRegistry', async () => { // Assert prescribed observers const prescribedObservers = JSON.parse(futureTick.Messages[0].Data) .maybeNewEpoch.prescribedObservers; - assert.equal(prescribedObservers.length, 2); - const prescribedObserverAddresses = prescribedObservers.map( - (o) => o.observerAddress, - ); + 
assert.deepEqual(prescribedObservers, { + [STUB_ADDRESS]: STUB_ADDRESS, + [observerAddress]: gatewayAddress, + }); + const prescribedObserverAddresses = Object.keys(prescribedObservers); assert.ok(prescribedObserverAddresses.includes(STUB_ADDRESS)); assert.ok(prescribedObserverAddresses.includes(observerAddress)); gatewayMemory = futureTick.Memory; diff --git a/tests/tick.test.mjs b/tests/tick.test.mjs index 5ec35998..d5f55084 100644 --- a/tests/tick.test.mjs +++ b/tests/tick.test.mjs @@ -454,21 +454,9 @@ describe('Tick', async () => { failureSummaries: [], reports: [], }, - prescribedObservers: [ - { - // TODO: we could just return the addresses here - observerAddress: STUB_ADDRESS, - observerRewardRatioWeight: 1, - normalizedCompositeWeight: 1, - gatewayRewardRatioWeight: 1, - gatewayAddress: STUB_ADDRESS, - stake: INITIAL_OPERATOR_STAKE * 3, - tenureWeight: 4, - compositeWeight: 12, - startTimestamp: 21600000, - stakeWeight: 3, - }, - ], // the only gateway in the network + prescribedObservers: { + [STUB_ADDRESS]: STUB_ADDRESS, + }, prescribedNames: [], // no names in the network distributions: { totalEligibleGateways: 1, From d1db217ee04d5c7b513880ab3d709205fa4c213d Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Mon, 16 Dec 2024 22:48:47 -0600 Subject: [PATCH 36/76] chore(epochs): move to utility function and use in handler --- src/epochs.lua | 29 ++++++++++++++++++++++++++ src/main.lua | 55 +++++++++----------------------------------------- 2 files changed, 38 insertions(+), 46 deletions(-) diff --git a/src/epochs.lua b/src/epochs.lua index 6dae811f..0fb77eed 100644 --- a/src/epochs.lua +++ b/src/epochs.lua @@ -113,6 +113,35 @@ function epochs.getPrescribedObserversForEpoch(epochIndex) return epochs.getEpoch(epochIndex).prescribedObservers or {} end +--- Get prescribed observers with weights for epoch +--- @param epochIndex number The epoch index +--- @return WeightedGateway[] # The prescribed observers with weights for the epoch +function 
epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) + local prescribedObservers = epochs.getPrescribedObserversForEpoch(epochIndex) + -- Iterate over prescribed observers and add gateway details + local prescribedObserversWithWeights = {} + for _, gatewayAddress in ipairs(prescribedObservers) do + local gateway = gar.getGateway(gatewayAddress) + if gateway then + table.insert(prescribedObserversWithWeights, { + observerAddress = gateway.observerAddress, + gatewayAddress = gatewayAddress, + normalizedCompositeWeight = gateway.weights.normalizedCompositeWeight, + stakeWeight = gateway.weights.stakeWeight, + tenureWeight = gateway.weights.tenureWeight, + gatewayRewardRatioWeight = gateway.weights.gatewayRewardRatioWeight, + observerRewardRatioWeight = gateway.weights.observerRewardRatioWeight, + compositeWeight = gateway.weights.compositeWeight, + }) + end + end + -- sort by normalizedCompositeWeight + table.sort(prescribedObserversWithWeights, function(a, b) + return a.normalizedCompositeWeight > b.normalizedCompositeWeight + end) + return prescribedObserversWithWeights +end + --- Gets the eligible rewards for an epoch --- @param epochIndex number The epoch index --- @return Rewards # T he eligible rewards for the epoch diff --git a/src/main.lua b/src/main.lua index 919fa3f4..ee5225f2 100644 --- a/src/main.lua +++ b/src/main.lua @@ -1939,34 +1939,9 @@ addEventingHandler( utils.hasMatchingTag("Action", ActionMap.PrescribedObservers), function(msg) -- check if the epoch number is provided, if not get the epoch number from the timestamp - local providedEpochIndex = msg.Tags["Epoch-Index"] - local timestamp = msg.Timestamp - - assert(providedEpochIndex or timestamp, "Epoch index or timestamp is required") - - local epochIndex = providedEpochIndex or epochs.getEpochIndexForTimestamp(timestamp) - local prescribedObservers = epochs.getPrescribedObserversForEpoch(epochIndex) - -- Iterate over prescribed observers and add gateway details - local 
prescribedObserversWithWeights = {} - for _, gatewayAddress in ipairs(prescribedObservers) do - local gateway = gar.getGateway(gatewayAddress) - if gateway then - table.insert(prescribedObserversWithWeights, { - observerAddress = gateway.observerAddress, - gatewayAddress = gatewayAddress, - normalizedCompositeWeight = gateway.weights.normalizedCompositeWeight, - stakeWeight = gateway.weights.stakeWeight, - tenureWeight = gateway.weights.tenureWeight, - gatewayRewardRatioWeight = gateway.weights.gatewayRewardRatioWeight, - observerRewardRatioWeight = gateway.weights.observerRewardRatioWeight, - compositeWeight = gateway.weights.compositeWeight, - }) - end - end - -- sort by normalizedCompositeWeight - table.sort(prescribedObserversWithWeights, function(a, b) - return a.normalizedCompositeWeight > b.normalizedCompositeWeight - end) + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp or msg.Tags.Timestamp) + local prescribedObserversWithWeights = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) Send(msg, { Target = msg.From, Action = "Prescribed-Observers-Notice", @@ -1976,12 +1951,8 @@ addEventingHandler( ) addEventingHandler(ActionMap.Observations, utils.hasMatchingTag("Action", ActionMap.Observations), function(msg) - local providedEpochIndex = msg.Tags["Epoch-Index"] - local timestamp = msg.Timestamp - - assert(providedEpochIndex or timestamp, "Epoch index or timestamp is required") - - local epochIndex = providedEpochIndex or epochs.getEpochIndexForTimestamp(timestamp) + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp or msg.Tags.Timestamp) local observations = epochs.getObservationsForEpoch(epochIndex) Send(msg, { Target = msg.From, @@ -1993,12 +1964,8 @@ end) addEventingHandler(ActionMap.PrescribedNames, utils.hasMatchingTag("Action", ActionMap.PrescribedNames), function(msg) -- check 
if the epoch number is provided, if not get the epoch number from the timestamp - local providedEpochIndex = msg.Tags["Epoch-Index"] - local timestamp = msg.Timestamp - - assert(providedEpochIndex or timestamp, "Epoch index or timestamp is required") - - local epochIndex = providedEpochIndex or epochs.getEpochIndexForTimestamp(timestamp) + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp or msg.Tags.Timestamp) local prescribedNames = epochs.getPrescribedNamesForEpoch(epochIndex) Send(msg, { Target = msg.From, @@ -2009,12 +1976,8 @@ end) addEventingHandler(ActionMap.Distributions, utils.hasMatchingTag("Action", ActionMap.Distributions), function(msg) -- check if the epoch number is provided, if not get the epoch number from the timestamp - local providedEpochIndex = msg.Tags["Epoch-Index"] - local timestamp = msg.Timestamp - - assert(providedEpochIndex or timestamp, "Epoch index or timestamp is required") - - local epochIndex = providedEpochIndex or epochs.getEpochIndexForTimestamp(timestamp) + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) local distributions = epochs.getDistributionsForEpoch(epochIndex) Send(msg, { Target = msg.From, From e43bef04f04e32039556f1b828e1b6b25654cdaa Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Mon, 16 Dec 2024 23:06:19 -0600 Subject: [PATCH 37/76] chore(test): fix test setup for computePrescribedObservers --- spec/epochs_spec.lua | 14 +++----------- src/gar.lua | 6 +++--- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/spec/epochs_spec.lua b/spec/epochs_spec.lua index 5e688817..e4ad1997 100644 --- a/spec/epochs_spec.lua +++ b/spec/epochs_spec.lua @@ -106,22 +106,14 @@ describe("epochs", function() settings = testSettings, status = "joined", observerAddress = "observer-address-" .. 
i, - weights = { - stakeWeight = 1, - tenureWeight = 1, - gatewayRewardRatioWeight = 1, - observerRewardRatioWeight = 1, - compositeWeight = 1, - normalizedCompositeWeight = 1, - }, } -- note - ordering of keys is not guaranteed when insert into maps - _G.GatewayRegistry["gateway-address-" .. i] = gateway + _G.GatewayRegistry["observers" .. i] = gateway end local expectation = { - ["observer-address-1"] = "gateway-address-1", - ["observer-address-3"] = "gateway-address-3", + ["observer-address-1"] = "observers1", + ["observer-address-3"] = "observers3", } local prescribedObserverMap = epochs.computePrescribedObserversForEpoch(0, testHashchain) assert.are.same(expectation, prescribedObserverMap) diff --git a/src/gar.lua b/src/gar.lua index b6f4294f..6ea41c77 100644 --- a/src/gar.lua +++ b/src/gar.lua @@ -697,8 +697,8 @@ function gar.getGatewayWeightsAtTimestamp(gatewayAddresses, timestamp) local totalCompositeWeight = 0 -- Iterate over gateways to calculate weights - for _, address in pairs(gatewayAddresses) do - local gateway = gar.getGateway(address) + for _, gatewayAddress in pairs(gatewayAddresses) do + local gateway = gar.getGateway(gatewayAddress) if gateway then local totalStake = gateway.operatorStake + gateway.totalDelegatedStake -- 100 - no cap to this local stakeWeightRatio = totalStake / gar.getSettings().operators.minStake -- this is always greater than 1 as the minOperatorStake is always less than the stake @@ -728,7 +728,7 @@ function gar.getGatewayWeightsAtTimestamp(gatewayAddresses, timestamp) * observerRewardRatioWeight table.insert(weightedObservers, { - gatewayAddress = address, + gatewayAddress = gatewayAddress, observerAddress = gateway.observerAddress, stake = totalStake, startTimestamp = gateway.startTimestamp, From f55e3ee7c99eb30315cddeb755d04f2c0e293009 Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Tue, 17 Dec 2024 08:04:14 -0600 Subject: [PATCH 38/76] chore(test): fix test setup --- spec/epochs_spec.lua | 6 +++--- 1 file changed, 3 
insertions(+), 3 deletions(-) diff --git a/spec/epochs_spec.lua b/spec/epochs_spec.lua index e4ad1997..2ef30565 100644 --- a/spec/epochs_spec.lua +++ b/spec/epochs_spec.lua @@ -108,12 +108,12 @@ describe("epochs", function() observerAddress = "observer-address-" .. i, } -- note - ordering of keys is not guaranteed when insert into maps - _G.GatewayRegistry["observers" .. i] = gateway + _G.GatewayRegistry["observer" .. i] = gateway end local expectation = { - ["observer-address-1"] = "observers1", - ["observer-address-3"] = "observers3", + ["observer-address-1"] = "observer1", + ["observer-address-3"] = "observer3", } local prescribedObserverMap = epochs.computePrescribedObserversForEpoch(0, testHashchain) assert.are.same(expectation, prescribedObserverMap) From e86de5e1ce36a14ef747c225ec43e5c52d1ce17d Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Tue, 17 Dec 2024 09:06:28 -0600 Subject: [PATCH 39/76] fix(epochs): swap pairs with ipairs, add integration and unit tests for epochs --- spec/epochs_spec.lua | 50 +++++++++++++++ src/epochs.lua | 3 +- src/gar.lua | 2 +- src/main.lua | 7 ++- tests/epochs.test.mjs | 135 ++++++++++++++++++++++++++++++++++++++++ tests/handlers.test.mjs | 4 -- tests/helpers.mjs | 92 ++++++++++++++++++++++++++- tools/constants.mjs | 1 + 8 files changed, 285 insertions(+), 9 deletions(-) create mode 100644 tests/epochs.test.mjs diff --git a/spec/epochs_spec.lua b/spec/epochs_spec.lua index 2ef30565..a4aad80d 100644 --- a/spec/epochs_spec.lua +++ b/spec/epochs_spec.lua @@ -51,6 +51,56 @@ describe("epochs", function() } end) + describe("getPrescribedObserversWithWeightsForEpoch", function() + it("should return the prescribed observers with weights for the epoch", function() + _G.GatewayRegistry["test-this-is-valid-arweave-wallet-address-1"] = { + operatorStake = gar.getSettings().operators.minStake, + totalDelegatedStake = 0, + vaults = {}, + delegates = {}, + startTimestamp = startTimestamp, + stats = { + prescribedEpochCount = 0, + 
observedEpochCount = 0, + totalEpochCount = 0, + passedEpochCount = 0, + failedEpochCount = 0, + failedConsecutiveEpochs = 0, + passedConsecutiveEpochs = 0, + }, + settings = testSettings, + status = "joined", + observerAddress = "observerAddress", + weights = { + normalizedCompositeWeight = 1, + stakeWeight = 1, + tenureWeight = 1, + gatewayRewardRatioWeight = 1, + observerRewardRatioWeight = 1, + compositeWeight = 1, + }, + } + _G.Epochs[0].prescribedObservers = { + ["test-this-is-valid-arweave-wallet-address-1"] = "test-this-is-valid-arweave-gateway-address-1", + } + local epochIndex = 0 + local expectation = { + { + observerAddress = "observerAddress", + gatewayAddress = "test-this-is-valid-arweave-gateway-address-1", + normalizedCompositeWeight = 1, + stakeWeight = 1, + tenureWeight = 1, + gatewayRewardRatioWeight = 1, + observerRewardRatioWeight = 1, + compositeWeight = 1, + }, + } + local result = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) + assert.are.same(result, expectation) + end) + end) + describe("computePrescribedObserversForEpoch", function() it("should return all eligible gateways if fewer than the maximum in network", function() _G.GatewayRegistry["test-this-is-valid-arweave-wallet-address-1"] = { diff --git a/src/epochs.lua b/src/epochs.lua index 0fb77eed..c256f7bd 100644 --- a/src/epochs.lua +++ b/src/epochs.lua @@ -64,7 +64,6 @@ EpochSettings = EpochSettings or { pruneEpochsCount = 14, -- prune epochs older than 14 days prescribedNameCount = 2, - rewardPercentage = 0.0005, -- 0.05% maxObservers = 50, epochZeroStartTimestamp = 1719900000000, -- July 9th, 00:00:00 UTC durationMs = constants.defaultEpochDurationMs, -- 24 hours @@ -120,7 +119,7 @@ function epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) local prescribedObservers = epochs.getPrescribedObserversForEpoch(epochIndex) -- Iterate over prescribed observers and add gateway details local prescribedObserversWithWeights = {} - for _, gatewayAddress in 
ipairs(prescribedObservers) do + for _, gatewayAddress in pairs(prescribedObservers) do local gateway = gar.getGateway(gatewayAddress) if gateway then table.insert(prescribedObserversWithWeights, { diff --git a/src/gar.lua b/src/gar.lua index 6ea41c77..b8c7084d 100644 --- a/src/gar.lua +++ b/src/gar.lua @@ -27,7 +27,7 @@ local gar = {} --- @field services GatewayServices | nil --- @field status "joined"|"leaving" --- @field observerAddress WalletAddress ---- @field weights GatewayWeights | nil +--- @field weights GatewayWeights | nil // TODO: make this required and update tests to match the type --- @field slashings table | nil --- @class Gateway : CompactGateway diff --git a/src/main.lua b/src/main.lua index ee5225f2..ee336a6c 100644 --- a/src/main.lua +++ b/src/main.lua @@ -597,6 +597,12 @@ addEventingHandler(ActionMap.Transfer, utils.hasMatchingTag("Action", ActionMap. msg.ioEvent:addField("RecipientNewBalance", recipientNewBalance) end + -- if the sender is the protocol, then we need to update the circulating supply as tokens are now in circulation + if msg.From == ao.id then + LastKnownCirculatingSupply = LastKnownCirculatingSupply + quantity + addSupplyData(msg.ioEvent) + end + -- Casting implies that the sender does not want a response - Reference: https://elixirforum.com/t/what-is-the-etymology-of-genserver-cast/33610/3 if not msg.Cast then -- Debit-Notice message template, that is sent to the Sender of the transfer @@ -1938,7 +1944,6 @@ addEventingHandler( ActionMap.PrescribedObservers, utils.hasMatchingTag("Action", ActionMap.PrescribedObservers), function(msg) - -- check if the epoch number is provided, if not get the epoch number from the timestamp local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) or epochs.getEpochIndexForTimestamp(msg.Timestamp or msg.Tags.Timestamp) local prescribedObserversWithWeights = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) diff --git a/tests/epochs.test.mjs 
b/tests/epochs.test.mjs new file mode 100644 index 00000000..5f0d1d89 --- /dev/null +++ b/tests/epochs.test.mjs @@ -0,0 +1,135 @@ +import { + buyRecord, + getEpoch, + joinNetwork, + getPrescribedObservers, + getPrescribedNames, + tick, + startMemory, + totalTokenSupply, + getEpochSettings, +} from './helpers.mjs'; +import { describe, it, before } from 'node:test'; +import assert from 'node:assert'; +import { STUB_ADDRESS, STUB_OPERATOR_ADDRESS } from '../tools/constants.mjs'; + +const firstEpochStartTimestamp = 1719900000000; +const epochLength = 1000 * 60 * 60 * 24; // 24 hours +const distributionDelay = 1000 * 60 * 40; // 40 minutes + +describe('epochs', () => { + let sharedMemory; + + before(async () => { + const { Memory: totalTokenSupplyMemory } = await totalTokenSupply({ + memory: startMemory, + }); + // have a gateway join, and add an arns name which will be used to prescribe names and observers + const { memory: gatewayJoinMemory } = await joinNetwork({ + memory: totalTokenSupplyMemory, + address: STUB_OPERATOR_ADDRESS, + }); + const { memory: buyRecordMemory } = await buyRecord({ + memory: gatewayJoinMemory, + name: 'prescribed-name', + type: 'permabuy', + from: STUB_OPERATOR_ADDRESS, + }); + const { memory: tickMemory } = await tick({ + memory: buyRecordMemory, + timestamp: firstEpochStartTimestamp, + }); + sharedMemory = tickMemory; + }); + + describe('Epoch', () => { + it('should return the current epoch', async () => { + const epoch = await getEpoch({ + memory: sharedMemory, + timestamp: firstEpochStartTimestamp, + }); + assert.ok(epoch); + assert.deepStrictEqual(epoch, { + epochIndex: 0, + startTimestamp: firstEpochStartTimestamp, + endTimestamp: firstEpochStartTimestamp + epochLength, + startHeight: 1, + distributionTimestamp: + firstEpochStartTimestamp + epochLength + distributionDelay, + prescribedObservers: { + [STUB_ADDRESS]: STUB_OPERATOR_ADDRESS, + }, + prescribedNames: ['prescribed-name'], + observations: { + failureSummaries: [], + reports: 
[], + }, + distributions: { + totalEligibleGatewayReward: 22500900000, + totalEligibleGateways: 1, + totalEligibleObserverReward: 2500100000, + totalEligibleRewards: 25001000000, + rewards: { + eligible: { + [STUB_OPERATOR_ADDRESS]: { + delegateRewards: [], + operatorReward: 25001000000, // 0.001 of the protocol balance after the transfers and name purchase + }, + }, + }, + }, + }); + }); + }); + + describe('Prescribed Observers', () => { + it('should return the correct epoch for the current epoch with weights', async () => { + const prescribedObservers = await getPrescribedObservers({ + memory: sharedMemory, + timestamp: firstEpochStartTimestamp, + }); + assert.ok(prescribedObservers); + assert.deepStrictEqual(prescribedObservers, [ + { + compositeWeight: 4, + gatewayAddress: STUB_OPERATOR_ADDRESS, + gatewayRewardRatioWeight: 1, + normalizedCompositeWeight: 1, + observerAddress: STUB_ADDRESS, + observerRewardRatioWeight: 1, + stakeWeight: 1, + tenureWeight: 4, + }, + ]); + }); + }); + + describe('Prescribed Names', () => { + it('should return the correct epoch for the first epoch', async () => { + const prescribedNames = await getPrescribedNames({ + memory: sharedMemory, + timestamp: firstEpochStartTimestamp, + }); + assert.ok(prescribedNames); + assert.deepStrictEqual(prescribedNames, ['prescribed-name']); + }); + }); + + describe('Epoch-Settings', () => { + it('should return the correct epoch settings', async () => { + const epochSettings = await getEpochSettings({ + memory: sharedMemory, + timestamp: firstEpochStartTimestamp, + }); + assert.ok(epochSettings); + assert.deepStrictEqual(epochSettings, { + maxObservers: 50, + epochZeroStartTimestamp: firstEpochStartTimestamp, + durationMs: epochLength, + distributionDelayMs: distributionDelay, + prescribedNameCount: 2, + pruneEpochsCount: 14, + }); + }); + }); +}); diff --git a/tests/handlers.test.mjs b/tests/handlers.test.mjs index 1b2492dc..2e769456 100644 --- a/tests/handlers.test.mjs +++ 
b/tests/handlers.test.mjs @@ -1,10 +1,6 @@ import { handle } from './helpers.mjs'; import { describe, it } from 'node:test'; import assert from 'node:assert'; -import { - AO_LOADER_HANDLER_ENV, - DEFAULT_HANDLE_OPTIONS, -} from '../tools/constants.mjs'; describe('handlers', async () => { it('should maintain order of handlers, with _eval and _default first, followed by prune', async () => { diff --git a/tests/helpers.mjs b/tests/helpers.mjs index 1319c11d..b76fed03 100644 --- a/tests/helpers.mjs +++ b/tests/helpers.mjs @@ -9,6 +9,7 @@ import { STUB_TIMESTAMP, STUB_MESSAGE_ID, validGatewayTags, + STUB_PROCESS_ID, } from '../tools/constants.mjs'; const initialOperatorStake = 100_000_000_000; @@ -138,6 +139,7 @@ export const transfer = async ({ }, memory, }); + assertNoResultError(transferResult); return transferResult.Memory; }; @@ -164,6 +166,7 @@ export const joinNetwork = async ({ }, memory: transferMemory, }); + assertNoResultError(joinNetworkResult); return { memory: joinNetworkResult.Memory, result: joinNetworkResult, @@ -689,7 +692,7 @@ export const buyRecord = async ({ memory, from, name, - processId, + processId = STUB_PROCESS_ID, type = 'lease', years = 1, timestamp = STUB_TIMESTAMP, @@ -709,6 +712,7 @@ export const buyRecord = async ({ }, memory, }); + assertNoResultError(buyRecordResult); return { result: buyRecordResult, memory: buyRecordResult.Memory, @@ -757,3 +761,89 @@ export const totalTokenSupply = async ({ memory, timestamp = 0 }) => { memory, }); }; + +export const tick = async ({ memory, timestamp = STUB_TIMESTAMP }) => { + const tickResult = await handle({ + options: { + Tags: [{ name: 'Action', value: 'Tick' }], + Timestamp: timestamp, + }, + memory, + }); + return { + memory: tickResult.Memory, + result: tickResult, + }; +}; + +export const getEpoch = async ({ + memory, + timestamp = STUB_TIMESTAMP, + epochIndex, +}) => { + const epochResult = await handle({ + options: { + Tags: [ + { name: 'Action', value: 'Epoch' }, + ...(epochIndex ? 
[{ name: 'Epoch-Index', value: epochIndex }] : []), + ], + Timestamp: timestamp, + }, + memory, + }); + assertNoResultError(epochResult); + return JSON.parse(epochResult.Messages[0].Data); +}; + +export const getPrescribedObservers = async ({ + memory, + timestamp = STUB_TIMESTAMP, + epochIndex, +}) => { + const prescribedObserversResult = await handle({ + options: { + Tags: [ + { name: 'Action', value: 'Epoch-Prescribed-Observers' }, + ...(epochIndex ? [{ name: 'Epoch-Index', value: epochIndex }] : []), + ], + Timestamp: timestamp, + }, + memory, + }); + assertNoResultError(prescribedObserversResult); + return JSON.parse(prescribedObserversResult.Messages[0].Data); +}; + +export const getPrescribedNames = async ({ + memory, + timestamp = STUB_TIMESTAMP, + epochIndex, +}) => { + const prescribedNamesResult = await handle({ + options: { + Tags: [ + { name: 'Action', value: 'Epoch-Prescribed-Names' }, + ...(epochIndex ? [{ name: 'Epoch-Index', value: epochIndex }] : []), + ], + Timestamp: timestamp, + }, + memory, + }); + assertNoResultError(prescribedNamesResult); + return JSON.parse(prescribedNamesResult.Messages[0].Data); +}; + +export const getEpochSettings = async ({ + memory, + timestamp = STUB_TIMESTAMP, +}) => { + const epochSettingsResult = await handle({ + options: { + Tags: [{ name: 'Action', value: 'Epoch-Settings' }], + Timestamp: timestamp, + }, + memory, + }); + assertNoResultError(epochSettingsResult); + return JSON.parse(epochSettingsResult.Messages[0].Data); +}; diff --git a/tools/constants.mjs b/tools/constants.mjs index 8af14cc8..b6ed8039 100644 --- a/tools/constants.mjs +++ b/tools/constants.mjs @@ -14,6 +14,7 @@ export const INITIAL_OPERATOR_STAKE = 10_000_000_000; // 10K ARIO export const INITIAL_DELEGATE_STAKE = 10_000_000; // 10K ARIO export const INITIAL_OWNER_BALANCE = 950_000_000_000_000; // 950M ARIO export const STUB_TIMESTAMP = 21600000; // 01-01-1970 00:00:00 +export const STUB_PROCESS_ID = 'process-id-stub-'.padEnd(43, '0'); export 
const STUB_MESSAGE_ID = ''.padEnd(43, 'm'); export const STUB_HASH_CHAIN = 'NGU1fq_ssL9m6kRbRU1bqiIDBht79ckvAwRMGElkSOg'; /* ao READ-ONLY Env Variables */ From b47e4171af3634a116978b0b9d8ed49bb84876ee Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Tue, 17 Dec 2024 09:11:09 -0600 Subject: [PATCH 40/76] chore(test): add comment for new tests --- tests/epochs.test.mjs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/epochs.test.mjs b/tests/epochs.test.mjs index 5f0d1d89..355b8327 100644 --- a/tests/epochs.test.mjs +++ b/tests/epochs.test.mjs @@ -48,7 +48,6 @@ describe('epochs', () => { memory: sharedMemory, timestamp: firstEpochStartTimestamp, }); - assert.ok(epoch); assert.deepStrictEqual(epoch, { epochIndex: 0, startTimestamp: firstEpochStartTimestamp, @@ -79,6 +78,8 @@ describe('epochs', () => { }, }, }); + + // TODO (PE-7321): add a test for an empty epoch before names and gateways have been prescribed }); }); @@ -88,7 +89,6 @@ describe('epochs', () => { memory: sharedMemory, timestamp: firstEpochStartTimestamp, }); - assert.ok(prescribedObservers); assert.deepStrictEqual(prescribedObservers, [ { compositeWeight: 4, @@ -110,7 +110,6 @@ describe('epochs', () => { memory: sharedMemory, timestamp: firstEpochStartTimestamp, }); - assert.ok(prescribedNames); assert.deepStrictEqual(prescribedNames, ['prescribed-name']); }); }); @@ -121,7 +120,6 @@ describe('epochs', () => { memory: sharedMemory, timestamp: firstEpochStartTimestamp, }); - assert.ok(epochSettings); assert.deepStrictEqual(epochSettings, { maxObservers: 50, epochZeroStartTimestamp: firstEpochStartTimestamp, From 92dd7018d0c3d36f4a515540596cde7bfd88a61f Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Tue, 17 Dec 2024 09:16:18 -0600 Subject: [PATCH 41/76] chore(test): fix unit test --- spec/epochs_spec.lua | 6 +++--- src/epochs.lua | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/spec/epochs_spec.lua b/spec/epochs_spec.lua index a4aad80d..2b811acb 100644 --- 
a/spec/epochs_spec.lua +++ b/spec/epochs_spec.lua @@ -81,13 +81,13 @@ describe("epochs", function() }, } _G.Epochs[0].prescribedObservers = { - ["test-this-is-valid-arweave-wallet-address-1"] = "test-this-is-valid-arweave-gateway-address-1", + ["observerAddress"] = "test-this-is-valid-arweave-wallet-address-1", } local epochIndex = 0 local expectation = { { observerAddress = "observerAddress", - gatewayAddress = "test-this-is-valid-arweave-gateway-address-1", + gatewayAddress = "test-this-is-valid-arweave-wallet-address-1", normalizedCompositeWeight = 1, stakeWeight = 1, tenureWeight = 1, @@ -97,7 +97,7 @@ describe("epochs", function() }, } local result = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) - assert.are.same(result, expectation) + assert.are.same(expectation, result) end) end) diff --git a/src/epochs.lua b/src/epochs.lua index c256f7bd..d2c92a06 100644 --- a/src/epochs.lua +++ b/src/epochs.lua @@ -134,6 +134,7 @@ function epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) }) end end + -- sort by normalizedCompositeWeight table.sort(prescribedObserversWithWeights, function(a, b) return a.normalizedCompositeWeight > b.normalizedCompositeWeight From b5827c20de74189d510eccf75e4dbfe866872a3f Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Tue, 17 Dec 2024 11:15:50 -0600 Subject: [PATCH 42/76] chore(test): standardize use of test helpers in tick tests --- src/main.lua | 8 +- tests/helpers.mjs | 19 +++- tests/tick.test.mjs | 220 +++++++++++++------------------------------- 3 files changed, 83 insertions(+), 164 deletions(-) diff --git a/src/main.lua b/src/main.lua index ee336a6c..9c11adc5 100644 --- a/src/main.lua +++ b/src/main.lua @@ -1909,12 +1909,8 @@ end) addEventingHandler(ActionMap.Epoch, utils.hasMatchingTag("Action", ActionMap.Epoch), function(msg) -- check if the epoch number is provided, if not get the epoch number from the timestamp - local providedEpochIndex = msg.Tags["Epoch-Index"] - local timestamp = msg.Timestamp - - 
assert(providedEpochIndex or timestamp, "Epoch index or timestamp is required")
-
-	local epochIndex = providedEpochIndex or epochs.getEpochIndexForTimestamp(timestamp)
+	local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"])
+		or epochs.getEpochIndexForTimestamp(msg.Timestamp or msg.Tags.Timestamp)
 	local epoch = epochs.getEpoch(epochIndex)
 	Send(msg, { Target = msg.From, Action = "Epoch-Notice", Data = json.encode(epoch) })
 end)
diff --git a/tests/helpers.mjs b/tests/helpers.mjs
index b76fed03..ebc43cbe 100644
--- a/tests/helpers.mjs
+++ b/tests/helpers.mjs
@@ -762,11 +762,18 @@ export const totalTokenSupply = async ({ memory, timestamp = 0 }) => {
   });
 };
 
-export const tick = async ({ memory, timestamp = STUB_TIMESTAMP }) => {
+export const tick = async ({
+  memory,
+  timestamp = STUB_TIMESTAMP,
+  forcePrune = false,
+}) => {
   const tickResult = await handle({
     options: {
-      Tags: [{ name: 'Action', value: 'Tick' }],
+      Tags: [
+        { name: 'Action', value: 'Tick' },
+        ...(forcePrune ? [{ name: 'Force-Prune', value: 'true' }] : []),
+      ],
       Timestamp: timestamp,
     },
     memory,
   });
@@ -785,7 +790,9 @@ export const getEpoch = async ({
     options: {
       Tags: [
         { name: 'Action', value: 'Epoch' },
-        ...(epochIndex ? [{ name: 'Epoch-Index', value: epochIndex }] : []),
+        ...(epochIndex !== undefined
+          ? [{ name: 'Epoch-Index', value: epochIndex }]
+          : []),
       ],
       Timestamp: timestamp,
     },
@@ -804,7 +811,9 @@ export const getPrescribedObservers = async ({
     options: {
       Tags: [
         { name: 'Action', value: 'Epoch-Prescribed-Observers' },
-        ...(epochIndex ? [{ name: 'Epoch-Index', value: epochIndex }] : []),
+        ...(epochIndex !== undefined
+          ? [{ name: 'Epoch-Index', value: epochIndex }]
+          : []),
       ],
       Timestamp: timestamp,
     },
@@ -823,7 +832,9 @@ export const getPrescribedNames = async ({
     options: {
       Tags: [
         { name: 'Action', value: 'Epoch-Prescribed-Names' },
-        ...(epochIndex ? [{ name: 'Epoch-Index', value: epochIndex }] : []),
+        ...(epochIndex !== undefined
+          ?
[{ name: 'Epoch-Index', value: epochIndex }] + : []), ], Timestamp: timestamp, }, diff --git a/tests/tick.test.mjs b/tests/tick.test.mjs index d5f55084..6572975d 100644 --- a/tests/tick.test.mjs +++ b/tests/tick.test.mjs @@ -20,9 +20,15 @@ import { joinNetwork, buyRecord, handle, + transfer, startMemory, returnedNamesPeriod, totalTokenSupply, + getEpoch, + tick, + saveObservations, + getEpochSettings, + leaveNetwork, } from './helpers.mjs'; import { assertNoInvariants } from './invariants.mjs'; @@ -46,47 +52,18 @@ describe('Tick', async () => { }); }); - const transfer = async ({ - recipient = STUB_ADDRESS, - quantity = 100_000_000_000, - memory = sharedMemory, - } = {}) => { - const transferResult = await handle({ - options: { - From: PROCESS_OWNER, - Owner: PROCESS_OWNER, - Tags: [ - { name: 'Action', value: 'Transfer' }, - { name: 'Recipient', value: recipient }, - { name: 'Quantity', value: quantity }, - { name: 'Cast', value: false }, - ], - }, - memory, - }); - - // assert no error tag - const errorTag = transferResult.Messages?.[0]?.Tags?.find( - (tag) => tag.Name === 'Error', - ); - assert.strictEqual(errorTag, undefined); - - return transferResult.Memory; - }; - it('should prune record that are expired and after the grace period and create returned names for them', async () => { - let memory = sharedMemory; - const buyRecordResult = await handle({ - options: { - Tags: [ - { name: 'Action', value: 'Buy-Record' }, - { name: 'Name', value: 'test-name' }, - { name: 'Purchase-Type', value: 'lease' }, - { name: 'Years', value: '1' }, - { name: 'Process-Id', value: ''.padEnd(43, 'a') }, - ], - }, + const memory = await transfer({ + recipient: STUB_ADDRESS, + quantity: 100_000_000_000, + memory: sharedMemory, + }); + const buyRecordResult = await buyRecord({ memory, + name: 'test-name', + type: 'lease', + from: STUB_ADDRESS, + processId: ''.padEnd(43, 'a'), }); const realRecord = await handle({ options: { @@ -95,7 +72,7 @@ describe('Tick', async () => { { name: 
'Name', value: 'test-name' }, ], }, - memory: buyRecordResult.Memory, + memory: buyRecordResult.memory, }); const buyRecordData = JSON.parse(realRecord.Messages[0].Data); assert.deepEqual(buyRecordData, { @@ -110,14 +87,9 @@ describe('Tick', async () => { // mock the passage of time and tick with a future timestamp const futureTimestamp = buyRecordData.endTimestamp + 1000 * 60 * 60 * 24 * 14 + 1; - const futureTickResult = await handle({ - options: { - Tags: [ - { name: 'Action', value: 'Tick' }, - { name: 'Timestamp', value: futureTimestamp.toString() }, - ], - }, - memory: buyRecordResult.Memory, + const { result: futureTickResult } = await tick({ + memory: buyRecordResult.memory, + timestamp: futureTimestamp, }); const tickEvent = JSON.parse( @@ -170,40 +142,29 @@ describe('Tick', async () => { const memory = await transfer({ recipient: STUB_ADDRESS, quantity: 100_000_000_000, + memory: sharedMemory, }); - - const joinNetworkResult = await handle({ - options: { - Tags: validGatewayTags(), - From: STUB_ADDRESS, - Owner: STUB_ADDRESS, - }, + const joinNetworkResult = await joinNetwork({ memory, + address: STUB_ADDRESS, }); - // assert no error tag - assertNoResultError(joinNetworkResult); - // check the gateway record from contract const gateway = await getGateway({ - memory: joinNetworkResult.Memory, + memory: joinNetworkResult.memory, address: STUB_ADDRESS, }); assert.deepEqual(gateway.status, 'joined'); // leave the network - const leaveNetworkResult = await handle({ - options: { - From: STUB_ADDRESS, - Owner: STUB_ADDRESS, - Tags: [{ name: 'Action', value: 'Leave-Network' }], - }, - memory: joinNetworkResult.Memory, + const leaveNetworkResult = await leaveNetwork({ + memory: joinNetworkResult.memory, + address: STUB_ADDRESS, }); // check the gateways status is leaving const leavingGateway = await getGateway({ - memory: leaveNetworkResult.Memory, + memory: leaveNetworkResult.memory, address: STUB_ADDRESS, }); assert.deepEqual(leavingGateway.status, 'leaving'); 
@@ -211,19 +172,14 @@ describe('Tick', async () => { // expedite the timestamp to the future const futureTimestamp = leavingGateway.endTimestamp + 1; - const futureTick = await handle({ - options: { - Tags: [ - { name: 'Action', value: 'Tick' }, - { name: 'Timestamp', value: futureTimestamp.toString() }, - ], - }, - memory: leaveNetworkResult.Memory, + const futureTick = await tick({ + memory: leaveNetworkResult.memory, + timestamp: futureTimestamp, }); // check the gateway is pruned const prunedGateway = await getGateway({ - memory: futureTick.Memory, + memory: futureTick.memory, address: STUB_ADDRESS, }); @@ -315,12 +271,9 @@ describe('Tick', async () => { ); // mock the passage of time and tick with a future timestamp const futureTimestamp = vaultData.endTimestamp + 1; - const futureTick = await handle({ - options: { - Tags: [{ name: 'Action', value: 'Tick' }], - Timestamp: futureTimestamp, - }, + const futureTick = await tick({ memory: createVaultResult.Memory, + timestamp: futureTimestamp, }); // check the vault is pruned @@ -368,6 +321,7 @@ describe('Tick', async () => { const initialMemory = await transfer({ recipient: STUB_ADDRESS, quantity: 100_000_000_000, + memory: sharedMemory, }); const delegateAddress = 'delegate-address-'.padEnd(43, '1'); @@ -401,36 +355,25 @@ describe('Tick', async () => { assertNoResultError(newDelegateResult); // fast forward to the start of the first epoch - const epochSettings = await handle({ - options: { - Tags: [{ name: 'Action', value: 'Epoch-Settings' }], - }, + const epochSettings = await getEpochSettings({ + memory: newDelegateResult.Memory, }); - const epochSettingsData = JSON.parse(epochSettings.Messages?.[0]?.Data); - const genesisEpochTimestamp = epochSettingsData.epochZeroStartTimestamp; + const genesisEpochTimestamp = epochSettings.epochZeroStartTimestamp; // now tick to create the first epoch after the epoch start timestamp const createEpochTimestamp = genesisEpochTimestamp + 1; - const newEpochTick = await 
handle({ - options: { - Timestamp: createEpochTimestamp, // one millisecond after the epoch start timestamp, should create the epoch and set the prescribed observers and names - Tags: [ - { name: 'Action', value: 'Tick' }, - { name: 'Force-Prune', value: 'true' }, // simply exercise this though it's not critical to the test - ], - }, + const newEpochTick = await tick({ memory: newDelegateResult.Memory, + timestamp: createEpochTimestamp, + forcePrune: true, }); // assert no error tag assertNoResultError(newEpochTick); // assert the new epoch is created - const epoch = await handle({ - options: { - Timestamp: createEpochTimestamp, // one millisecond after the epoch start timestamp - Tags: [{ name: 'Action', value: 'Epoch' }], - }, - memory: newEpochTick.Memory, + const epochData = await getEpoch({ + memory: newEpochTick.memory, + timestamp: createEpochTimestamp, }); // get the epoch timestamp and assert it is in 24 hours @@ -442,7 +385,6 @@ describe('Tick', async () => { (totalGatewayRewards + totalObserverRewards) / 1; // only one gateway in the network const expectedGatewayOperatorReward = totalEligibleGatewayRewards * 0.75; // 75% of the eligible rewards go to the operator const expectedGatewayDelegateReward = totalEligibleGatewayRewards * 0.25; // 25% of the eligible rewards go to the delegates - const epochData = JSON.parse(epoch.Messages[0].Data); assert.deepStrictEqual(epochData, { epochIndex: 0, startHeight: 1, @@ -479,20 +421,11 @@ describe('Tick', async () => { // have the gateway submit an observation const reportTxId = 'report-tx-id-'.padEnd(43, '1'); const observationTimestamp = createEpochTimestamp + 7 * 1000 * 60 * 60; // 7 hours after the epoch start timestamp - const observation = await handle({ - options: { - From: STUB_ADDRESS, - Owner: STUB_ADDRESS, - Timestamp: observationTimestamp, - Tags: [ - { name: 'Action', value: 'Save-Observations' }, - { - name: 'Report-Tx-Id', - value: reportTxId, - }, - ], - }, - memory: epoch.Memory, + const 
observation = await saveObservations({ + memory: newEpochTick.memory, + timestamp: observationTimestamp, + from: STUB_ADDRESS, + reportTxId, }); // assert no error tag @@ -500,33 +433,21 @@ describe('Tick', async () => { // now jump ahead to the epoch distribution timestamp const distributionTimestamp = epochData.distributionTimestamp; - const distributionTick = await handle({ - options: { - Tags: [{ name: 'Action', value: 'Tick' }], - Timestamp: distributionTimestamp, - }, - memory: observation.Memory, + const distributionTick = await tick({ + memory: observation.memory, + timestamp: distributionTimestamp, }); // assert no error tag assertNoResultError(distributionTick); // check the rewards were distributed correctly - const rewards = await handle({ - options: { - Timestamp: distributionTimestamp, - Tags: [ - { name: 'Action', value: 'Epoch' }, - { - name: 'Epoch-Index', - value: '0', - }, - ], - }, - memory: distributionTick.Memory, + const distributedEpochData = await getEpoch({ + memory: distributionTick.memory, + timestamp: distributionTimestamp, + epochIndex: 0, }); - const distributedEpochData = JSON.parse(rewards.Messages[0].Data); assert.deepStrictEqual(distributedEpochData, { ...epochData, distributions: { @@ -549,18 +470,15 @@ describe('Tick', async () => { }, }); // assert the new epoch was created - const newEpoch = await handle({ - options: { - Tags: [{ name: 'Action', value: 'Epoch' }], - Timestamp: distributionTimestamp, - }, - memory: distributionTick.Memory, + const newEpoch = await getEpoch({ + memory: distributionTick.memory, + timestamp: distributionTimestamp, + epochIndex: 1, }); - const newEpochData = JSON.parse(newEpoch.Messages[0].Data); - assert.equal(newEpochData.epochIndex, 1); + assert.equal(newEpoch.epochIndex, 1); // assert the gateway stakes were updated and match the distributed rewards const gateway = await getGateway({ - memory: distributionTick.Memory, + memory: distributionTick.memory, address: STUB_ADDRESS, }); 
assert.deepStrictEqual(gateway, { @@ -601,7 +519,7 @@ describe('Tick', async () => { }); const delegateItems = await getDelegatesItems({ - memory: distributionTick.Memory, + memory: distributionTick.memory, gatewayAddress: STUB_ADDRESS, }); assert.deepEqual(delegateItems, [ @@ -719,17 +637,11 @@ describe('Tick', async () => { // Tick to the epoch where demandFactor is 0.5 for (let i = 0; i <= 49; i++) { const epochTimestamp = genesisEpochStart + (epochDurationMs + 1) * i; - const { Memory } = await handle({ - options: { - Tags: [ - { name: 'Action', value: 'Tick' }, - { name: 'Timestamp', value: epochTimestamp.toString() }, - ], - Timestamp: epochTimestamp, - }, + const { result: tickResult } = await tick({ memory: tickMemory, + timestamp: epochTimestamp, }); - tickMemory = Memory; + tickMemory = tickResult.Memory; if (i === 45) { const demandFactor = await getDemandFactor({ From 444733b6c00a567fe781bb42cab97a7cd3d3dfab Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Tue, 17 Dec 2024 13:32:35 -0600 Subject: [PATCH 43/76] test: resolve issues from merge PE-7319 --- tests/gar.test.mjs | 44 ++++++++++++++++++++++++++++++----------- tests/handlers.test.mjs | 2 +- tests/helpers.mjs | 12 +++++------ tests/invariants.mjs | 1 + tests/primary.test.mjs | 10 ++++++---- tests/tick.test.mjs | 7 +++++-- tests/vaults.test.mjs | 1 - 7 files changed, 51 insertions(+), 26 deletions(-) diff --git a/tests/gar.test.mjs b/tests/gar.test.mjs index ef27e835..91a10214 100644 --- a/tests/gar.test.mjs +++ b/tests/gar.test.mjs @@ -45,6 +45,7 @@ describe('GatewayRegistry', async () => { const STUB_ADDRESS_9 = ''.padEnd(43, '9'); let sharedMemory = startMemory; // memory we'll use across unique tests; + let lastTimestamp = STUB_TIMESTAMP; beforeEach(async () => { const { Memory: totalTokenSupplyMemory } = await totalTokenSupply({ @@ -56,11 +57,12 @@ describe('GatewayRegistry', async () => { }); // NOTE: all tests will start with this gateway joined to the network - use `sharedMemory` for 
the first interaction for each test to avoid having to join the network again sharedMemory = joinNetworkMemory; + lastTimestamp = STUB_TIMESTAMP + 1000 * 60; // Default 60s after the stubbed timestamp, some tests will override this }); afterEach(async () => { await assertNoInvariants({ - timestamp: STUB_TIMESTAMP, + timestamp: lastTimestamp, memory: sharedMemory, }); }); @@ -363,6 +365,7 @@ describe('GatewayRegistry', async () => { ); sharedMemory = leaveNetworkMemory; + lastTimestamp = leavingTimestamp; }); }); @@ -375,11 +378,13 @@ describe('GatewayRegistry', async () => { expectedDelegates, expectedAllowedDelegates, inputMemory = sharedMemory, + timestamp = STUB_TIMESTAMP, }) { // gateway before const gateway = await getGateway({ address: STUB_ADDRESS, memory: inputMemory, + timestamp, }); const { memory: updatedSettingsMemory } = await updateGatewaySettings({ @@ -389,12 +394,14 @@ describe('GatewayRegistry', async () => { { name: 'Action', value: 'Update-Gateway-Settings' }, ...settingsTags, ], + timestamp, }); // check the gateway record from contract const updatedGateway = await getGateway({ address: STUB_ADDRESS, memory: updatedSettingsMemory, + timestamp, }); // should match old gateway, with new settings @@ -415,6 +422,7 @@ describe('GatewayRegistry', async () => { delegatorAddress: delegateAddress, quantity: 10_000_000, gatewayAddress: STUB_ADDRESS, + timestamp, }).catch(() => {}); if (maybeDelegateResult?.memory) { nextMemory = maybeDelegateResult.memory; @@ -423,6 +431,7 @@ describe('GatewayRegistry', async () => { const updatedGatewayDelegates = await getDelegatesItems({ memory: nextMemory, gatewayAddress: STUB_ADDRESS, + timestamp, }); assert.deepStrictEqual( updatedGatewayDelegates @@ -434,7 +443,7 @@ describe('GatewayRegistry', async () => { await getAllowedDelegates({ memory: nextMemory, from: STUB_ADDRESS, - timestamp: STUB_TIMESTAMP, + timestamp, gatewayAddress: STUB_ADDRESS, }); const updatedAllowedDelegates = JSON.parse( @@ -537,6 +546,7 @@ 
describe('GatewayRegistry', async () => { gatewayAddress: STUB_ADDRESS, }); const updatedMemory = await updateGatewaySettingsTest({ + timestamp: STUB_TIMESTAMP + 1, inputMemory: stakedMemory, settingsTags: [ { name: 'Allow-Delegated-Staking', value: 'allowlist' }, @@ -568,25 +578,26 @@ describe('GatewayRegistry', async () => { ], }, memory: updatedMemory, + timestamp: STUB_TIMESTAMP + 5, }); assertNoResultError(delegationsResult); assert.deepStrictEqual( [ { - // Kicked out due to not being in allowlist - type: 'stake', + type: 'vault', gatewayAddress: STUB_ADDRESS, - delegationId: `${STUB_ADDRESS}_${STUB_TIMESTAMP}`, - balance: 0, - startTimestamp: STUB_TIMESTAMP, + delegationId: `${STUB_ADDRESS}_${STUB_TIMESTAMP + 1}`, + vaultId: 'mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm', + balance: 10_000_000, + endTimestamp: 90 * 24 * 60 * 60 * 1000 + STUB_TIMESTAMP + 1, + startTimestamp: STUB_TIMESTAMP + 1, }, { - type: 'vault', + // Kicked out due to not being in allowlist + type: 'stake', gatewayAddress: STUB_ADDRESS, delegationId: `${STUB_ADDRESS}_${STUB_TIMESTAMP}`, - vaultId: 'mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm', - balance: 10_000_000, - endTimestamp: 90 * 24 * 60 * 60 * 1000 + STUB_TIMESTAMP, + balance: 0, startTimestamp: STUB_TIMESTAMP, }, ], @@ -605,6 +616,7 @@ describe('GatewayRegistry', async () => { delegateAddresses: [STUB_ADDRESS_6], // not allowed to delegate expectedDelegates: [STUB_ADDRESS_7, STUB_ADDRESS_8, STUB_ADDRESS_9], // Leftover from previous test and being forced to exit expectedAllowedDelegates: [], + timestamp: STUB_TIMESTAMP + 3, }); }); @@ -627,11 +639,13 @@ describe('GatewayRegistry', async () => { delegateAddresses: [STUB_ADDRESS_9], // no one is allowed yet expectedDelegates: [STUB_ADDRESS_8], // 8 is exiting expectedAllowedDelegates: [], + timestamp: STUB_TIMESTAMP + 1, }); const delegateItems = await getDelegatesItems({ memory: updatedMemory, gatewayAddress: STUB_ADDRESS, + timestamp: STUB_TIMESTAMP + 1, }); 
assert.deepStrictEqual( [ @@ -647,7 +661,7 @@ describe('GatewayRegistry', async () => { const { result: getAllowedDelegatesResult } = await getAllowedDelegates({ memory: updatedMemory, from: STUB_ADDRESS, - timestamp: STUB_TIMESTAMP, + timestamp: STUB_TIMESTAMP + 1, gatewayAddress: STUB_ADDRESS, }); assert.deepStrictEqual( @@ -963,6 +977,7 @@ describe('GatewayRegistry', async () => { const delegationsForDelegator = await getDelegations({ memory: decreaseStakeMemory, address: delegatorAddress, + timestamp: decreaseStakeTimestamp, }); assert.deepStrictEqual(delegationsForDelegator.items, [ { @@ -983,6 +998,7 @@ describe('GatewayRegistry', async () => { }, ]); sharedMemory = decreaseStakeMemory; + lastTimestamp = decreaseStakeTimestamp; }); it('should fail to withdraw a delegated stake if below the minimum withdrawal limitation', async () => { @@ -1032,6 +1048,7 @@ describe('GatewayRegistry', async () => { }); assert.deepStrictEqual(gatewayAfter, gatewayBefore); sharedMemory = decreaseStakeMemory; + lastTimestamp = decreaseStakeTimestamp; }); }); @@ -1078,7 +1095,9 @@ describe('GatewayRegistry', async () => { // no changes to the gateway after a withdrawal is cancelled assert.deepStrictEqual(gatewayAfter, gatewayBefore); sharedMemory = cancelWithdrawalMemory; + lastTimestamp = decreaseStakeTimestamp; }); + it('should allow cancelling an operator withdrawal', async () => { const decreaseStakeTimestamp = STUB_TIMESTAMP + 1000 * 60 * 15; // 15 minutes after stubbedTimestamp const stakeQty = INITIAL_OPERATOR_STAKE; @@ -1125,6 +1144,7 @@ describe('GatewayRegistry', async () => { operatorStake: INITIAL_OPERATOR_STAKE + decreaseQty, // the decrease was cancelled and returned to the operator }); sharedMemory = cancelWithdrawalMemory; + lastTimestamp = decreaseStakeTimestamp; }); }); diff --git a/tests/handlers.test.mjs b/tests/handlers.test.mjs index b8eb799a..8e2e2090 100644 --- a/tests/handlers.test.mjs +++ b/tests/handlers.test.mjs @@ -28,7 +28,7 @@ describe('handlers', 
async () => { const defaultIndex = handlersList.indexOf('_default'); const sanitizeIndex = handlersList.indexOf('sanitize'); const pruneIndex = handlersList.indexOf('prune'); - const expectedHandlerCount = 71; // TODO: update this if more handlers are added + const expectedHandlerCount = 72; // TODO: update this if more handlers are added assert.ok(evalIndex === 0); assert.ok(defaultIndex === 1); assert.ok(sanitizeIndex === 2); diff --git a/tests/helpers.mjs b/tests/helpers.mjs index 04b75766..cda56bfd 100644 --- a/tests/helpers.mjs +++ b/tests/helpers.mjs @@ -99,11 +99,10 @@ export const getBalances = async ({ memory, timestamp = STUB_TIMESTAMP }) => { const result = await handle({ options: { Tags: [{ name: 'Action', value: 'Balances' }], - Timestamp: timestamp, }, + timestamp, memory, }); - console.log('getBalances', JSON.stringify(result, null, 2)); const balances = JSON.parse(result.Messages?.[0]?.Data); return balances; @@ -291,7 +290,7 @@ export const getDelegatesItems = async ({ return JSON.parse(result.Messages?.[0]?.Data).items; }; -export const getDelegations = async ({ memory, address }) => { +export const getDelegations = async ({ memory, address, timestamp }) => { const result = await handle({ options: { Tags: [ @@ -300,6 +299,7 @@ export const getDelegations = async ({ memory, address }) => { ], }, memory, + timestamp, }); return JSON.parse(result.Messages?.[0]?.Data); }; @@ -325,7 +325,6 @@ export const getVaults = async ({ }, memory, }); - console.log('getVaults', JSON.stringify(rest, null, 2)); return { result: rest, memory: Memory, @@ -441,6 +440,7 @@ export const delegateStake = async ({ recipient: delegatorAddress, quantity, memory, + timestamp, }); const delegateResult = await handle({ @@ -474,8 +474,8 @@ export const getGateway = async ({ { name: 'Action', value: 'Gateway' }, { name: 'Address', value: address }, ], - Timestamp: timestamp, }, + timestamp, memory, }); const gateway = JSON.parse(gatewayResult.Messages?.[0]?.Data); @@ -724,8 
+724,8 @@ export const buyRecord = async ({ { name: 'Process-Id', value: processId }, { name: 'Years', value: `${years}` }, ], - Timestamp: timestamp, }, + timestamp, memory, }); return { diff --git a/tests/invariants.mjs b/tests/invariants.mjs index 7efe43a3..b962bac0 100644 --- a/tests/invariants.mjs +++ b/tests/invariants.mjs @@ -45,6 +45,7 @@ async function assertNoBalanceVaultInvariants({ timestamp, memory }) { const { result } = await getVaults({ memory, limit: 1_000_000, // egregiously large limit to make sure we get them all + timestamp, }); for (const vault of JSON.parse(result.Messages?.[0]?.Data).items) { diff --git a/tests/primary.test.mjs b/tests/primary.test.mjs index dbdd2078..1b83e263 100644 --- a/tests/primary.test.mjs +++ b/tests/primary.test.mjs @@ -91,7 +91,7 @@ describe('primary names', function () { }; }; - const getPrimaryNameRequest = async ({ initiator, memory }) => { + const getPrimaryNameRequest = async ({ initiator, memory, timestamp }) => { const getPrimaryNameRequestResult = await handle({ options: { Tags: [ @@ -100,6 +100,7 @@ describe('primary names', function () { ], }, memory, + timestamp, }); return { result: getPrimaryNameRequestResult, @@ -205,7 +206,7 @@ describe('primary names', function () { const processId = ''.padEnd(43, 'a'); const recipient = ''.padEnd(43, 'b'); const requestTimestamp = 1234567890; - const { memory: buyRecordMemory } = await buyRecord({ + const { memory: buyRecordMemory, result } = await buyRecord({ name: 'test-name', processId, timestamp: requestTimestamp, @@ -254,6 +255,7 @@ describe('primary names', function () { { initiator: recipient, memory: requestPrimaryNameResult.Memory, + timestamp: requestTimestamp, }, ); @@ -535,7 +537,7 @@ describe('primary names', function () { Action: 'Remove-Primary-Names', Cast: false, Cron: false, - 'Epoch-Index': -19657, + 'Epoch-Index': -5618, From: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'From-Formatted': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
'Message-Id': 'mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm', @@ -543,7 +545,7 @@ describe('primary names', function () { 'Num-Removed-Primary-Names': 1, 'Removed-Primary-Names': ['test-name'], 'Removed-Primary-Name-Owners': [recipient], - Timestamp: 21600000, + Timestamp: requestTimestamp, 'Total-Primary-Name-Requests': 0, 'Total-Primary-Names': 0, }); diff --git a/tests/tick.test.mjs b/tests/tick.test.mjs index 678a93fb..8c92d2bc 100644 --- a/tests/tick.test.mjs +++ b/tests/tick.test.mjs @@ -50,6 +50,7 @@ describe('Tick', async () => { recipient = STUB_ADDRESS, quantity = 100_000_000_000, memory = sharedMemory, + timestamp = STUB_TIMESTAMP, } = {}) => { const transferResult = await handle({ options: { @@ -63,6 +64,7 @@ describe('Tick', async () => { ], }, memory, + timestamp, }); // assert no error tag @@ -209,7 +211,7 @@ describe('Tick', async () => { // TODO: check delegates and operator stake are vaulted // expedite the timestamp to the future - const futureTimestamp = leavingGatewayData.endTimestamp + 1; + const futureTimestamp = leavingGateway.endTimestamp + 1; const futureTick = await handle({ options: { Tags: [{ name: 'Action', value: 'Tick' }], @@ -574,6 +576,7 @@ describe('Tick', async () => { const gateway = await getGateway({ memory: distributionTick.Memory, address: STUB_ADDRESS, + timestamp: distributionTimestamp, }); assert.deepStrictEqual(gateway, { status: 'joined', @@ -650,7 +653,7 @@ describe('Tick', async () => { const processId = 'process-id-'.padEnd(43, '1'); const transferMemory = await transfer({ recipient: fundedUser, - quantity: 100_000_000_000_000, + quantity: 900_000_000_000_000, memory: genesisEpochTick.Memory, timestamp: genesisEpochStart, }); diff --git a/tests/vaults.test.mjs b/tests/vaults.test.mjs index ceadc9e4..49403b30 100644 --- a/tests/vaults.test.mjs +++ b/tests/vaults.test.mjs @@ -493,7 +493,6 @@ describe('Vaults', async () => { sortBy: 'balance', sortOrder: 'asc', }); - console.log(JSON.stringify(paginatedVaultsResult, 
null, 2)); // parse items, nextCursor const { items, nextCursor, hasMore, sortBy, sortOrder, totalItems } = From 0d4ec105f34e603b06c9d71df01578eb114aa410 Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Tue, 17 Dec 2024 13:57:41 -0600 Subject: [PATCH 44/76] refactor: fix merge issues PE-7319 --- src/main.lua | 29 +++++++++++------------------ tests/gar.test.mjs | 18 +++++++++--------- tests/tick.test.mjs | 2 +- 3 files changed, 21 insertions(+), 28 deletions(-) diff --git a/src/main.lua b/src/main.lua index d599b2c3..9213a563 100644 --- a/src/main.lua +++ b/src/main.lua @@ -24,6 +24,8 @@ NameRegistry = NameRegistry or {} Epochs = Epochs or {} LastTickedEpochIndex = LastTickedEpochIndex or -1 LastGracePeriodEntryEndTimestamp = LastGracePeriodEntryEndTimestamp or 0 +LastKnownMessageTimestamp = LastKnownMessageTimestamp or 0 +LastKnownMessageId = LastKnownMessageId or "" local utils = require("utils") local json = require("json") @@ -115,15 +117,6 @@ local ActionMap = { } -- Low fidelity trackers -LastKnownCirculatingSupply = LastKnownCirculatingSupply or 0 -- total circulating supply (e.g. 
balances - protocol balance) -LastKnownLockedSupply = LastKnownLockedSupply or 0 -- total vault balance across all vaults -LastKnownStakedSupply = LastKnownStakedSupply or 0 -- total operator stake across all gateways -LastKnownDelegatedSupply = LastKnownDelegatedSupply or 0 -- total delegated stake across all gateways -LastKnownWithdrawSupply = LastKnownWithdrawSupply or 0 -- total withdraw supply across all gateways (gateways and delegates) -LastKnownPnpRequestSupply = LastKnownPnpRequestSupply or 0 -- total supply stashed in outstanding Primary Name Protocol requests -LastTickedEpochIndex = LastTickedEpochIndex or -1 -LastKnownMessageTimestamp = LastKnownMessageTimestamp or 0 -LastKnownMessageId = LastKnownMessageId or "" --- @alias Message table -- an AO message TODO - update this type with the actual Message type --- @param msg Message @@ -410,15 +403,6 @@ local function assertAndSanitizeInputs(msg) msg.Tags = utils.validateAndSanitizeInputs(msg.Tags) msg.From = utils.formatAddress(msg.From) msg.Timestamp = msg.Timestamp and tonumber(msg.Timestamp) or tonumber(msg.Tags.Timestamp) or nil - - if msg.Tags["Force-Prune"] then - gar.scheduleNextGatewaysPruning(0) - gar.scheduleNextRedelegationsPruning(0) - arns.scheduleNextReturnedNamesPrune(0) - arns.scheduleNextRecordsPrune(0) - primaryNames.scheduleNextPrimaryNamesPruning(0) - vaults.scheduleNextVaultsPruning(0) - end end local function updateLastKnownMessage(msg) @@ -484,6 +468,15 @@ end, function(msg) lastKnownTotalSupply = token.lastKnownTotalTokenSupply(), } + if msg.Tags["Force-Prune"] then + gar.scheduleNextGatewaysPruning(0) + gar.scheduleNextRedelegationsPruning(0) + arns.scheduleNextReturnedNamesPrune(0) + arns.scheduleNextRecordsPrune(0) + primaryNames.scheduleNextPrimaryNamesPruning(0) + vaults.scheduleNextVaultsPruning(0) + end + print("Pruning state at timestamp: " .. 
msg.Timestamp) local prunedStateResult = prune.pruneState(msg.Timestamp, msg.Id, LastGracePeriodEntryEndTimestamp) diff --git a/tests/gar.test.mjs b/tests/gar.test.mjs index 91a10214..a1a02d96 100644 --- a/tests/gar.test.mjs +++ b/tests/gar.test.mjs @@ -573,7 +573,7 @@ describe('GatewayRegistry', async () => { Tags: [ { name: 'Action', value: 'Paginated-Delegations' }, { name: 'Limit', value: '100' }, - { name: 'Sort-Order', value: 'desc' }, + { name: 'Sort-Order', value: 'asc' }, { name: 'Sort-By', value: 'startTimestamp' }, ], }, @@ -583,6 +583,14 @@ describe('GatewayRegistry', async () => { assertNoResultError(delegationsResult); assert.deepStrictEqual( [ + { + // Kicked out due to not being in allowlist + type: 'stake', + gatewayAddress: STUB_ADDRESS, + delegationId: `${STUB_ADDRESS}_${STUB_TIMESTAMP}`, + balance: 0, + startTimestamp: STUB_TIMESTAMP, + }, { type: 'vault', gatewayAddress: STUB_ADDRESS, @@ -592,14 +600,6 @@ describe('GatewayRegistry', async () => { endTimestamp: 90 * 24 * 60 * 60 * 1000 + STUB_TIMESTAMP + 1, startTimestamp: STUB_TIMESTAMP + 1, }, - { - // Kicked out due to not being in allowlist - type: 'stake', - gatewayAddress: STUB_ADDRESS, - delegationId: `${STUB_ADDRESS}_${STUB_TIMESTAMP}`, - balance: 0, - startTimestamp: STUB_TIMESTAMP, - }, ], JSON.parse(delegationsResult.Messages[0].Data).items, ); diff --git a/tests/tick.test.mjs b/tests/tick.test.mjs index 8c92d2bc..56e611ac 100644 --- a/tests/tick.test.mjs +++ b/tests/tick.test.mjs @@ -653,7 +653,7 @@ describe('Tick', async () => { const processId = 'process-id-'.padEnd(43, '1'); const transferMemory = await transfer({ recipient: fundedUser, - quantity: 900_000_000_000_000, + quantity: 100_000_000_000_000, memory: genesisEpochTick.Memory, timestamp: genesisEpochStart, }); From af857d3cb99e554b3cc120a62666e7f33363bf7d Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Tue, 17 Dec 2024 13:59:52 -0600 Subject: [PATCH 45/76] refactor: remove empty comment PE-7319 --- src/main.lua | 2 -- 
1 file changed, 2 deletions(-) diff --git a/src/main.lua b/src/main.lua index 9213a563..726e50e0 100644 --- a/src/main.lua +++ b/src/main.lua @@ -116,8 +116,6 @@ local ActionMap = { PrimaryName = "Primary-Name", } --- Low fidelity trackers - --- @alias Message table -- an AO message TODO - update this type with the actual Message type --- @param msg Message --- @param response any From 5fe672c0bb15ecf26847cee51af10e6919b8814e Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Tue, 17 Dec 2024 15:54:06 -0600 Subject: [PATCH 46/76] fix(sanitize): assert number for timestamp --- src/main.lua | 2 +- tests/monitor/monitor.test.mjs | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/main.lua b/src/main.lua index 726e50e0..3c02d8d7 100644 --- a/src/main.lua +++ b/src/main.lua @@ -388,7 +388,7 @@ end -- Sanitize inputs before every interaction local function assertAndSanitizeInputs(msg) assert( - msg.Timestamp and msg.Timestamp >= LastKnownMessageTimestamp, + msg.Timestamp and tonumber(msg.Timestamp) >= LastKnownMessageTimestamp, "Timestamp must be greater than or equal to the last known message timestamp of " .. LastKnownMessageTimestamp .. 
" but was " diff --git a/tests/monitor/monitor.test.mjs b/tests/monitor/monitor.test.mjs index 77af6e6e..892643f6 100644 --- a/tests/monitor/monitor.test.mjs +++ b/tests/monitor/monitor.test.mjs @@ -65,10 +65,13 @@ describe('setup', () => { ); const evalIndex = handlersList.indexOf('_eval'); const defaultIndex = handlersList.indexOf('_default'); + const sanitizeIndex = handlersList.indexOf('sanitize'); const pruneIndex = handlersList.indexOf('prune'); assert( - pruneIndex > evalIndex && pruneIndex === defaultIndex + 1, - `Prune index (${pruneIndex}) is not the first handler after _default (${defaultIndex + 1})`, + pruneIndex > sanitizeIndex && + sanitizeIndex > evalIndex && + pruneIndex === defaultIndex + 1, + `Prune index (${pruneIndex}) and sanitize index (${sanitizeIndex}) are not the first and second handlers after _default (${defaultIndex + 1})`, ); }); }); From 390b526c572abbb1894117dd4eb79d248041de42 Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Tue, 17 Dec 2024 16:19:58 -0600 Subject: [PATCH 47/76] chore(test): fix integration test --- tests/tick.test.mjs | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/tick.test.mjs b/tests/tick.test.mjs index 661129a8..11b0bc21 100644 --- a/tests/tick.test.mjs +++ b/tests/tick.test.mjs @@ -361,6 +361,7 @@ describe('Tick', async () => { // fast forward to the start of the first epoch const epochSettings = await getEpochSettings({ memory: newDelegateResult.Memory, + timestamp: delegateTimestamp, }); const genesisEpochTimestamp = epochSettings.epochZeroStartTimestamp; // now tick to create the first epoch after the epoch start timestamp From a620d4c3a6886347f0c3e372a4df04879dc3396e Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Tue, 17 Dec 2024 16:45:21 -0600 Subject: [PATCH 48/76] chore(test): bump ar.io/sdk and update monitor tests --- .vscode/settings.json | 2 +- package.json | 2 +- tests/monitor/monitor.test.mjs | 27 +++++++++++++++-------- yarn.lock | 40 +++++++++++++++++++++++++--------- 4 files changed, 50 
insertions(+), 21 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 816990a1..5f79587f 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -37,5 +37,5 @@ "editor.formatOnPaste": true, "editor.formatOnSaveMode": "file" }, - "cSpell.words": ["hashchain", "redelegate"] + "cSpell.words": ["ARIO", "hashchain", "redelegate"] } diff --git a/package.json b/package.json index e2ccbdf8..1c9f9827 100644 --- a/package.json +++ b/package.json @@ -15,7 +15,7 @@ "prepare": "husky" }, "devDependencies": { - "@ar.io/sdk": "alpha", + "@ar.io/sdk": "^3.1.0-alpha.4", "@permaweb/ao-loader": "^0.0.36", "@permaweb/aoconnect": "^0.0.59", "arweave": "^1.15.1", diff --git a/tests/monitor/monitor.test.mjs b/tests/monitor/monitor.test.mjs index 892643f6..4305f9da 100644 --- a/tests/monitor/monitor.test.mjs +++ b/tests/monitor/monitor.test.mjs @@ -1,11 +1,20 @@ -import { AOProcess, IO, IO_TESTNET_PROCESS_ID } from '@ar.io/sdk'; +import { + AOProcess, + ARIO, + ARIO_DEVNET_PROCESS_ID, + ARIO_TESTNET_PROCESS_ID, + Logger, +} from '@ar.io/sdk'; import { connect } from '@permaweb/aoconnect'; import { strict as assert } from 'node:assert'; import { describe, it, before, after } from 'node:test'; import { DockerComposeEnvironment, Wait } from 'testcontainers'; -const processId = process.env.IO_PROCESS_ID || IO_TESTNET_PROCESS_ID; -const io = IO.init({ +// set debug level logs for to get detailed messages +Logger.default.setLogLevel('info'); + +const processId = process.env.IO_PROCESS_ID || ARIO_DEVNET_PROCESS_ID; +const io = ARIO.init({ process: new AOProcess({ processId, ao: connect({ @@ -68,10 +77,10 @@ describe('setup', () => { const sanitizeIndex = handlersList.indexOf('sanitize'); const pruneIndex = handlersList.indexOf('prune'); assert( - pruneIndex > sanitizeIndex && - sanitizeIndex > evalIndex && - pruneIndex === defaultIndex + 1, - `Prune index (${pruneIndex}) and sanitize index (${sanitizeIndex}) are not the first and second handlers after _default 
(${defaultIndex + 1})`, + pruneIndex === sanitizeIndex + 1 && + sanitizeIndex === defaultIndex + 1 && + defaultIndex === evalIndex + 1, + `Prune index (${pruneIndex}) and sanitize index (${sanitizeIndex}) are not the first and second handlers after _default (${handlersList})`, ); }); }); @@ -356,7 +365,7 @@ describe('setup', () => { (Date.now() - epochZeroStartTimestamp) / durationMs, ); - let cursor = ''; + let cursor = undefined; let totalGateways = 0; const uniqueGateways = new Set(); do { @@ -535,7 +544,7 @@ describe('setup', () => { ); const testLogicPromise = (async () => { - let cursor = ''; + let cursor = undefined; let totalArns = 0; const uniqueNames = new Set(); do { diff --git a/yarn.lock b/yarn.lock index ee0d9ec6..a5165b90 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2,18 +2,20 @@ # yarn lockfile v1 -"@ar.io/sdk@alpha": - version "2.5.3-alpha.1" - resolved "https://registry.yarnpkg.com/@ar.io/sdk/-/sdk-2.5.3-alpha.1.tgz#f8ecbfd592dbe73850ee3bcd04fb33a16c3cf156" - integrity sha512-xPSBYJ+N/qQCwyH3UmTiPmhuTFAJwp8os7qnLARdwy4X1b3SlstNp6YVORBQlQVBmFPYpGD98PCReuHbId5xjA== +"@ar.io/sdk@^3.1.0-alpha.4": + version "3.1.0-alpha.4" + resolved "https://registry.yarnpkg.com/@ar.io/sdk/-/sdk-3.1.0-alpha.4.tgz#c1db0b4477890621fb7aadcd1d21e4462c4522b2" + integrity sha512-yYDFOosf3IUGbGCPJlniDPyjRTUOJoYweD4tRMa7BgQ1YuBVLlZmVzqmF+7eNHT3ileM2Zx9ewbsFO/cS3dAvQ== dependencies: "@dha-team/arbundles" "^1.0.1" "@permaweb/aoconnect" "^0.0.57" arweave "1.14.4" - axios "1.7.7" + axios "1.7.9" axios-retry "^4.3.0" + commander "^12.1.0" eventemitter3 "^5.0.1" plimit-lit "^3.0.1" + prompts "^2.4.2" winston "^3.13.0" zod "^3.23.8" @@ -650,10 +652,10 @@ axios-retry@^4.3.0: dependencies: is-retry-allowed "^2.2.0" -axios@1.7.7: - version "1.7.7" - resolved "https://registry.yarnpkg.com/axios/-/axios-1.7.7.tgz#2f554296f9892a72ac8d8e4c5b79c14a91d0a47f" - integrity sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q== +axios@1.7.9: + version 
"1.7.9" + resolved "https://registry.yarnpkg.com/axios/-/axios-1.7.9.tgz#d7d071380c132a24accda1b2cfc1535b79ec650a" + integrity sha512-LhLcE7Hbiryz8oMDdDptSrWowmB4Bl6RCt6sIJKpRB4XtVf0iEgewX3au/pJqm+Py1kCASkb/FFKjxQaLtxJvw== dependencies: follow-redirects "^1.15.6" form-data "^4.0.0" @@ -897,7 +899,7 @@ combined-stream@^1.0.8: dependencies: delayed-stream "~1.0.0" -commander@~12.1.0: +commander@^12.1.0, commander@~12.1.0: version "12.1.0" resolved "https://registry.yarnpkg.com/commander/-/commander-12.1.0.tgz#01423b36f501259fdaac4d0e4d60c96c991585d3" integrity sha512-Vw8qHK3bZM9y/P10u3Vib8o/DdkvA2OtPtZvD871QKjy74Wj1WSKFILMPRPSdUSx5RFK1arlJzEtA4PkFgnbuA== @@ -1311,6 +1313,11 @@ keccak@^3.0.2: node-gyp-build "^4.2.0" readable-stream "^3.6.0" +kleur@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" + integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== + kuler@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/kuler/-/kuler-2.0.0.tgz#e2c570a3800388fb44407e851531c1d670b061b3" @@ -1610,6 +1617,14 @@ process@^0.11.10: resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" integrity sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A== +prompts@^2.4.2: + version "2.4.2" + resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.4.2.tgz#7b57e73b3a48029ad10ebd44f74b01722a4cb069" + integrity sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q== + dependencies: + kleur "^3.0.3" + sisteransi "^1.0.5" + proper-lockfile@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/proper-lockfile/-/proper-lockfile-4.1.2.tgz#c8b9de2af6b2f1601067f98e01ac66baa223141f" @@ -1775,6 +1790,11 @@ simple-swizzle@^0.2.2: dependencies: is-arrayish "^0.3.1" +sisteransi@^1.0.5: + version "1.0.5" + resolved 
"https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed" + integrity sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg== + slice-ansi@^5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-5.0.0.tgz#b73063c57aa96f9cd881654b15294d95d285c42a" From f02067b698ec8b11994d7c408f529a6dc619287a Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Tue, 17 Dec 2024 16:56:37 -0600 Subject: [PATCH 49/76] chore(test): fix prescribedObservers in unit test --- spec/epochs_spec.lua | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/spec/epochs_spec.lua b/spec/epochs_spec.lua index 2b811acb..10ced980 100644 --- a/spec/epochs_spec.lua +++ b/spec/epochs_spec.lua @@ -611,11 +611,11 @@ describe("epochs", function() }, prescribedNames = {}, prescribedObservers = { - ["test-this-very-valid-observer-wallet-addr-1"] = true, - ["test-this-very-valid-observer-wallet-addr-2"] = true, - ["test-this-very-valid-observer-wallet-addr-3"] = true, - ["test-this-very-valid-observer-wallet-addr-4"] = true, - ["test-this-very-valid-observer-wallet-addr-5"] = true, + ["test-this-very-valid-observer-wallet-addr-1"] = "test-this-very-valid-arweave-wallet-addr-1", + ["test-this-very-valid-observer-wallet-addr-2"] = "test-this-very-valid-arweave-wallet-addr-2", + ["test-this-very-valid-observer-wallet-addr-3"] = "test-this-very-valid-arweave-wallet-addr-3", + ["test-this-very-valid-observer-wallet-addr-4"] = "test-this-very-valid-arweave-wallet-addr-4", + ["test-this-very-valid-observer-wallet-addr-5"] = "test-this-very-valid-arweave-wallet-addr-5", }, } From 77e70d779ce64d4a3d0459a959877a69ee41dcbe Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Wed, 18 Dec 2024 09:28:11 -0600 Subject: [PATCH 50/76] fix(gar): do not remove delegate before decreasing stake --- src/gar.lua | 17 ++++++++++++++--- src/main.lua | 1 - 2 files changed, 14 insertions(+), 4 deletions(-) diff 
--git a/src/gar.lua b/src/gar.lua index 58928301..71d96eaf 100644 --- a/src/gar.lua +++ b/src/gar.lua @@ -479,6 +479,12 @@ end --- @param gateway Gateway --- @param quantity mARIO function increaseDelegateStakeAtGateway(delegate, gateway, quantity) + assert(delegate, "Delegate is required") + assert(gateway, "Gateway is required") + assert( + quantity and utils.isInteger(quantity) and quantity > 0, + "Quantity is required and must be an integer greater than 0" + ) delegate.delegatedStake = delegate.delegatedStake + quantity gateway.totalDelegatedStake = gateway.totalDelegatedStake + quantity end @@ -489,8 +495,14 @@ end --- @param ban boolean|nil do not add the delegate back to the gateway allowlist if their delegation is over function decreaseDelegateStakeAtGateway(delegateAddress, gateway, quantity, ban) local delegate = gateway.delegates[delegateAddress] - -- use this in an inverse way - increaseDelegateStakeAtGateway(delegate, gateway, -quantity) + assert(delegate, "Delegate is required") + assert( + quantity and utils.isInteger(quantity) and quantity > 0, + "Quantity is required and must be an integer greater than 0: " .. quantity + ) + assert(gateway, "Gateway is required") + delegate.delegatedStake = delegate.delegatedStake - quantity + gateway.totalDelegatedStake = gateway.totalDelegatedStake - quantity gar.pruneDelegateFromGatewayIfNecessary(delegateAddress, gateway) if ban and gateway.settings.allowedDelegatesLookup then gateway.settings.allowedDelegatesLookup[delegateAddress] = nil @@ -2055,7 +2067,6 @@ function unlockGatewayDelegateVault(gateway, delegateAddress, vaultId) assert(vault, "Vault not found") balances.increaseBalance(delegateAddress, vault.balance) - gateway.delegates[delegateAddress] = nil decreaseDelegateStakeAtGateway(delegateAddress, gateway, vault.balance) end diff --git a/src/main.lua b/src/main.lua index a063d4be..f295fb08 100644 --- a/src/main.lua +++ b/src/main.lua @@ -395,7 +395,6 @@ local function assertAndSanitizeInputs(msg) .. 
msg.Timestamp ) assert(msg.From, "From is required") - assert(msg.Id, "Id is required") assert(msg.Tags and type(msg.Tags) == "table", "Tags are required") msg.Tags = utils.validateAndSanitizeInputs(msg.Tags) From 8dc61ab407cecc3f7c3030c61fc0a5c39c3ed8fe Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Wed, 18 Dec 2024 09:31:57 -0600 Subject: [PATCH 51/76] fix(gar): delete the vault, do not decrease stake --- src/gar.lua | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/gar.lua b/src/gar.lua index 71d96eaf..7690f612 100644 --- a/src/gar.lua +++ b/src/gar.lua @@ -2067,7 +2067,9 @@ function unlockGatewayDelegateVault(gateway, delegateAddress, vaultId) assert(vault, "Vault not found") balances.increaseBalance(delegateAddress, vault.balance) - decreaseDelegateStakeAtGateway(delegateAddress, gateway, vault.balance) + -- delete the delegate's vault + gateway.delegates[delegateAddress].vaults[vaultId] = nil + gar.pruneDelegateFromGatewayIfNecessary(delegateAddress, gateway) end --- @param gateway Gateway From 7b55fd5ee23add7948d21d23b07ccc393745d35c Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Wed, 18 Dec 2024 09:38:46 -0600 Subject: [PATCH 52/76] fix(gar): allow 0 as no-op for increase and decrease --- src/gar.lua | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/gar.lua b/src/gar.lua index 7690f612..84e2729a 100644 --- a/src/gar.lua +++ b/src/gar.lua @@ -481,9 +481,10 @@ end function increaseDelegateStakeAtGateway(delegate, gateway, quantity) assert(delegate, "Delegate is required") assert(gateway, "Gateway is required") + -- zero is allowed as it is a no-op assert( - quantity and utils.isInteger(quantity) and quantity > 0, - "Quantity is required and must be an integer greater than 0" + quantity and utils.isInteger(quantity) and quantity >= 0, + "Quantity is required and must be an integer greater than or equal to 0: " .. 
quantity ) delegate.delegatedStake = delegate.delegatedStake + quantity gateway.totalDelegatedStake = gateway.totalDelegatedStake + quantity @@ -496,9 +497,10 @@ end function decreaseDelegateStakeAtGateway(delegateAddress, gateway, quantity, ban) local delegate = gateway.delegates[delegateAddress] assert(delegate, "Delegate is required") + -- zero is allowed as it is a no-op assert( - quantity and utils.isInteger(quantity) and quantity > 0, - "Quantity is required and must be an integer greater than 0: " .. quantity + quantity and utils.isInteger(quantity) and quantity >= 0, + "Quantity is required and must be an integer greater than or equal to 0: " .. quantity ) assert(gateway, "Gateway is required") delegate.delegatedStake = delegate.delegatedStake - quantity @@ -2067,7 +2069,7 @@ function unlockGatewayDelegateVault(gateway, delegateAddress, vaultId) assert(vault, "Vault not found") balances.increaseBalance(delegateAddress, vault.balance) - -- delete the delegate's vault + -- delete the delegate's vault and prune the delegate if necessary gateway.delegates[delegateAddress].vaults[vaultId] = nil gar.pruneDelegateFromGatewayIfNecessary(delegateAddress, gateway) end From c9074ddfffc67220485355682253490466049d0f Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Wed, 18 Dec 2024 11:09:17 -0600 Subject: [PATCH 53/76] test: setup expectations for disallowing grace period reassign name and create primary naem req PE-7326 --- spec/arns_spec.lua | 29 +++++++++++++++++++++++++++++ spec/primary_names_spec.lua | 20 ++++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/spec/arns_spec.lua b/spec/arns_spec.lua index e5b5f90a..b9ef5a7f 100644 --- a/spec/arns_spec.lua +++ b/spec/arns_spec.lua @@ -767,6 +767,35 @@ describe("arns", function() assert.is_false(status) assert.match("Name must be extended before it can be reassigned", error) end) + + it("should not allow reassigning names during the grace period", function() + -- Setup record in grace period + 
_G.NameRegistry.records["test-name"] = { + endTimestamp = 123456789, + processId = testProcessId, + purchasePrice = 600000000, + startTimestamp = 0, + type = "lease", + undernameLimit = 10, + } + + -- Attempt to reassign + local newProcessId = "test-this-is-valid-arweave-wallet-address-2" + local status, error = pcall( + arns.reassignName, + "test-name", + testProcessId, + -- Just before the grace period ends + 123456789 + + constants.gracePeriodMs + - 1, + newProcessId + ) + + -- Assertions + assert.is_false(status) + assert.match("Name must be extended before it can be reassigned", error) + end) end) end diff --git a/spec/primary_names_spec.lua b/spec/primary_names_spec.lua index 5de9b732..06b09c8a 100644 --- a/spec/primary_names_spec.lua +++ b/spec/primary_names_spec.lua @@ -92,6 +92,26 @@ describe("Primary Names", function() ) end) + it("should fail if the arns record is in its grace period", function() + _G.NameRegistry.records = { + ["test"] = { + processId = "base-name-owner", + type = "lease", + endTimestamp = 1234567890, + }, + } + local status, err = pcall( + primaryNames.createPrimaryNameRequest, + "test", + "user-requesting-primary-name", + -- Just after grace period starts + 1234567890 + 1, + "test-msg-id" + ) + assert.is_false(status) + assert.match("ArNS record 'test' is not active", err) + end) + it( "should create a primary name request and transfer the cost from the initiator to the protocol balance", function() From 102df6db05bc2214a44f97032388d572439d95a7 Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Wed, 18 Dec 2024 11:10:18 -0600 Subject: [PATCH 54/76] fix: disallow create prmary name req during a records grace period PE-7326 --- src/arns.lua | 13 ++++++------- src/primary_names.lua | 1 + 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/arns.lua b/src/arns.lua index 3791e166..f0d7f4d8 100644 --- a/src/arns.lua +++ b/src/arns.lua @@ -1042,7 +1042,7 @@ function arns.pruneReservedNames(currentTimestamp) end --- Asserts 
that a name can be reassigned ---- @param record StoredRecord The record to check +--- @param record StoredRecord | nil The record to check --- @param currentTimestamp number The current timestamp --- @param from string The address of the sender --- @param newProcessId string The new process id @@ -1055,11 +1055,11 @@ function arns.assertValidReassignName(record, currentTimestamp, from, newProcess assert(record.processId == from, "Not authorized to reassign this name") if record.endTimestamp then - local isWithinGracePeriod = record.endTimestamp < currentTimestamp - and record.endTimestamp + constants.gracePeriodMs > currentTimestamp - local isExpired = record.endTimestamp + constants.gracePeriodMs < currentTimestamp - assert(not isWithinGracePeriod, "Name must be extended before it can be reassigned") - assert(not isExpired, "Name is expired") + assert( + not arns.recordInGracePeriod(record, currentTimestamp), + "Name must be extended before it can be reassigned" + ) + assert(not arns.recordExpired(record, currentTimestamp), "Name is expired") end return true @@ -1075,7 +1075,6 @@ end function arns.reassignName(name, from, currentTimestamp, newProcessId, allowUnsafeProcessId) allowUnsafeProcessId = allowUnsafeProcessId or false local record = arns.getRecord(name) - assert(record, "Name is not registered") arns.assertValidReassignName(record, currentTimestamp, from, newProcessId, allowUnsafeProcessId) local updatedRecord = arns.modifyProcessId(name, newProcessId) return updatedRecord diff --git a/src/primary_names.lua b/src/primary_names.lua index 103ffdbb..f2487372 100644 --- a/src/primary_names.lua +++ b/src/primary_names.lua @@ -81,6 +81,7 @@ function primaryNames.createPrimaryNameRequest(name, initiator, timestamp, msgId local record = arns.getRecord(baseName) assert(record, "ArNS record '" .. baseName .. "' does not exist") + assert(arns.recordIsActive(record, timestamp), "ArNS record '" .. baseName .. 
"' is not active") local requestCost = arns.getTokenCost({ intent = "Primary-Name-Request", From 9b19c7de05eb51a43c02a379e270b3f32a687c8a Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Wed, 18 Dec 2024 12:36:37 -0600 Subject: [PATCH 55/76] fix(balance): fix tag check for balance --- src/main.lua | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main.lua b/src/main.lua index f295fb08..b1d93f76 100644 --- a/src/main.lua +++ b/src/main.lua @@ -1840,17 +1840,17 @@ addEventingHandler(ActionMap.Balances, Handlers.utils.hasMatchingTag("Action", A end) addEventingHandler(ActionMap.Balance, Handlers.utils.hasMatchingTag("Action", ActionMap.Balance), function(msg) - local target = msg.Tags.Target or msg.Tags.Address or msg.From + local target = msg.Tags.Target or msg.Tags.Address or msg.Tags.Recipient or msg.From local balance = balances.getBalance(target) -- must adhere to token.lua spec for arconnect compatibility Send(msg, { Target = msg.From, Action = "Balance-Notice", + Account = msg.Tags.Address or msg.Tags.Recipient or msg.From, Data = balance, Balance = tostring(balance), Ticker = Ticker, - Address = target, }) end) From d1ee4c3d27567a2dd73404b73eeac1ae63fefc78 Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Wed, 18 Dec 2024 13:14:23 -0600 Subject: [PATCH 56/76] fix(epochs): include prescribedObserversWithWeights to epoch handler This avoids a breaking change that impacts the network portal and ar-io-sdk types --- src/main.lua | 3 +++ tests/epochs.test.mjs | 15 ++++++++++++--- tests/tick.test.mjs | 24 ++++++++++++++++++++---- 3 files changed, 35 insertions(+), 7 deletions(-) diff --git a/src/main.lua b/src/main.lua index b1d93f76..e3832ad9 100644 --- a/src/main.lua +++ b/src/main.lua @@ -1890,11 +1890,14 @@ addEventingHandler(ActionMap.Record, utils.hasMatchingTag("Action", ActionMap.Re Send(msg, recordNotice) end) +-- TODO: this handler will not scale well as gateways and delegates increase, we should slice out the larger pieces (e.g. 
distributions should be fetched via a paginated handler) addEventingHandler(ActionMap.Epoch, utils.hasMatchingTag("Action", ActionMap.Epoch), function(msg) -- check if the epoch number is provided, if not get the epoch number from the timestamp local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) or epochs.getEpochIndexForTimestamp(msg.Timestamp or msg.Tags.Timestamp) local epoch = epochs.getEpoch(epochIndex) + -- populate the prescribed observers with weights + epoch.prescribedObservers = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) Send(msg, { Target = msg.From, Action = "Epoch-Notice", Data = json.encode(epoch) }) end) diff --git a/tests/epochs.test.mjs b/tests/epochs.test.mjs index 355b8327..642bf8ab 100644 --- a/tests/epochs.test.mjs +++ b/tests/epochs.test.mjs @@ -55,9 +55,18 @@ describe('epochs', () => { startHeight: 1, distributionTimestamp: firstEpochStartTimestamp + epochLength + distributionDelay, - prescribedObservers: { - [STUB_ADDRESS]: STUB_OPERATOR_ADDRESS, - }, + prescribedObservers: [ + { + observerAddress: STUB_ADDRESS, + gatewayAddress: STUB_OPERATOR_ADDRESS, + stakeWeight: 1, + gatewayRewardRatioWeight: 1, + observerRewardRatioWeight: 1, + compositeWeight: 4, + normalizedCompositeWeight: 1, + tenureWeight: 4, + }, + ], prescribedNames: ['prescribed-name'], observations: { failureSummaries: [], diff --git a/tests/tick.test.mjs b/tests/tick.test.mjs index 11b0bc21..c2f91b3d 100644 --- a/tests/tick.test.mjs +++ b/tests/tick.test.mjs @@ -401,9 +401,18 @@ describe('Tick', async () => { failureSummaries: [], reports: [], }, - prescribedObservers: { - [STUB_ADDRESS]: STUB_ADDRESS, - }, + prescribedObservers: [ + { + observerAddress: STUB_ADDRESS, + gatewayAddress: STUB_ADDRESS, + stakeWeight: 3, + gatewayRewardRatioWeight: 1, + observerRewardRatioWeight: 1, + compositeWeight: 12, + normalizedCompositeWeight: 1, + tenureWeight: 4, + }, + ], prescribedNames: [], // no names in the network distributions: { 
totalEligibleGateways: 1, @@ -446,7 +455,7 @@ describe('Tick', async () => { // assert no error tag assertNoResultError(distributionTick); - // check the rewards were distributed correctly + // check the rewards were distributed correctly and weights are updated const distributedEpochData = await getEpoch({ memory: distributionTick.memory, timestamp: distributionTimestamp, @@ -473,6 +482,13 @@ describe('Tick', async () => { [STUB_ADDRESS]: reportTxId, }, }, + prescribedObservers: [ + { + ...epochData.prescribedObservers[0], + compositeWeight: 22, + stakeWeight: 5.5, + }, + ], }); // assert the new epoch was created const newEpoch = await getEpoch({ From 20f161a946d25ac71bb85916a7a1f66c9f8357e8 Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Wed, 18 Dec 2024 14:37:46 -0600 Subject: [PATCH 57/76] chore(test): fix pagination test --- src/main.lua | 5 ++++- tests/gar.test.mjs | 6 +++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/main.lua b/src/main.lua index e3832ad9..3c653106 100644 --- a/src/main.lua +++ b/src/main.lua @@ -1896,8 +1896,11 @@ addEventingHandler(ActionMap.Epoch, utils.hasMatchingTag("Action", ActionMap.Epo local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) or epochs.getEpochIndexForTimestamp(msg.Timestamp or msg.Tags.Timestamp) local epoch = epochs.getEpoch(epochIndex) + -- TODO: this check can be removed after 14 days of release once old epochs are pruned + if not epoch.prescribedObservers or not next(epoch.prescribedObservers).gatewayAddress then + epoch.prescribedObservers = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) + end -- populate the prescribed observers with weights - epoch.prescribedObservers = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) Send(msg, { Target = msg.From, Action = "Epoch-Notice", Data = json.encode(epoch) }) end) diff --git a/tests/gar.test.mjs b/tests/gar.test.mjs index 40b4500f..380b687d 100644 --- a/tests/gar.test.mjs +++ 
b/tests/gar.test.mjs @@ -1229,6 +1229,7 @@ describe('GatewayRegistry', async () => { const { memory: addGatewayMemory2 } = await joinNetwork({ address: secondGatewayAddress, memory: sharedMemory, + timestamp: STUB_TIMESTAMP + 1, // join the network 1ms after the first gateway }); let cursor; let fetchedGateways = []; @@ -1239,9 +1240,12 @@ describe('GatewayRegistry', async () => { { name: 'Action', value: 'Paginated-Gateways' }, { name: 'Cursor', value: cursor }, { name: 'Limit', value: '1' }, + { name: 'Sort-By', value: 'startTimestamp' }, + { name: 'Sort-Order', value: 'asc' }, ], }, memory: addGatewayMemory2, + timestamp: STUB_TIMESTAMP + 1, }); // parse items, nextCursor const { items, nextCursor, hasMore, sortBy, sortOrder, totalItems } = @@ -1249,7 +1253,7 @@ describe('GatewayRegistry', async () => { assert.equal(totalItems, 2); assert.equal(items.length, 1); assert.equal(sortBy, 'startTimestamp'); - assert.equal(sortOrder, 'desc'); + assert.equal(sortOrder, 'asc'); // older gateways are first assert.equal(hasMore, !!nextCursor); cursor = nextCursor; fetchedGateways.push(...items); From 1605a26c9656860ebe0be701e30785e922337077 Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Wed, 18 Dec 2024 14:52:05 -0600 Subject: [PATCH 58/76] fix(gar): change backwards compatibility check --- src/main.lua | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/main.lua b/src/main.lua index 3c653106..a2ed1f29 100644 --- a/src/main.lua +++ b/src/main.lua @@ -1897,7 +1897,11 @@ addEventingHandler(ActionMap.Epoch, utils.hasMatchingTag("Action", ActionMap.Epo or epochs.getEpochIndexForTimestamp(msg.Timestamp or msg.Tags.Timestamp) local epoch = epochs.getEpoch(epochIndex) -- TODO: this check can be removed after 14 days of release once old epochs are pruned - if not epoch.prescribedObservers or not next(epoch.prescribedObservers).gatewayAddress then + if + not epoch.prescribedObservers + or not epoch.prescribedObservers[1] + or not 
epoch.prescribedObservers[1].gatewayAddress + then epoch.prescribedObservers = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) end -- populate the prescribed observers with weights From 5b15b8544a7ad5d3619e4a9c9a9caaa8014ad703 Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Wed, 18 Dec 2024 15:54:33 -0600 Subject: [PATCH 59/76] fix(epochs): add stake and startimestamp to prescribedObserver with weights --- spec/epochs_spec.lua | 2 ++ src/epochs.lua | 2 ++ tests/epochs.test.mjs | 11 ++++++++++- tests/tick.test.mjs | 3 +++ 4 files changed, 17 insertions(+), 1 deletion(-) diff --git a/spec/epochs_spec.lua b/spec/epochs_spec.lua index 10ced980..eabe179d 100644 --- a/spec/epochs_spec.lua +++ b/spec/epochs_spec.lua @@ -94,6 +94,8 @@ describe("epochs", function() gatewayRewardRatioWeight = 1, observerRewardRatioWeight = 1, compositeWeight = 1, + stake = gar.getSettings().operators.minStake, + startTimestamp = startTimestamp, }, } local result = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) diff --git a/src/epochs.lua b/src/epochs.lua index d2c92a06..9ec8625d 100644 --- a/src/epochs.lua +++ b/src/epochs.lua @@ -131,6 +131,8 @@ function epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) gatewayRewardRatioWeight = gateway.weights.gatewayRewardRatioWeight, observerRewardRatioWeight = gateway.weights.observerRewardRatioWeight, compositeWeight = gateway.weights.compositeWeight, + stake = gateway.operatorStake, + startTimestamp = gateway.startTimestamp, }) end end diff --git a/tests/epochs.test.mjs b/tests/epochs.test.mjs index 642bf8ab..5a01ade0 100644 --- a/tests/epochs.test.mjs +++ b/tests/epochs.test.mjs @@ -11,7 +11,12 @@ import { } from './helpers.mjs'; import { describe, it, before } from 'node:test'; import assert from 'node:assert'; -import { STUB_ADDRESS, STUB_OPERATOR_ADDRESS } from '../tools/constants.mjs'; +import { + INITIAL_OPERATOR_STAKE, + STUB_ADDRESS, + STUB_OPERATOR_ADDRESS, + STUB_TIMESTAMP, +} from 
'../tools/constants.mjs'; const firstEpochStartTimestamp = 1719900000000; const epochLength = 1000 * 60 * 60 * 24; // 24 hours @@ -65,6 +70,8 @@ describe('epochs', () => { compositeWeight: 4, normalizedCompositeWeight: 1, tenureWeight: 4, + stake: INITIAL_OPERATOR_STAKE, + startTimestamp: STUB_TIMESTAMP, }, ], prescribedNames: ['prescribed-name'], @@ -107,6 +114,8 @@ describe('epochs', () => { observerAddress: STUB_ADDRESS, observerRewardRatioWeight: 1, stakeWeight: 1, + stake: INITIAL_OPERATOR_STAKE, + startTimestamp: STUB_TIMESTAMP, tenureWeight: 4, }, ]); diff --git a/tests/tick.test.mjs b/tests/tick.test.mjs index c2f91b3d..921bd5ea 100644 --- a/tests/tick.test.mjs +++ b/tests/tick.test.mjs @@ -411,6 +411,8 @@ describe('Tick', async () => { compositeWeight: 12, normalizedCompositeWeight: 1, tenureWeight: 4, + stake: INITIAL_OPERATOR_STAKE, + startTimestamp: STUB_TIMESTAMP, }, ], prescribedNames: [], // no names in the network @@ -485,6 +487,7 @@ describe('Tick', async () => { prescribedObservers: [ { ...epochData.prescribedObservers[0], + stake: INITIAL_OPERATOR_STAKE + expectedGatewayOperatorReward, compositeWeight: 22, stakeWeight: 5.5, }, From 7982993e76183387267100907c25c2150e8528d4 Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Tue, 17 Dec 2024 21:41:30 -0800 Subject: [PATCH 60/76] test(arns): check for invariants after each arns test PE-7211 --- tests/arns.test.mjs | 46 ++++++++++++++++++++++++++++++++++++++------- tests/helpers.mjs | 6 ++++++ 2 files changed, 45 insertions(+), 7 deletions(-) diff --git a/tests/arns.test.mjs b/tests/arns.test.mjs index 9d60fcd6..60614e7a 100644 --- a/tests/arns.test.mjs +++ b/tests/arns.test.mjs @@ -40,7 +40,7 @@ describe('ArNS', async () => { afterEach(async () => { await assertNoInvariants({ - timestamp: STUB_TIMESTAMP, + timestamp: 1719988800001, // after latest known timestamp from a test memory: sharedMemory, }); }); @@ -160,11 +160,11 @@ describe('ArNS', async () => { describe('Buy-Record', () => { it('should 
buy a record with an Arweave address', async () => { - await runBuyRecord({ sender: STUB_ADDRESS }); + sharedMemory = (await runBuyRecord({ sender: STUB_ADDRESS })).memory; }); it('should buy a record with an Ethereum address', async () => { - await runBuyRecord({ sender: testEthAddress }); + sharedMemory = await runBuyRecord({ sender: testEthAddress }); }); it('should fail to buy a permanently registered record', async () => { @@ -223,6 +223,7 @@ describe('ArNS', async () => { 'Name is already registered', ); assert(alreadyRegistered); + sharedMemory = failedBuyRecordResult.Memory; }); it('should buy a record and default the name to lower case', async () => { @@ -261,6 +262,7 @@ describe('ArNS', async () => { type: 'lease', undernameLimit: 10, }); + sharedMemory = realRecord.Memory; }); }); @@ -326,9 +328,10 @@ describe('ArNS', async () => { }); const record = JSON.parse(result.Messages[0].Data); assert.equal(record.undernameLimit, 11); + return increaseUndernameResult.Memory; }; await assertIncreaseUndername(STUB_ADDRESS); - await assertIncreaseUndername(testEthAddress); + sharedMemory = await assertIncreaseUndername(testEthAddress); }); it('should increase the undernames by spending from stakes', async () => { @@ -413,9 +416,10 @@ describe('ArNS', async () => { }); const record = JSON.parse(result.Messages[0].Data); assert.equal(record.undernameLimit, 11); + return increaseUndernameResult.Memory; }; await assertIncreaseUndername(STUB_ADDRESS); - await assertIncreaseUndername(testEthAddress); + sharedMemory = await assertIncreaseUndername(testEthAddress); }); }); @@ -436,6 +440,7 @@ describe('ArNS', async () => { assert(priceList[key].permabuy); assert(Object.keys(priceList[key].lease).length == 5); }); + sharedMemory = priceListResult.Memory; }); }); @@ -478,6 +483,7 @@ describe('ArNS', async () => { stakes: [], }, }); + sharedMemory = result.Memory; }); it('should return the correct cost of increasing an undername limit', async () => { @@ -514,6 +520,7 @@ 
describe('ArNS', async () => { const tokenCost = JSON.parse(result.Messages[0].Data); const expectedPrice = 500000000 * 0.001 * 1 * 1; assert.equal(tokenCost, expectedPrice); + sharedMemory = result.Memory; }); it('should return the correct cost of extending an existing leased record', async () => { @@ -549,6 +556,7 @@ describe('ArNS', async () => { }); const tokenCost = JSON.parse(result.Messages[0].Data); assert.equal(tokenCost, 200000000); // known cost for extending a 9 character name by 2 years (500 ARIO * 0.2 * 2) + sharedMemory = result.Memory; }); it('should get the cost of upgrading an existing leased record to permanently owned', async () => { @@ -584,6 +592,7 @@ describe('ArNS', async () => { const tokenCost = JSON.parse(upgradeNameResult.Messages[0].Data); assert.equal(tokenCost, basePermabuyPrice); + sharedMemory = upgradeNameResult.Memory; }); it('should return the correct cost of creating a primary name request', async () => { @@ -628,6 +637,7 @@ describe('ArNS', async () => { }); const undernameTokenCost = JSON.parse(undernameResult.Messages[0].Data); assert.equal(undernameTokenCost, tokenCost); + sharedMemory = undernameResult.Memory; }); }); @@ -679,6 +689,7 @@ describe('ArNS', async () => { record.endTimestamp, recordBefore.endTimestamp + 60 * 1000 * 60 * 24 * 365, ); + sharedMemory = recordResult.Memory; }); it('should properly handle extending a leased record paying with balance and stakes', async () => { @@ -751,6 +762,7 @@ describe('ArNS', async () => { recordBefore.endTimestamp + 60 * 1000 * 60 * 24 * 365, record.endTimestamp, ); + sharedMemory = recordResult.Memory; }); }); @@ -813,6 +825,7 @@ describe('ArNS', async () => { undernameLimit: 10, purchasePrice: basePermabuyPrice, // expected price for a permanent 9 character name }); + sharedMemory = upgradeNameResult.Memory; }); it('should properly handle upgrading a name paying with balance and stakes', async () => { @@ -889,6 +902,7 @@ describe('ArNS', async () => { purchasePrice: 
2500000000, // expected price for a permanent 9 character name }, ); + sharedMemory = upgradeNameResult.Memory; }); }); @@ -1071,6 +1085,7 @@ describe('ArNS', async () => { assert.equal(balances[initiator], expectedRewardForInitiator); assert.equal(balances[PROCESS_ID], expectedProtocolBalance); assert.equal(balances[newBuyerAddress], 0); + sharedMemory = balancesResult.Memory; }); const runReturnedNameTest = async ({ fundFrom }) => { @@ -1310,14 +1325,15 @@ describe('ArNS', async () => { const balances = JSON.parse(balancesResult.Messages[0].Data); assert.equal(balances[PROCESS_ID], expectedProtocolBalance); assert.equal(balances[bidderAddress], 0); + return balancesResult.Memory; }; it('should create a lease expiration initiated returned name and accept buy records for it', async () => { - await runReturnedNameTest({}); + sharedMemory = await runReturnedNameTest({}); }); it('should create a lease expiration initiated returned name and accept a buy record funded by stakes', async () => { - await runReturnedNameTest({ fundFrom: 'stakes' }); + sharedMemory = await runReturnedNameTest({ fundFrom: 'stakes' }); }); }); @@ -1437,6 +1453,7 @@ describe('ArNS', async () => { ); const expectedFloorPrice = baseLeasePriceFor9CharNameFor1Year; assert.equal(tokenCostForReturnedNameAfterThePeriod, expectedFloorPrice); + sharedMemory = tokenCostResultForReturnedNameAfterThePeriod.Memory; }); }); @@ -1469,6 +1486,7 @@ describe('ArNS', async () => { ); assert.equal(releaseNameErrorTag, undefined); assert.equal(reassignNameResult.Messages?.[0]?.Target, processId); + sharedMemory = reassignNameResult.Memory; }); it('should reassign an arns name to a new process id with initiator', async () => { @@ -1501,6 +1519,7 @@ describe('ArNS', async () => { assert.equal(releaseNameErrorTag, undefined); assert.equal(reassignNameResult.Messages?.[0]?.Target, processId); assert.equal(reassignNameResult.Messages?.[1]?.Target, STUB_MESSAGE_ID); // Check for the message sent to the initiator + 
sharedMemory = reassignNameResult.Memory; }); it('should not reassign an arns name with invalid ownership', async () => { @@ -1531,6 +1550,7 @@ describe('ArNS', async () => { (tag) => tag.name === 'Error', ); assert.ok(releaseNameErrorTag, 'Error tag should be present'); + sharedMemory = reassignNameResult.Memory; }); it('should not reassign an arns name with invalid new process id', async () => { @@ -1561,6 +1581,7 @@ describe('ArNS', async () => { (tag) => tag.name === 'Error', ); assert.ok(releaseNameErrorTag, 'Error tag should be present'); + sharedMemory = reassignNameResult.Memory; }); }); @@ -1636,6 +1657,7 @@ describe('ArNS', async () => { paginatedRecords.map((record) => record.name), expectedNames, ); + sharedMemory = buyRecordsMemory; }); }); @@ -1725,6 +1747,7 @@ describe('ArNS', async () => { name: 'ArNS Discount', }, ]); + sharedMemory = result.Memory; }); it('should return the correct cost for a buy record by a non-eligible gateway', async () => { @@ -1745,6 +1768,7 @@ describe('ArNS', async () => { const costDetails = JSON.parse(result.Messages[0].Data); assert.equal(costDetails.tokenCost, baseLeasePriceFor9CharNameFor1Year); assert.deepEqual(costDetails.discounts, []); + sharedMemory = result.Memory; }); describe('for an existing record', () => { @@ -1823,6 +1847,7 @@ describe('ArNS', async () => { name: 'ArNS Discount', }, ]); + sharedMemory = result.Memory; }); it('should not apply the discount to extending the lease for a non-eligible gateway', async () => { @@ -1838,6 +1863,7 @@ describe('ArNS', async () => { const { tokenCost, discounts } = JSON.parse(result.Messages[0].Data); assert.equal(tokenCost, baseLeaseOneYearExtensionPrice); assert.deepEqual(discounts, []); + sharedMemory = result.Memory; }); it('balances should be updated when the extend lease action is performed', async () => { @@ -1891,6 +1917,7 @@ describe('ArNS', async () => { nonEligibleGatewayBalanceBefore - baseLeaseOneYearExtensionPrice, nonEligibleBalanceAfter, ); + 
sharedMemory = nonEligibleGatewayResult.Memory; }); describe('upgrading the lease to a permabuy', () => { @@ -1923,6 +1950,7 @@ describe('ArNS', async () => { name: 'ArNS Discount', }, ]); + sharedMemory = result.Memory; }); it('should not apply the discount to increasing the undername limit for a non-eligible gateway', async () => { @@ -1940,6 +1968,7 @@ describe('ArNS', async () => { ); assert.equal(tokenCost, basePermabuyPrice); assert.deepEqual(discounts, []); + sharedMemory = result.Memory; }); }); @@ -1975,6 +2004,7 @@ describe('ArNS', async () => { name: 'ArNS Discount', }, ]); + sharedMemory = result.Memory; }); it('should not apply the discount to increasing the undername limit for a non-eligible gateway', async () => { @@ -1993,6 +2023,7 @@ describe('ArNS', async () => { ); assert.equal(tokenCost, undernameCostsForOneYear); assert.deepEqual(discounts, []); + sharedMemory = result.Memory; }); }); }); @@ -2014,6 +2045,7 @@ describe('ArNS', async () => { assert.equal(sortBy, 'name'); assert.equal(sortOrder, 'desc'); assert.equal(totalItems, 0); + sharedMemory = result.Memory; }); }); }); diff --git a/tests/helpers.mjs b/tests/helpers.mjs index 10ec9a68..e510122e 100644 --- a/tests/helpers.mjs +++ b/tests/helpers.mjs @@ -97,6 +97,7 @@ export function assertValidSupplyEventData(result) { } export const getBalances = async ({ memory, timestamp = STUB_TIMESTAMP }) => { + assert(memory, 'Memory is required'); const result = await handle({ options: { Tags: [{ name: 'Action', value: 'Balances' }], @@ -105,6 +106,11 @@ export const getBalances = async ({ memory, timestamp = STUB_TIMESTAMP }) => { memory, }); + const balancesData = result.Messages?.[0]?.Data; + if (!balancesData) { + const { Memory, ...rest } = result; + assert(false, `Something went wrong: ${JSON.stringify(rest, null, 2)}`); + } const balances = JSON.parse(result.Messages?.[0]?.Data); return balances; }; From fb158e04ef347b1576fd29b0da180605a70f2ce4 Mon Sep 17 00:00:00 2001 From: Ariel Melendez 
Date: Tue, 17 Dec 2024 21:50:52 -0800 Subject: [PATCH 61/76] test(tick): check invariants after tick tests PE-7211 --- tests/tick.test.mjs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/tick.test.mjs b/tests/tick.test.mjs index 921bd5ea..bc3a94dc 100644 --- a/tests/tick.test.mjs +++ b/tests/tick.test.mjs @@ -4,8 +4,6 @@ import assert from 'node:assert'; import { DEFAULT_HANDLE_OPTIONS, STUB_ADDRESS, - validGatewayTags, - PROCESS_OWNER, PROCESS_ID, STUB_TIMESTAMP, INITIAL_OPERATOR_STAKE, @@ -47,7 +45,7 @@ describe('Tick', async () => { afterEach(async () => { await assertNoInvariants({ - timestamp: STUB_TIMESTAMP, + timestamp: genesisEpochStart + 1000 * 60 * 60 * 24 * 365, memory: sharedMemory, }); }); @@ -137,6 +135,7 @@ describe('Tick', async () => { initiator: PROCESS_ID, premiumMultiplier: 50, }); + sharedMemory = returnedNameData.Memory; }); it('should prune gateways that are expired', async () => { @@ -186,6 +185,7 @@ describe('Tick', async () => { }); assert.deepEqual(undefined, prunedGateway); + sharedMemory = futureTick.memory; }); // vaulting is not working as expected, need to fix before enabling this test @@ -306,6 +306,7 @@ describe('Tick', async () => { }); const balanceData = JSON.parse(ownerBalance.Messages[0].Data); assert.equal(balanceData, balanceBeforeData); + sharedMemory = ownerBalance.Memory; }); /** @@ -555,6 +556,7 @@ describe('Tick', async () => { address: delegateAddress, }, ]); + sharedMemory = distributionTick.memory; }); it('should not increase demandFactor and baseRegistrationFee when records are bought until the end of the epoch', async () => { @@ -643,6 +645,7 @@ describe('Tick', async () => { timestamp: firstEpochEndTimestamp + 1, }); assert.equal(firstEpochEndDemandFactorResult, 1.0500000000000000444); + sharedMemory = firstEpochEndTick.Memory; }); it('should reset to baseRegistrationFee when demandFactor is 0.5 for consecutive epochs', async () => { @@ -701,5 +704,6 @@ describe('Tick', async () 
=> { assert.equal(demandFactorAfterFeeAdjustment, 1); assert.equal(baseFeeAfterConsecutiveTicksWithNoPurchases, 300_000_000); + sharedMemory = tickMemory; }); }); From ecb8338426e704df94544a6b9562c078c7c50869 Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Tue, 17 Dec 2024 23:46:27 -0800 Subject: [PATCH 62/76] test(vaults): assert not invariants after vaults tests PE-7211 --- tests/vaults.test.mjs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/tests/vaults.test.mjs b/tests/vaults.test.mjs index 49403b30..1340a0ae 100644 --- a/tests/vaults.test.mjs +++ b/tests/vaults.test.mjs @@ -18,6 +18,7 @@ import { assertNoInvariants } from './invariants.mjs'; describe('Vaults', async () => { let sharedMemory = startMemory; + let endingMemory; beforeEach(async () => { const { Memory: totalTokenSupplyMemory } = await totalTokenSupply({ memory: startMemory, @@ -28,7 +29,7 @@ describe('Vaults', async () => { afterEach(async () => { await assertNoInvariants({ timestamp: STUB_TIMESTAMP, - memory: sharedMemory, + memory: endingMemory, }); }); @@ -111,6 +112,7 @@ describe('Vaults', async () => { createVaultResultData.endTimestamp, createVaultResult.startTimestamp + lockLengthMs, ); + endingMemory = createVaultResult.Memory; }); it('should throw an error if vault size is too small', async () => { @@ -154,6 +156,7 @@ describe('Vaults', async () => { balanceAfterVault.Messages[0].Data, ); assert.deepEqual(balanceAfterVaultData, balanceBeforeData); + endingMemory = balanceAfterVault.Memory; }); }); @@ -212,6 +215,7 @@ describe('Vaults', async () => { createVaultResultData.balance, quantity, ); + endingMemory = extendVaultResult.Memory; }); }); @@ -273,6 +277,7 @@ describe('Vaults', async () => { increaseVaultBalanceResultData.balance, createVaultResultData.balance + quantity, ); + endingMemory = increaseVaultBalanceResult.Memory; }); }); @@ -330,6 +335,7 @@ describe('Vaults', async () => { createdVaultData.endTimestamp, STUB_TIMESTAMP + 
lockLengthMs, ); + endingMemory = createVaultedTransferResult.Memory; }); it('should fail if the vault size is too small', async () => { @@ -353,6 +359,7 @@ describe('Vaults', async () => { 'Invalid quantity. Must be integer greater than or equal to 100000000 mARIO', ), ); + endingMemory = createVaultedTransferResult.Memory; }); it('should fail if the recipient address is invalid and Allow-Unsafe-Addresses is not provided', async () => { @@ -373,6 +380,7 @@ describe('Vaults', async () => { ); assert.ok(errorTag); assert(errorTag.value.includes('Invalid recipient')); + endingMemory = createVaultedTransferResult.Memory; }); it('should create a vault for the recipient with an invalid address and Allow-Unsafe-Addresses is provided', async () => { @@ -401,6 +409,7 @@ describe('Vaults', async () => { createdVaultData.endTimestamp, STUB_TIMESTAMP + lockLengthMs, ); + endingMemory = createVaultedTransferResult.Memory; }); }); @@ -444,7 +453,7 @@ describe('Vaults', async () => { let cursor = ''; let fetchedVaults = []; while (true) { - const { result: paginatedVaultsResult } = await getVaults({ + const { result: paginatedVaultsResult, memory } = await getVaults({ memory: paginatedVaultMemory, cursor, limit: 1, @@ -461,6 +470,7 @@ describe('Vaults', async () => { assert.equal(hasMore, !!nextCursor); cursor = nextCursor; fetchedVaults.push(...items); + endingMemory = memory; if (!cursor) break; } @@ -486,7 +496,7 @@ describe('Vaults', async () => { let cursor = ''; let fetchedVaults = []; while (true) { - const { result: paginatedVaultsResult } = await getVaults({ + const { result: paginatedVaultsResult, memory } = await getVaults({ memory: paginatedVaultMemory, cursor, limit: 1, @@ -505,6 +515,7 @@ describe('Vaults', async () => { assert.equal(hasMore, !!nextCursor); cursor = nextCursor; fetchedVaults.push(...items); + endingMemory = memory; if (!cursor) break; } From b7f6cfd32d7c967f8553c484c36e2fab46422a90 Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Tue, 17 Dec 
2024 23:51:40 -0800 Subject: [PATCH 63/76] test(primary names): assert no invariants after primary names tests PE-7211 --- tests/primary.test.mjs | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/tests/primary.test.mjs b/tests/primary.test.mjs index 1b83e263..6c2b9111 100644 --- a/tests/primary.test.mjs +++ b/tests/primary.test.mjs @@ -14,6 +14,7 @@ import { assertNoInvariants } from './invariants.mjs'; describe('primary names', function () { let sharedMemory; + let endingMemory; beforeEach(async () => { const { Memory: totalTokenSupplyMemory } = await totalTokenSupply({ memory: startMemory, @@ -23,8 +24,8 @@ describe('primary names', function () { afterEach(async () => { await assertNoInvariants({ - timestamp: STUB_TIMESTAMP, - memory: sharedMemory, + timestamp: STUB_TIMESTAMP + 1000 * 60 * 60 * 24 * 365, + memory: endingMemory, }); }); @@ -348,17 +349,19 @@ describe('primary names', function () { }); // reverse lookup the owner of the primary name - const { result: ownerOfPrimaryNameResult } = await getOwnerOfPrimaryName({ - name: 'test-name', - memory: approvePrimaryNameRequestResult.Memory, - timestamp: approvedTimestamp, - }); + const { result: ownerOfPrimaryNameResult, memory } = + await getOwnerOfPrimaryName({ + name: 'test-name', + memory: approvePrimaryNameRequestResult.Memory, + timestamp: approvedTimestamp, + }); const ownerResult = JSON.parse(ownerOfPrimaryNameResult.Messages[0].Data); assert.deepStrictEqual(ownerResult, { ...expectedNewPrimaryName, processId, }); + endingMemory = memory; }); it('should immediately approve a primary name for an existing base name when the caller of the request is the base name owner', async function () { @@ -458,17 +461,19 @@ describe('primary names', function () { }); // reverse lookup the owner of the primary name - const { result: ownerOfPrimaryNameResult } = await getOwnerOfPrimaryName({ - name: 'test-name', - memory: requestPrimaryNameResult.Memory, - timestamp: 
approvalTimestamp, - }); + const { result: ownerOfPrimaryNameResult, memory } = + await getOwnerOfPrimaryName({ + name: 'test-name', + memory: requestPrimaryNameResult.Memory, + timestamp: approvalTimestamp, + }); const ownerResult = JSON.parse(ownerOfPrimaryNameResult.Messages[0].Data); assert.deepStrictEqual(ownerResult, { ...expectedNewPrimaryName, processId, }); + endingMemory = memory; }); it('should allow removing a primary named by the owner or the owner of the base record', async function () { @@ -550,7 +555,7 @@ describe('primary names', function () { 'Total-Primary-Names': 0, }); // assert the primary name is no longer set - const { result: primaryNameForAddressResult } = + const { result: primaryNameForAddressResult, memory } = await getPrimaryNameForAddress({ address: recipient, memory: removePrimaryNameResult.Memory, @@ -562,6 +567,7 @@ describe('primary names', function () { (tag) => tag.name === 'Error', ).value; assert.ok(errorTag, 'Expected an error tag'); + endingMemory = memory; }); describe('getPaginatedPrimaryNames', function () { @@ -588,6 +594,7 @@ describe('primary names', function () { sortBy: 'owner', sortOrder: 'asc', }); + endingMemory = getPaginatedPrimaryNamesResult.Memory; }); }); @@ -615,6 +622,7 @@ describe('primary names', function () { sortBy: 'startTimestamp', sortOrder: 'asc', }); + endingMemory = getPaginatedPrimaryNameRequestsResult.Memory; }); }); }); From a10044e05827645c4e792147d31832704227f05b Mon Sep 17 00:00:00 2001 From: Ariel Melendez Date: Tue, 17 Dec 2024 23:56:08 -0800 Subject: [PATCH 64/76] test(transfers): assert no invariants after transfer tests PE-7211 --- tests/invariants.mjs | 2 +- tests/transfer.test.mjs | 21 ++++++++++++++++++--- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/tests/invariants.mjs b/tests/invariants.mjs index b962bac0..3ee01464 100644 --- a/tests/invariants.mjs +++ b/tests/invariants.mjs @@ -25,7 +25,7 @@ function assertValidTimestampsAtTimestamp({ ); assert( endTimestamp === 
null || endTimestamp > startTimestamp, - `Invariant violated: endTimestamp of ${endTimestamp} for vault ${address}`, + `Invariant violated: endTimestamp of ${endTimestamp} is not greater than startTimestamp ${startTimestamp}`, ); } diff --git a/tests/transfer.test.mjs b/tests/transfer.test.mjs index c558c8d6..19db3280 100644 --- a/tests/transfer.test.mjs +++ b/tests/transfer.test.mjs @@ -1,9 +1,19 @@ import { handle, startMemory } from './helpers.mjs'; -import { describe, it } from 'node:test'; +import { afterEach, describe, it } from 'node:test'; import assert from 'node:assert'; -import { STUB_ADDRESS, PROCESS_OWNER } from '../tools/constants.mjs'; +import { + STUB_ADDRESS, + PROCESS_OWNER, + STUB_TIMESTAMP, +} from '../tools/constants.mjs'; +import { assertNoInvariants } from './invariants.mjs'; describe('Transfers', async () => { + let endingMemory; + afterEach(() => { + assertNoInvariants({ memory: endingMemory, timestamp: STUB_TIMESTAMP }); + }); + it('should transfer tokens to another wallet', async () => { const checkTransfer = async (recipient, sender, quantity) => { let mem = startMemory; @@ -59,6 +69,7 @@ describe('Transfers', async () => { const balances = JSON.parse(result.Messages[0].Data); assert.equal(balances[recipient], quantity); assert.equal(balances[sender], senderBalanceData - quantity); + return result.Memory; }; const arweave1 = STUB_ADDRESS; @@ -69,7 +80,7 @@ describe('Transfers', async () => { await checkTransfer(arweave1, arweave2, 100000000); await checkTransfer(eth1, arweave2, 100000000); - await checkTransfer(eth2, eth1, 100000000); + endingMemory = await checkTransfer(eth2, eth1, 100000000); }); it('should not transfer tokens to another wallet if the sender does not have enough tokens', async () => { @@ -107,6 +118,7 @@ describe('Transfers', async () => { // the new balance won't be defined assert.equal(balances[recipient] || 0, 0); assert.equal(balances[sender], senderBalanceData); + endingMemory = result.Memory; }); for (const 
allowUnsafeAddresses of [false, undefined]) { @@ -151,6 +163,7 @@ describe('Transfers', async () => { const balances = JSON.parse(result.Messages[0].Data); assert.equal(balances[recipient] || 0, 0); assert.equal(balances[sender], senderBalanceData); + endingMemory = result.Memory; }); } @@ -196,6 +209,7 @@ describe('Transfers', async () => { const balances = JSON.parse(result.Messages[0].Data); assert.equal(balances[recipient] || 0, 100000000); assert.equal(balances[sender], senderBalanceData - 100000000); + endingMemory = result.Memory; }); it('should not transfer when an invalid quantity is provided', async () => { @@ -232,5 +246,6 @@ describe('Transfers', async () => { const balances = JSON.parse(result.Messages[0].Data); assert.equal(balances[recipient] || 0, 0); assert.equal(balances[sender], senderBalanceData); + endingMemory = result.Memory; }); }); From 22299ad97ac5f0cf7dcf4dcda77cf998c04d0b73 Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Thu, 19 Dec 2024 10:43:28 -0600 Subject: [PATCH 65/76] test: expect operators to redelegate 1 mARIO from a delegate to their own stake PE-7342 --- spec/gar_spec.lua | 52 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/spec/gar_spec.lua b/spec/gar_spec.lua index 345e0d0c..3a10fa5b 100644 --- a/spec/gar_spec.lua +++ b/spec/gar_spec.lua @@ -3048,6 +3048,7 @@ describe("gar", function() local testRedelegatorAddress = "test-re-delegator-1234567890123456789012345" local testSourceAddress = "unique-source-address-123456789012345678901" local testTargetAddress = "unique-target-address-123456789012345678901" + describe("redelegateStake", function() local timestamp = 12345 local testRedelgationGateway = utils.deepCopy({ @@ -3167,6 +3168,56 @@ describe("gar", function() assert.are.same(targetGateway, _G.GatewayRegistry[testTargetAddress]) end) + it( + "should allow operators to redelegate to its own stake when that stake is below the minimum delegated stake value", + function() + 
local sourceGateway = utils.deepCopy(testRedelgationGateway) + local targetGateway = utils.deepCopy(testRedelgationGateway) + + sourceGateway.delegates = { + [testRedelegatorAddress] = { + delegatedStake = minDelegatedStake + 1, + startTimestamp = 0, + vaults = {}, + }, + } + _G.GatewayRegistry = { + [testRedelegatorAddress] = targetGateway, + [testSourceAddress] = sourceGateway, + } + + local result = gar.redelegateStake({ + delegateAddress = testRedelegatorAddress, + sourceAddress = testSourceAddress, + targetAddress = testRedelegatorAddress, + qty = 1, -- Move 1 mARIO to the operator gateway + currentTimestamp = timestamp, + }) + + assert.are.same({ + sourceAddress = testSourceAddress, + targetAddress = testRedelegatorAddress, + redelegationFee = 0, + feeResetTimestamp = timestamp + sevenDays, + redelegationsSinceFeeReset = 1, + }, result) + + assert.are.same({ + timestamp = timestamp, + redelegations = 1, + }, _G.Redelegations[testRedelegatorAddress]) + + -- setup expectations on gateway tables + sourceGateway.delegates[testRedelegatorAddress] = { + delegatedStake = minDelegatedStake, + startTimestamp = 0, + vaults = {}, + } + sourceGateway.totalDelegatedStake = minDelegatedStake - 1 + targetGateway.operatorStake = minOperatorStake + 1 + end + ) + it( "should redelegate stake for a fee if the delegator has already done redelegations in the last seven epochs", function() @@ -4404,6 +4455,7 @@ describe("gar", function() error:find("Allow listing only possible when allowDelegatedStaking is set to 'allowlist'") ~= nil ) end) + it( "should disallow delegates if allowDelegatedStaking is true and the allowedDelegatesLookup is not nil", function() From 435301010dbb51b58951c3481059033b51f63b7d Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Thu, 19 Dec 2024 10:44:02 -0600 Subject: [PATCH 66/76] feat: allow operators to redelegate to their own stake when below minimum delegation amt PE-7342 --- src/gar.lua | 34 +++++++++++++++++----------------- 1 file changed, 17 
insertions(+), 17 deletions(-) diff --git a/src/gar.lua b/src/gar.lua index 84e2729a..46a41bba 100644 --- a/src/gar.lua +++ b/src/gar.lua @@ -1823,7 +1823,6 @@ function gar.redelegateStake(params) "This Gateway does not allow this delegate to stake." ) - local previousRedelegations = gar.getRedelegation(delegateAddress) local redelegationFeeRate = gar.getRedelegationFee(delegateAddress).redelegationFeeRate local redelegationFee = math.ceil(stakeToTakeFromSource * (redelegationFeeRate / 100)) local stakeToDelegate = stakeToTakeFromSource - redelegationFee @@ -1889,27 +1888,27 @@ function gar.redelegateStake(params) end end - local existingTargetDelegate = targetGateway.delegates[delegateAddress] - local minimumStakeForGatewayAndDelegate - if existingTargetDelegate and existingTargetDelegate.delegatedStake ~= 0 then - -- It already has a stake that is not zero - minimumStakeForGatewayAndDelegate = 1 -- Delegate must provide at least one additional mARIO - else - -- Consider if the operator increases the minimum amount after you've already staked - minimumStakeForGatewayAndDelegate = targetGateway.settings.minDelegatedStake - end - - -- Check if the delegate has enough stake to redelegate - assert( - stakeToDelegate >= minimumStakeForGatewayAndDelegate, - "Quantity must be greater than the minimum delegated stake amount." 
- ) - -- The stake can now be applied to the targetGateway if targetAddress == delegateAddress then -- move the stake to the operator's stake targetGateway.operatorStake = targetGateway.operatorStake + stakeToDelegate else + local existingTargetDelegate = targetGateway.delegates[delegateAddress] + local minimumStakeForGatewayAndDelegate + if existingTargetDelegate and existingTargetDelegate.delegatedStake ~= 0 then + -- It already has a stake that is not zero + minimumStakeForGatewayAndDelegate = 1 -- Delegate must provide at least one additional mARIO + else + -- Consider if the operator increases the minimum amount after you've already staked + minimumStakeForGatewayAndDelegate = targetGateway.settings.minDelegatedStake + end + + -- Check if the delegate has enough stake to redelegate + assert( + stakeToDelegate >= minimumStakeForGatewayAndDelegate, + "Quantity must be greater than the minimum delegated stake amount." + ) + targetGateway.delegates[delegateAddress] = targetGateway.delegates[delegateAddress] or gar.createDelegateAtGateway(currentTimestamp, targetGateway, delegateAddress) increaseDelegateStakeAtGateway(targetGateway.delegates[delegateAddress], targetGateway, stakeToDelegate) @@ -1918,6 +1917,7 @@ function gar.redelegateStake(params) -- Move redelegation fee to protocol balance balances.increaseBalance(ao.id, redelegationFee) + local previousRedelegations = gar.getRedelegation(delegateAddress) local redelegationsSinceFeeReset = (previousRedelegations and previousRedelegations.redelegations or 0) + 1 -- update the source and target gateways, and the delegator's redelegation fee data From 9500bd880b3aa370754937353c45cd85edd890a9 Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Thu, 19 Dec 2024 12:27:36 -0600 Subject: [PATCH 67/76] chore(test): update ar-io-sdk version This should give us better error messages on errors --- package.json | 2 +- yarn.lock | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/package.json b/package.json 
index 1c9f9827..97cf89af 100644 --- a/package.json +++ b/package.json @@ -15,7 +15,7 @@ "prepare": "husky" }, "devDependencies": { - "@ar.io/sdk": "^3.1.0-alpha.4", + "@ar.io/sdk": "^3.1.0-alpha.9", "@permaweb/ao-loader": "^0.0.36", "@permaweb/aoconnect": "^0.0.59", "arweave": "^1.15.1", diff --git a/yarn.lock b/yarn.lock index a5165b90..6c52fdb4 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2,10 +2,10 @@ # yarn lockfile v1 -"@ar.io/sdk@^3.1.0-alpha.4": - version "3.1.0-alpha.4" - resolved "https://registry.yarnpkg.com/@ar.io/sdk/-/sdk-3.1.0-alpha.4.tgz#c1db0b4477890621fb7aadcd1d21e4462c4522b2" - integrity sha512-yYDFOosf3IUGbGCPJlniDPyjRTUOJoYweD4tRMa7BgQ1YuBVLlZmVzqmF+7eNHT3ileM2Zx9ewbsFO/cS3dAvQ== +"@ar.io/sdk@^3.1.0-alpha.9": + version "3.1.0-alpha.9" + resolved "https://registry.yarnpkg.com/@ar.io/sdk/-/sdk-3.1.0-alpha.9.tgz#d6c148ec494afaf9f27523ba5be82b4ee1fb29c4" + integrity sha512-VQhI9XVNqQAjylRFNy/20glEuLOfZvuEeKrVsMX6JRDrYV3BvWeJBVG1swManaikRdEzENmCH0MwBkr6fjSVEA== dependencies: "@dha-team/arbundles" "^1.0.1" "@permaweb/aoconnect" "^0.0.57" From 605a032e806f045bf9b364df74e019d7fcc9712e Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Thu, 19 Dec 2024 12:54:10 -0600 Subject: [PATCH 68/76] feat: audit msg.Timestamp, remove input of msg.Tags.Timestamp PE-7338 --- src/main.lua | 123 ++++++++++++++++++++++----------------------------- 1 file changed, 52 insertions(+), 71 deletions(-) diff --git a/src/main.lua b/src/main.lua index d6e1a5bb..757950f1 100644 --- a/src/main.lua +++ b/src/main.lua @@ -116,7 +116,6 @@ local ActionMap = { PrimaryName = "Primary-Name", } ---- @alias Message table -- an AO message TODO - update this type with the actual Message type --- @param msg Message --- @param response any local function Send(msg, response) @@ -399,7 +398,7 @@ local function assertAndSanitizeInputs(msg) msg.Tags = utils.validateAndSanitizeInputs(msg.Tags) msg.From = utils.formatAddress(msg.From) - msg.Timestamp = msg.Timestamp and tonumber(msg.Timestamp) or 
tonumber(msg.Tags.Timestamp) or nil + msg.Timestamp = msg.Timestamp and tonumber(msg.Timestamp) -- Timestamp should always be provided by the CU end local function updateLastKnownMessage(msg) @@ -409,6 +408,17 @@ local function updateLastKnownMessage(msg) end end +--- @class ParsedMessage +--- @field Id string +--- @field From string +--- @field Timestamp number +--- @field Tags table + +--- @param handlerName string +--- @param pattern fun():string +--- @param handleFn fun(msg: ParsedMessage) +--- @param critical boolean? +--- @param printEvent boolean? local function addEventingHandler(handlerName, pattern, handleFn, critical, printEvent) critical = critical or false printEvent = printEvent == nil and true or printEvent @@ -629,7 +639,6 @@ end) addEventingHandler(ActionMap.CreateVault, utils.hasMatchingTag("Action", ActionMap.CreateVault), function(msg) local quantity = msg.Tags.Quantity local lockLengthMs = msg.Tags["Lock-Length"] - local timestamp = msg.Timestamp local msgId = msg.Id assert( lockLengthMs and lockLengthMs > 0 and utils.isInteger(lockLengthMs), @@ -639,8 +648,7 @@ addEventingHandler(ActionMap.CreateVault, utils.hasMatchingTag("Action", ActionM quantity and utils.isInteger(quantity) and quantity >= constants.MIN_VAULT_SIZE, "Invalid quantity. Must be integer greater than or equal to " .. constants.MIN_VAULT_SIZE .. 
" mARIO" ) - assert(timestamp, "Timestamp is required for a tick interaction") - local vault = vaults.createVault(msg.From, quantity, lockLengthMs, timestamp, msgId) + local vault = vaults.createVault(msg.From, quantity, lockLengthMs, msg.Timestamp, msgId) if vault ~= nil then msg.ioEvent:addField("Vault-Id", msgId) @@ -667,7 +675,6 @@ addEventingHandler(ActionMap.VaultedTransfer, utils.hasMatchingTag("Action", Act local recipient = msg.Tags.Recipient local quantity = msg.Tags.Quantity local lockLengthMs = msg.Tags["Lock-Length"] - local timestamp = msg.Timestamp local msgId = msg.Id local allowUnsafeAddresses = msg.Tags["Allow-Unsafe-Addresses"] or false assert(utils.isValidAddress(recipient, allowUnsafeAddresses), "Invalid recipient") @@ -679,11 +686,10 @@ addEventingHandler(ActionMap.VaultedTransfer, utils.hasMatchingTag("Action", Act quantity and utils.isInteger(quantity) and quantity >= constants.MIN_VAULT_SIZE, "Invalid quantity. Must be integer greater than or equal to " .. constants.MIN_VAULT_SIZE .. " mARIO" ) - assert(timestamp, "Timestamp is required for a tick interaction") assert(recipient ~= msg.From, "Cannot transfer to self") local vault = - vaults.vaultedTransfer(msg.From, recipient, quantity, lockLengthMs, timestamp, msgId, allowUnsafeAddresses) + vaults.vaultedTransfer(msg.From, recipient, quantity, lockLengthMs, msg.Timestamp, msgId, allowUnsafeAddresses) if vault ~= nil then msg.ioEvent:addField("Vault-Id", msgId) @@ -724,16 +730,14 @@ end) addEventingHandler(ActionMap.ExtendVault, utils.hasMatchingTag("Action", ActionMap.ExtendVault), function(msg) local vaultId = msg.Tags["Vault-Id"] - local timestamp = msg.Timestamp local extendLengthMs = msg.Tags["Extend-Length"] assert(utils.isValidAddress(vaultId, true), "Invalid vault id") assert( extendLengthMs and extendLengthMs > 0 and utils.isInteger(extendLengthMs), "Invalid extension length. 
Must be integer greater than 0" ) - assert(timestamp, "Timestamp is required for a tick interaction") - local vault = vaults.extendVault(msg.From, extendLengthMs, timestamp, vaultId) + local vault = vaults.extendVault(msg.From, extendLengthMs, msg.Timestamp, vaultId) if vault ~= nil then msg.ioEvent:addField("Vault-Id", vaultId) @@ -782,14 +786,12 @@ addEventingHandler(ActionMap.BuyRecord, utils.hasMatchingTag("Action", ActionMap local purchaseType = msg.Tags["Purchase-Type"] and string.lower(msg.Tags["Purchase-Type"]) or "lease" local years = msg.Tags.Years or nil local processId = msg.Tags["Process-Id"] - local timestamp = msg.Timestamp local fundFrom = msg.Tags["Fund-From"] local allowUnsafeProcessId = msg.Tags["Allow-Unsafe-Addresses"] assert( type(purchaseType) == "string" and purchaseType == "lease" or purchaseType == "permabuy", "Invalid purchase type" ) - assert(timestamp, "Timestamp is required for a tick interaction") arns.assertValidArNSName(name) assert(utils.isValidAddress(processId, true), "Process Id must be a valid address.") if years then @@ -804,7 +806,7 @@ addEventingHandler(ActionMap.BuyRecord, utils.hasMatchingTag("Action", ActionMap purchaseType, years, msg.From, - timestamp, + msg.Timestamp, processId, msg.Id, fundFrom, @@ -855,12 +857,10 @@ end) addEventingHandler("upgradeName", utils.hasMatchingTag("Action", ActionMap.UpgradeName), function(msg) local fundFrom = msg.Tags["Fund-From"] local name = string.lower(msg.Tags.Name) - local timestamp = msg.Timestamp assert(type(name) == "string", "Invalid name") - assert(timestamp, "Timestamp is required") assertValidFundFrom(fundFrom) - local result = arns.upgradeRecord(msg.From, name, timestamp, msg.Id, fundFrom) + local result = arns.upgradeRecord(msg.From, name, msg.Timestamp, msg.Id, fundFrom) local record = {} if result ~= nil then @@ -888,15 +888,13 @@ addEventingHandler(ActionMap.ExtendLease, utils.hasMatchingTag("Action", ActionM local fundFrom = msg.Tags["Fund-From"] local name = 
msg.Tags.Name and string.lower(msg.Tags.Name) or nil local years = msg.Tags.Years - local timestamp = msg.Timestamp assert(type(name) == "string", "Invalid name") assert( years and years > 0 and years < 5 and utils.isInteger(years), "Invalid years. Must be integer between 1 and 5" ) - assert(timestamp, "Timestamp is required") assertValidFundFrom(fundFrom) - local result = arns.extendLease(msg.From, name, years, timestamp, msg.Id, fundFrom) + local result = arns.extendLease(msg.From, name, years, msg.Timestamp, msg.Id, fundFrom) local recordResult = {} if result ~= nil then addRecordResultFields(msg.ioEvent, result) @@ -918,16 +916,14 @@ addEventingHandler( local fundFrom = msg.Tags["Fund-From"] local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil local quantity = msg.Tags.Quantity - local timestamp = msg.Timestamp assert(type(name) == "string", "Invalid name") assert( quantity and quantity > 0 and quantity < 9990 and utils.isInteger(quantity), "Invalid quantity. Must be an integer value greater than 0 and less than 9990" ) - assert(timestamp, "Timestamp is required") assertValidFundFrom(fundFrom) - local result = arns.increaseundernameLimit(msg.From, name, quantity, timestamp, msg.Id, fundFrom) + local result = arns.increaseundernameLimit(msg.From, name, quantity, msg.Timestamp, msg.Id, fundFrom) local recordResult = {} if result ~= nil then recordResult = result.record @@ -982,7 +978,6 @@ addEventingHandler(ActionMap.TokenCost, utils.hasMatchingTag("Action", ActionMap local years = msg.Tags.Years or nil local quantity = msg.Tags.Quantity or nil local purchaseType = msg.Tags["Purchase-Type"] or "lease" - local timestamp = msg.Timestamp or msg.Tags.Timestamp local intendedAction = { intent = intent, @@ -990,7 +985,7 @@ addEventingHandler(ActionMap.TokenCost, utils.hasMatchingTag("Action", ActionMap years = years, quantity = quantity, purchaseType = purchaseType, - currentTimestamp = timestamp, + currentTimestamp = msg.Timestamp, from = msg.From, } @@ 
-1010,7 +1005,6 @@ addEventingHandler(ActionMap.CostDetails, utils.hasMatchingTag("Action", ActionM local years = msg.Tags.Years or 1 local quantity = msg.Tags.Quantity local purchaseType = msg.Tags["Purchase-Type"] or "lease" - local timestamp = msg.Timestamp or msg.Tags.Timestamp assertTokenCostTags(msg) assertValidFundFrom(fundFrom) @@ -1020,7 +1014,7 @@ addEventingHandler(ActionMap.CostDetails, utils.hasMatchingTag("Action", ActionM years, quantity, purchaseType, - timestamp, + msg.Timestamp, msg.From, fundFrom ) @@ -1073,14 +1067,14 @@ addEventingHandler(ActionMap.JoinNetwork, utils.hasMatchingTag("Action", ActionM local fromAddress = msg.From local observerAddress = msg.Tags["Observer-Address"] or fromAddress local stake = msg.Tags["Operator-Stake"] - local timestamp = msg.Timestamp assert(not msg.Tags.Services or updatedServices, "Services must be a valid JSON string") msg.ioEvent:addField("Resolved-Observer-Address", observerAddress) msg.ioEvent:addField("Sender-Previous-Balance", Balances[fromAddress] or 0) - local gateway = gar.joinNetwork(fromAddress, stake, updatedSettings, updatedServices, observerAddress, timestamp) + local gateway = + gar.joinNetwork(fromAddress, stake, updatedSettings, updatedServices, observerAddress, msg.Timestamp) msg.ioEvent:addField("Sender-New-Balance", Balances[fromAddress] or 0) if gateway ~= nil then msg.ioEvent:addField("GW-Start-Timestamp", gateway.startTimestamp) @@ -1101,7 +1095,6 @@ addEventingHandler(ActionMap.JoinNetwork, utils.hasMatchingTag("Action", ActionM end) addEventingHandler(ActionMap.LeaveNetwork, utils.hasMatchingTag("Action", ActionMap.LeaveNetwork), function(msg) - local timestamp = msg.Timestamp local unsafeGatewayBeforeLeaving = gar.getGatewayUnsafe(msg.From) local gwPrevTotalDelegatedStake = 0 local gwPrevStake = 0 @@ -1111,9 +1104,8 @@ addEventingHandler(ActionMap.LeaveNetwork, utils.hasMatchingTag("Action", Action end assert(unsafeGatewayBeforeLeaving, "Gateway not found") - assert(timestamp, 
"Timestamp is required") - local gateway = gar.leaveNetwork(msg.From, timestamp, msg.Id) + local gateway = gar.leaveNetwork(msg.From, msg.Timestamp, msg.Id) if gateway ~= nil then msg.ioEvent:addField("GW-Vaults-Count", utils.lengthOfTable(gateway.vaults or {})) @@ -1196,8 +1188,6 @@ addEventingHandler( function(msg) local quantity = msg.Tags.Quantity local instantWithdraw = msg.Tags.Instant and msg.Tags.Instant == "true" or false - local timestamp = msg.Timestamp - assert(timestamp, "Timestamp is required") assert( quantity and utils.isInteger(quantity) and quantity > constants.minimumWithdrawalAmount, "Invalid quantity. Must be integer greater than " .. constants.minimumWithdrawalAmount @@ -1209,7 +1199,7 @@ addEventingHandler( msg.ioEvent:addField("Sender-Previous-Balance", Balances[msg.From]) - local result = gar.decreaseOperatorStake(msg.From, quantity, timestamp, msg.Id, instantWithdraw) + local result = gar.decreaseOperatorStake(msg.From, quantity, msg.Timestamp, msg.Id, instantWithdraw) local decreaseOperatorStakeResult = { gateway = result and result.gateway or {}, penaltyRate = result and result.penaltyRate or 0, @@ -1266,7 +1256,6 @@ addEventingHandler( addEventingHandler(ActionMap.DelegateStake, utils.hasMatchingTag("Action", ActionMap.DelegateStake), function(msg) local gatewayTarget = msg.Tags.Target or msg.Tags.Address local quantity = msg.Tags.Quantity - local timestamp = msg.Timestamp assert(utils.isValidAddress(gatewayTarget, true), "Invalid gateway address") assert( msg.Tags.Quantity and msg.Tags.Quantity > 0 and utils.isInteger(msg.Tags.Quantity), @@ -1275,7 +1264,7 @@ addEventingHandler(ActionMap.DelegateStake, utils.hasMatchingTag("Action", Actio msg.ioEvent:addField("Target-Formatted", gatewayTarget) - local gateway = gar.delegateStake(msg.From, gatewayTarget, quantity, timestamp) + local gateway = gar.delegateStake(msg.From, gatewayTarget, quantity, msg.Timestamp) local delegateResult = {} if gateway ~= nil then local newStake = 
gateway.delegates[msg.From].delegatedStake @@ -1343,13 +1332,11 @@ addEventingHandler( function(msg) local target = msg.Tags.Target or msg.Tags.Address or msg.From -- if not provided, use sender local vaultId = msg.Tags["Vault-Id"] - local timestamp = msg.Timestamp msg.ioEvent:addField("Target-Formatted", target) assert(utils.isValidAddress(target, true), "Invalid gateway address") assert(utils.isValidAddress(vaultId, true), "Invalid vault id") - assert(timestamp, "Timestamp is required") - local result = gar.instantGatewayWithdrawal(msg.From, target, vaultId, timestamp) + local result = gar.instantGatewayWithdrawal(msg.From, target, vaultId, msg.Timestamp) if result ~= nil then local vaultBalance = result.vaultBalance msg.ioEvent:addField("Stake-Amount-Withdrawn", vaultBalance) @@ -1385,7 +1372,6 @@ addEventingHandler( local target = msg.Tags.Target or msg.Tags.Address local quantity = msg.Tags.Quantity local instantWithdraw = msg.Tags.Instant and msg.Tags.Instant == "true" or false - local timestamp = msg.Timestamp msg.ioEvent:addField("Target-Formatted", target) msg.ioEvent:addField("Quantity", quantity) assert( @@ -1393,7 +1379,7 @@ addEventingHandler( "Invalid quantity. Must be integer greater than " .. 
constants.minimumWithdrawalAmount ) - local result = gar.decreaseDelegateStake(target, msg.From, quantity, timestamp, msg.Id, instantWithdraw) + local result = gar.decreaseDelegateStake(target, msg.From, quantity, msg.Timestamp, msg.Id, instantWithdraw) local decreaseDelegateStakeResult = { gateway = result and result.gateway or {}, penaltyRate = result and result.penaltyRate or 0, @@ -1502,9 +1488,14 @@ addEventingHandler( -- TODO: we could standardize this on our prepended handler to inject and ensure formatted addresses and converted values local observerAddress = msg.Tags["Observer-Address"] or unsafeGateway.observerAddress - local timestamp = msg.Timestamp - local result = - gar.updateGatewaySettings(msg.From, updatedSettings, updatedServices, observerAddress, timestamp, msg.Id) + local result = gar.updateGatewaySettings( + msg.From, + updatedSettings, + updatedServices, + observerAddress, + msg.Timestamp, + msg.Id + ) Send(msg, { Target = msg.From, Tags = { Action = ActionMap.UpdateGatewaySettings .. 
"-Notice" }, @@ -1517,16 +1508,14 @@ addEventingHandler(ActionMap.ReassignName, utils.hasMatchingTag("Action", Action local newProcessId = msg.Tags["Process-Id"] local name = string.lower(msg.Tags.Name) local initiator = msg.Tags.Initiator - local timestamp = msg.Timestamp local allowUnsafeProcessId = msg.Tags["Allow-Unsafe-Addresses"] assert(name and #name > 0, "Name is required") assert(utils.isValidAddress(newProcessId, true), "Process Id must be a valid address.") - assert(timestamp, "Timestamp is required") if initiator ~= nil then assert(utils.isValidAddress(initiator, true), "Invalid initiator address.") end - local reassignment = arns.reassignName(name, msg.From, timestamp, newProcessId, allowUnsafeProcessId) + local reassignment = arns.reassignName(name, msg.From, msg.Timestamp, newProcessId, allowUnsafeProcessId) Send(msg, { Target = msg.From, @@ -1549,13 +1538,12 @@ end) addEventingHandler(ActionMap.SaveObservations, utils.hasMatchingTag("Action", ActionMap.SaveObservations), function(msg) local reportTxId = msg.Tags["Report-Tx-Id"] local failedGateways = utils.splitAndTrimString(msg.Tags["Failed-Gateways"], ",") - local timestamp = msg.Timestamp assert(utils.isValidArweaveAddress(reportTxId), "Invalid report tx id. Must be a valid Arweave address.") for _, gateway in ipairs(failedGateways) do assert(utils.isValidAddress(gateway, true), "Invalid failed gateway address: " .. 
gateway) end - local observations = epochs.saveObservations(msg.From, reportTxId, failedGateways, timestamp) + local observations = epochs.saveObservations(msg.From, reportTxId, failedGateways, msg.Timestamp) if observations ~= nil then local failureSummariesCount = utils.lengthOfTable(observations.failureSummaries or {}) if failureSummariesCount > 0 then @@ -1658,12 +1646,11 @@ end) -- distribute rewards -- NOTE: THIS IS A CRITICAL HANDLER AND WILL DISCARD THE MEMORY ON ERROR addEventingHandler("distribute", utils.hasMatchingTag("Action", "Tick"), function(msg) - local msgTimestamp = msg.Timestamp local msgId = msg.Id local blockHeight = tonumber(msg["Block-Height"]) local hashchain = msg["Hash-Chain"] local lastTickedEpochIndex = LastTickedEpochIndex - local targetCurrentEpochIndex = epochs.getEpochIndexForTimestamp(msgTimestamp) + local targetCurrentEpochIndex = epochs.getEpochIndexForTimestamp(msg.Timestamp) assert(blockHeight, "Block height is required") assert(hashchain, "Hash chain is required") @@ -1697,7 +1684,7 @@ addEventingHandler("distribute", utils.hasMatchingTag("Action", "Tick"), functio local _, _, epochDistributionTimestamp = epochs.getEpochTimestampsForIndex(i) -- use the minimum of the msg timestamp or the epoch distribution timestamp, this ensures an epoch gets created for the genesis block -- and that we don't try and distribute before an epoch is created - local tickTimestamp = math.min(msgTimestamp or 0, epochDistributionTimestamp) + local tickTimestamp = math.min(msg.Timestamp, epochDistributionTimestamp) -- TODO: if we need to "recover" epochs, we can't rely on just the current message hashchain and block height, -- we should set the prescribed observers and names to empty arrays and distribute rewards accordingly local tickResult = tick.tickEpoch(tickTimestamp, blockHeight, hashchain, msgId) @@ -1894,7 +1881,7 @@ end) addEventingHandler(ActionMap.Epoch, utils.hasMatchingTag("Action", ActionMap.Epoch), function(msg) -- check if the epoch 
number is provided, if not get the epoch number from the timestamp local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) - or epochs.getEpochIndexForTimestamp(msg.Timestamp or msg.Tags.Timestamp) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) local epoch = epochs.getEpoch(epochIndex) -- TODO: this check can be removed after 14 days of release once old epochs are pruned if @@ -1934,7 +1921,7 @@ addEventingHandler( utils.hasMatchingTag("Action", ActionMap.PrescribedObservers), function(msg) local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) - or epochs.getEpochIndexForTimestamp(msg.Timestamp or msg.Tags.Timestamp) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) local prescribedObserversWithWeights = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) Send(msg, { Target = msg.From, @@ -1946,7 +1933,7 @@ addEventingHandler( addEventingHandler(ActionMap.Observations, utils.hasMatchingTag("Action", ActionMap.Observations), function(msg) local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) - or epochs.getEpochIndexForTimestamp(msg.Timestamp or msg.Tags.Timestamp) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) local observations = epochs.getObservationsForEpoch(epochIndex) Send(msg, { Target = msg.From, @@ -1959,7 +1946,7 @@ end) addEventingHandler(ActionMap.PrescribedNames, utils.hasMatchingTag("Action", ActionMap.PrescribedNames), function(msg) -- check if the epoch number is provided, if not get the epoch number from the timestamp local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) - or epochs.getEpochIndexForTimestamp(msg.Timestamp or msg.Tags.Timestamp) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) local prescribedNames = epochs.getPrescribedNamesForEpoch(epochIndex) Send(msg, { Target = msg.From, @@ -2079,7 +2066,6 @@ addEventingHandler("releaseName", utils.hasMatchingTag("Action", ActionMap.Relea local name = 
msg.Tags.Name and string.lower(msg.Tags.Name) local processId = msg.From local initiator = msg.Tags.Initiator or msg.From - local timestamp = msg.Timestamp assert(name and #name > 0, "Name is required") -- this could be an undername, so we don't want to assertValidArNSName assert(processId and utils.isValidAddress(processId, true), "Process-Id must be a valid address") @@ -2093,11 +2079,11 @@ addEventingHandler("releaseName", utils.hasMatchingTag("Action", ActionMap.Relea #primaryNames.getPrimaryNamesForBaseName(name) == 0, "Primary names are associated with this name. They must be removed before releasing the name." ) - assert(timestamp, "Timestamp is required") + -- we should be able to create the returned name here local removedRecord = arns.removeRecord(name) local removedPrimaryNamesAndOwners = primaryNames.removePrimaryNamesForBaseName(name) -- NOTE: this should be empty if there are no primary names allowed before release - local returnedName = arns.createReturnedName(name, timestamp, initiator) + local returnedName = arns.createReturnedName(name, msg.Timestamp, initiator) local returnedNameData = { removedRecord = removedRecord, removedPrimaryNamesAndOwners = removedPrimaryNamesAndOwners, @@ -2214,12 +2200,11 @@ addEventingHandler("allowDelegates", utils.hasMatchingTag("Action", ActionMap.Al end) addEventingHandler("disallowDelegates", utils.hasMatchingTag("Action", ActionMap.DisallowDelegates), function(msg) - local timestamp = msg.Timestamp local disallowedDelegates = msg.Tags["Disallowed-Delegates"] and utils.splitAndTrimString(msg.Tags["Disallowed-Delegates"], ",") assert(disallowedDelegates and #disallowedDelegates > 0, "Disallowed-Delegates is required") msg.ioEvent:addField("Input-Disallowed-Delegates-Count", utils.lengthOfTable(disallowedDelegates)) - local result = gar.disallowDelegates(disallowedDelegates, msg.From, msg.Id, timestamp) + local result = gar.disallowDelegates(disallowedDelegates, msg.From, msg.Id, msg.Timestamp) if result ~= nil then 
msg.ioEvent:addField("New-Disallowed-Delegates", result.removedDelegates or {}) msg.ioEvent:addField("New-Disallowed-Delegates-Count", utils.lengthOfTable(result.removedDelegates)) @@ -2257,12 +2242,10 @@ addEventingHandler(ActionMap.RedelegateStake, utils.hasMatchingTag("Action", Act local delegateAddress = msg.From local quantity = msg.Tags.Quantity or nil local vaultId = msg.Tags["Vault-Id"] - local timestamp = msg.Timestamp assert(utils.isValidAddress(sourceAddress, true), "Invalid source gateway address") assert(utils.isValidAddress(targetAddress, true), "Invalid target gateway address") assert(utils.isValidAddress(delegateAddress, true), "Invalid delegator address") - assert(timestamp, "Timestamp is required") if vaultId then assert(utils.isValidAddress(vaultId, true), "Invalid vault id") end @@ -2273,7 +2256,7 @@ addEventingHandler(ActionMap.RedelegateStake, utils.hasMatchingTag("Action", Act targetAddress = targetAddress, delegateAddress = delegateAddress, qty = quantity, - currentTimestamp = timestamp, + currentTimestamp = msg.Timestamp, vaultId = vaultId, }) @@ -2370,13 +2353,11 @@ addEventingHandler("requestPrimaryName", utils.hasMatchingTag("Action", ActionMa local fundFrom = msg.Tags["Fund-From"] local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil local initiator = msg.From - local timestamp = msg.Timestamp assert(name, "Name is required") assert(initiator, "Initiator is required") - assert(timestamp, "Timestamp is required") assertValidFundFrom(fundFrom) - local primaryNameResult = primaryNames.createPrimaryNameRequest(name, initiator, timestamp, msg.Id, fundFrom) + local primaryNameResult = primaryNames.createPrimaryNameRequest(name, initiator, msg.Timestamp, msg.Id, fundFrom) addPrimaryNameRequestData(msg.ioEvent, primaryNameResult) @@ -2411,13 +2392,13 @@ addEventingHandler( function(msg) local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil local recipient = msg.Tags.Recipient or msg.From - local timestamp = msg.Timestamp 
+ assert(name, "Name is required") assert(recipient, "Recipient is required") assert(msg.From, "From is required") - assert(timestamp, "Timestamp is required") - local approvedPrimaryNameResult = primaryNames.approvePrimaryNameRequest(recipient, name, msg.From, timestamp) + local approvedPrimaryNameResult = + primaryNames.approvePrimaryNameRequest(recipient, name, msg.From, msg.Timestamp) addPrimaryNameRequestData(msg.ioEvent, approvedPrimaryNameResult) --- send a notice to the from From c2c95d6fbfe75ca3e5e9c98a0754fabefcd2c6a3 Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Thu, 19 Dec 2024 13:13:26 -0600 Subject: [PATCH 69/76] refactor: improve type annotations PE-7338 --- src/main.lua | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/src/main.lua b/src/main.lua index 757950f1..59090df8 100644 --- a/src/main.lua +++ b/src/main.lua @@ -116,7 +116,7 @@ local ActionMap = { PrimaryName = "Primary-Name", } ---- @param msg Message +--- @param msg ParsedMessage --- @param response any local function Send(msg, response) if msg.reply then @@ -158,8 +158,8 @@ local function adjustSuppliesForFundingPlan(fundingPlan, rewardForInitiator) LastKnownCirculatingSupply = LastKnownCirculatingSupply - fundingPlan.balance + rewardForInitiator end ---- @param ioEvent table ---- @param result BuyRecordResult|RecordInteractionResult +--- @param ioEvent IOEvent +--- @param result BuyRecordResult|RecordInteractionResult|CreatePrimaryNameResult|PrimaryNameRequestApproval local function addResultFundingPlanFields(ioEvent, result) ioEvent:addFieldsWithPrefixIfExist(result.fundingPlan, "FP-", { "balance" }) local fundingPlanVaultsCount = 0 @@ -200,7 +200,7 @@ local function addResultFundingPlanFields(ioEvent, result) adjustSuppliesForFundingPlan(result.fundingPlan, result.returnedName and result.returnedName.rewardForInitiator) end ---- @param ioEvent table +--- @param ioEvent IOEvent ---@param result RecordInteractionResult|BuyRecordResult 
local function addRecordResultFields(ioEvent, result) ioEvent:addFieldsIfExist(result, { @@ -323,7 +323,7 @@ local function addPruneGatewaysResult(ioEvent, pruneGatewaysResult) end end ---- @param ioEvent table +--- @param ioEvent IOEvent local function addNextPruneTimestampsData(ioEvent) ioEvent:addField("Next-Returned-Names-Prune-Timestamp", arns.nextReturnedNamesPruneTimestamp()) ioEvent:addField("Next-Epochs-Prune-Timestamp", epochs.nextEpochsPruneTimestamp()) @@ -334,7 +334,7 @@ local function addNextPruneTimestampsData(ioEvent) ioEvent:addField("Next-Primary-Names-Prune-Timestamp", primaryNames.nextPrimaryNamesPruneTimestamp()) end ---- @param ioEvent table +--- @param ioEvent IOEvent --- @param prunedStateResult PruneStateResult local function addNextPruneTimestampsResults(ioEvent, prunedStateResult) --- @type PrunedGatewaysResult @@ -368,13 +368,13 @@ local function assertValidFundFrom(fundFrom) assert(validFundFrom[fundFrom], "Invalid fund from type. Must be one of: any, balance, stakes") end ---- @param ioEvent table +--- @param ioEvent IOEvent local function addPrimaryNameCounts(ioEvent) ioEvent:addField("Total-Primary-Names", utils.lengthOfTable(primaryNames.getUnsafePrimaryNames())) ioEvent:addField("Total-Primary-Name-Requests", utils.lengthOfTable(primaryNames.getUnsafePrimaryNameRequests())) end ---- @param ioEvent table +--- @param ioEvent IOEvent --- @param primaryNameResult CreatePrimaryNameResult|PrimaryNameRequestApproval local function addPrimaryNameRequestData(ioEvent, primaryNameResult) ioEvent:addFieldsIfExist(primaryNameResult, { "baseNameOwner" }) @@ -408,14 +408,20 @@ local function updateLastKnownMessage(msg) end end +--- @alias IOEvent table -- TODO: Type this + --- @class ParsedMessage --- @field Id string +--- @field Action string --- @field From string ---- @field Timestamp number +--- @field Timestamp Timestamp --- @field Tags table +--- @field ioEvent IOEvent +--- @field Cast boolean? +--- @field reply? 
fun(response: any) --- @param handlerName string ---- @param pattern fun():string +--- @param pattern fun(msg: ParsedMessage):'continue'|boolean --- @param handleFn fun(msg: ParsedMessage) --- @param critical boolean? --- @param printEvent boolean? @@ -563,7 +569,7 @@ end, function(msg) addSupplyData(msg.ioEvent) end - return prunedStateResult + -- return prunedStateResult -- TODO: need to return? end, CRITICAL, false) -- Write handlers From 5085734beb0b224b2de5648bba3b53004c808cb9 Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Thu, 19 Dec 2024 13:21:23 -0600 Subject: [PATCH 70/76] refactor: remove redundant return PE-7338 --- src/main.lua | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/main.lua b/src/main.lua index 59090df8..2e6932b3 100644 --- a/src/main.lua +++ b/src/main.lua @@ -568,8 +568,6 @@ end, function(msg) then addSupplyData(msg.ioEvent) end - - -- return prunedStateResult -- TODO: need to return? end, CRITICAL, false) -- Write handlers From 8d40736f1e02b1eaf3f8105cf2fb4fea297200dd Mon Sep 17 00:00:00 2001 From: Derek Sonnenberg Date: Fri, 20 Dec 2024 12:03:46 -0600 Subject: [PATCH 71/76] refactor: standardize ARIOtoMARIO PE-7198 --- spec/epochs_spec.lua | 2 +- src/constants.lua | 117 +++++++++++++++++---------------- src/gar.lua | 4 +- src/main.lua | 4 +- tests/handlers.test.mjs | 8 +-- tests/helpers.mjs | 3 + tests/invariants.mjs | 4 +- tests/monitor/monitor.test.mjs | 13 ++-- 8 files changed, 80 insertions(+), 75 deletions(-) diff --git a/spec/epochs_spec.lua b/spec/epochs_spec.lua index eabe179d..17f1cbec 100644 --- a/spec/epochs_spec.lua +++ b/spec/epochs_spec.lua @@ -13,7 +13,7 @@ local testSettings = { delegateRewardShareRatio = 0, } local startTimestamp = 1704092400000 -local protocolBalance = 500000000 * 1000000 +local protocolBalance = constants.ARIOToMARIO(500000000) local hashchain = "NGU1fq_ssL9m6kRbRU1bqiIDBht79ckvAwRMGElkSOg" -- base64 of "some sample hash" describe("epochs", function() diff --git a/src/constants.lua 
b/src/constants.lua index 1b677207..ea858a41 100644 --- a/src/constants.lua +++ b/src/constants.lua @@ -9,6 +9,14 @@ constants.oneWeekMs = constants.oneDayMs * 7 constants.twoWeeksMs = constants.oneWeekMs * 2 constants.oneYearMs = 31536000 * 1000 +constants.mARIOPerARIO = 1000000 + +--- @param ARIO number +--- @return mARIO +function constants.ARIOToMARIO(ARIO) + return ARIO * constants.mARIOPerARIO +end + -- EPOCHS constants.defaultEpochDurationMs = constants.oneDayMs constants.maximumRewardRate = 0.001 @@ -19,10 +27,9 @@ constants.rewardDecayLastEpoch = 547 -- GAR constants.DEFAULT_UNDERNAME_COUNT = 10 constants.DEADLINE_DURATION_MS = constants.oneHourMs -constants.totalTokenSupply = 1000000000 * 1000000 -- 1 billion tokens +constants.totalTokenSupply = constants.ARIOToMARIO(1000000000) -- 1 billion tokens constants.MIN_EXPEDITED_WITHDRAWAL_PENALTY_RATE = 0.10 -- the minimum penalty rate for an expedited withdrawal (10% of the amount being withdrawn) constants.MAX_EXPEDITED_WITHDRAWAL_PENALTY_RATE = 0.50 -- the maximum penalty rate for an expedited withdrawal (50% of the amount being withdrawn) -constants.mARIOPerARIO = 1000000 constants.minimumWithdrawalAmount = constants.mARIOPerARIO -- the minimum amount that can be withdrawn from the GAR constants.redelegationFeeResetIntervalMs = constants.defaultEpochDurationMs * 7 -- 7 epochs constants.maxDelegateRewardShareRatio = 95 -- 95% of rewards can be shared with delegates @@ -43,7 +50,7 @@ constants.ANNUAL_PERCENTAGE_FEE = 0.2 -- 20% constants.ARNS_NAME_DOES_NOT_EXIST_MESSAGE = "Name not found in the ArNS Registry!" 
constants.UNDERNAME_LEASE_FEE_PERCENTAGE = 0.001 constants.UNDERNAME_PERMABUY_FEE_PERCENTAGE = 0.005 -constants.PRIMARY_NAME_REQUEST_COST = 10000000 -- 10 ARIO +constants.PRIMARY_NAME_REQUEST_COST = constants.ARIOToMARIO(10) -- 10 ARIO constants.gracePeriodMs = constants.defaultEpochDurationMs * 14 -- 14 epochs constants.maxLeaseLengthYears = 5 constants.returnedNamePeriod = constants.defaultEpochDurationMs * 14 -- 14 epochs @@ -67,63 +74,63 @@ constants.demandSettings = { } -- VAULTS -constants.MIN_VAULT_SIZE = 100000000 -- 100 ARIO +constants.MIN_VAULT_SIZE = constants.ARIOToMARIO(100) -- 100 ARIO constants.MAX_TOKEN_LOCK_TIME_MS = 12 * 365 * 24 * 60 * 60 * 1000 -- The maximum amount of blocks tokens can be locked in a vault (12 years of blocks) constants.MIN_TOKEN_LOCK_TIME_MS = 14 * 24 * 60 * 60 * 1000 -- The minimum amount of blocks tokens can be locked in a vault (14 days of blocks) -- ARNS FEES constants.genesisFees = { - [1] = 2000000000000, - [2] = 200000000000, - [3] = 40000000000, - [4] = 10000000000, - [5] = 4000000000, - [6] = 2000000000, - [7] = 1000000000, - [8] = 600000000, - [9] = 500000000, - [10] = 500000000, - [11] = 500000000, - [12] = 500000000, - [13] = 400000000, - [14] = 400000000, - [15] = 400000000, - [16] = 400000000, - [17] = 400000000, - [18] = 400000000, - [19] = 400000000, - [20] = 400000000, - [21] = 400000000, - [22] = 400000000, - [23] = 400000000, - [24] = 400000000, - [25] = 400000000, - [26] = 400000000, - [27] = 400000000, - [28] = 400000000, - [29] = 400000000, - [30] = 400000000, - [31] = 400000000, - [32] = 400000000, - [33] = 400000000, - [34] = 400000000, - [35] = 400000000, - [36] = 400000000, - [37] = 400000000, - [38] = 400000000, - [39] = 400000000, - [40] = 400000000, - [41] = 400000000, - [42] = 400000000, - [43] = 400000000, - [44] = 400000000, - [45] = 400000000, - [46] = 400000000, - [47] = 400000000, - [48] = 400000000, - [49] = 400000000, - [50] = 400000000, - [51] = 400000000, + [1] = 
constants.ARIOToMARIO(2000000), + [2] = constants.ARIOToMARIO(200000), + [3] = constants.ARIOToMARIO(40000), + [4] = constants.ARIOToMARIO(10000), + [5] = constants.ARIOToMARIO(4000), + [6] = constants.ARIOToMARIO(2000), + [7] = constants.ARIOToMARIO(1000), + [8] = constants.ARIOToMARIO(600), + [9] = constants.ARIOToMARIO(500), + [10] = constants.ARIOToMARIO(500), + [11] = constants.ARIOToMARIO(500), + [12] = constants.ARIOToMARIO(500), + [13] = constants.ARIOToMARIO(400), + [14] = constants.ARIOToMARIO(400), + [15] = constants.ARIOToMARIO(400), + [16] = constants.ARIOToMARIO(400), + [17] = constants.ARIOToMARIO(400), + [18] = constants.ARIOToMARIO(400), + [19] = constants.ARIOToMARIO(400), + [20] = constants.ARIOToMARIO(400), + [21] = constants.ARIOToMARIO(400), + [22] = constants.ARIOToMARIO(400), + [23] = constants.ARIOToMARIO(400), + [24] = constants.ARIOToMARIO(400), + [25] = constants.ARIOToMARIO(400), + [26] = constants.ARIOToMARIO(400), + [27] = constants.ARIOToMARIO(400), + [28] = constants.ARIOToMARIO(400), + [29] = constants.ARIOToMARIO(400), + [30] = constants.ARIOToMARIO(400), + [31] = constants.ARIOToMARIO(400), + [32] = constants.ARIOToMARIO(400), + [33] = constants.ARIOToMARIO(400), + [34] = constants.ARIOToMARIO(400), + [35] = constants.ARIOToMARIO(400), + [36] = constants.ARIOToMARIO(400), + [37] = constants.ARIOToMARIO(400), + [38] = constants.ARIOToMARIO(400), + [39] = constants.ARIOToMARIO(400), + [40] = constants.ARIOToMARIO(400), + [41] = constants.ARIOToMARIO(400), + [42] = constants.ARIOToMARIO(400), + [43] = constants.ARIOToMARIO(400), + [44] = constants.ARIOToMARIO(400), + [45] = constants.ARIOToMARIO(400), + [46] = constants.ARIOToMARIO(400), + [47] = constants.ARIOToMARIO(400), + [48] = constants.ARIOToMARIO(400), + [49] = constants.ARIOToMARIO(400), + [50] = constants.ARIOToMARIO(400), + [51] = constants.ARIOToMARIO(400), } -- General diff --git a/src/gar.lua b/src/gar.lua index 46a41bba..68269090 100644 --- a/src/gar.lua +++ 
b/src/gar.lua @@ -104,14 +104,14 @@ GatewayRegistrySettings = { maxTenureWeight = 4, }, operators = { - minStake = 10000 * 1000000, -- 10,000 ARIO + minStake = constants.ARIOToMARIO(10000), -- 10,000 ARIO withdrawLengthMs = 90 * 24 * 60 * 60 * 1000, -- 90 days to lower operator stake leaveLengthMs = 90 * 24 * 60 * 60 * 1000, -- 90 days that balance will be vaulted failedEpochCountMax = 30, -- number of epochs failed before marked as leaving failedEpochSlashRate = 0.2, -- 20% of stake is returned to protocol balance }, delegates = { - minStake = 10 * 1000000, -- 10 ARIO + minStake = constants.ARIOToMARIO(10), -- 10 ARIO withdrawLengthMs = 90 * 24 * 60 * 60 * 1000, -- 90 days }, } diff --git a/src/main.lua b/src/main.lua index 2e6932b3..a5c38811 100644 --- a/src/main.lua +++ b/src/main.lua @@ -14,8 +14,8 @@ Protocol = Protocol or ao.env.Process.Id Balances = Balances or {} if not Balances[Protocol] then -- initialize the balance for the process id Balances = { - [Protocol] = math.floor(50000000 * 1000000), -- 50M ARIO - [Owner] = math.floor(constants.totalTokenSupply - (50000000 * 1000000)), -- 950M ARIO + [Protocol] = math.floor(constants.ARIOToMARIO(50000000)), -- 50M ARIO + [Owner] = math.floor(constants.totalTokenSupply - (constants.ARIOToMARIO(50000000))), -- 950M ARIO } end Vaults = Vaults or {} diff --git a/tests/handlers.test.mjs b/tests/handlers.test.mjs index 8fbc770b..d8b79957 100644 --- a/tests/handlers.test.mjs +++ b/tests/handlers.test.mjs @@ -1,4 +1,4 @@ -import { handle } from './helpers.mjs'; +import { ARIOToMARIO, handle } from './helpers.mjs'; import { describe, it } from 'node:test'; import assert from 'node:assert'; @@ -54,7 +54,7 @@ describe('handlers', async () => { const tokenSupplyData = JSON.parse( tokenSupplyResult.Messages?.[0]?.Data, ); - assert.ok(tokenSupplyData === 1000000000 * 1000000); + assert.ok(tokenSupplyData === ARIOToMARIO(1000000000)); }); }); @@ -83,11 +83,11 @@ describe('handlers', async () => { const supplyData = 
JSON.parse(supplyResult.Messages?.[0]?.Data); assert.ok( - supplyData.total === 1000000000 * 1000000, + supplyData.total === ARIOToMARIO(1000000000), 'total supply should be 1 billion ARIO but was ' + supplyData.total, ); assert.ok( - supplyData.circulating === 1000000000 * 1000000 - 50000000000000, + supplyData.circulating === ARIOToMARIO(1000000000) - 50000000000000, 'circulating supply should be 0.95 billion ARIO but was ' + supplyData.circulating, ); diff --git a/tests/helpers.mjs b/tests/helpers.mjs index e510122e..f5be73ae 100644 --- a/tests/helpers.mjs +++ b/tests/helpers.mjs @@ -24,6 +24,9 @@ export const genesisEpochTimestamp = 1719900000000; // Tuesday, July 2, 2024, 06 export const epochLength = 1000 * 60 * 60 * 24; // 24 hours export const distributionDelay = 1000 * 60 * 40; // 40 minutes +export const mARIOPerARIO = 1_000_000; +export const ARIOToMARIO = (amount) => amount * mARIOPerARIO; + const { handle: originalHandle, memory } = await createAosLoader(); export const startMemory = memory; diff --git a/tests/invariants.mjs b/tests/invariants.mjs index 3ee01464..85338804 100644 --- a/tests/invariants.mjs +++ b/tests/invariants.mjs @@ -1,5 +1,5 @@ import assert from 'node:assert'; -import { getBalances, getVaults, handle } from './helpers.mjs'; +import { ARIOToMARIO, getBalances, getVaults, handle } from './helpers.mjs'; function assertValidBalance(balance, expectedMin = 1) { assert( @@ -85,7 +85,7 @@ async function assertNoTotalSupplyInvariants({ timestamp, memory }) { const supplyData = JSON.parse(supplyResult.Messages?.[0]?.Data); assert.ok( - supplyData.total === 1000000000 * 1000000, + supplyData.total === ARIOToMARIO(1000000000), 'total supply should be 1,000,000,000,000,000 mARIO but was ' + supplyData.total, ); diff --git a/tests/monitor/monitor.test.mjs b/tests/monitor/monitor.test.mjs index 4305f9da..81456d8a 100644 --- a/tests/monitor/monitor.test.mjs +++ b/tests/monitor/monitor.test.mjs @@ -1,14 +1,9 @@ -import { - AOProcess, - ARIO, - 
ARIO_DEVNET_PROCESS_ID, - ARIO_TESTNET_PROCESS_ID, - Logger, -} from '@ar.io/sdk'; +import { AOProcess, ARIO, ARIO_DEVNET_PROCESS_ID, Logger } from '@ar.io/sdk'; import { connect } from '@permaweb/aoconnect'; import { strict as assert } from 'node:assert'; import { describe, it, before, after } from 'node:test'; import { DockerComposeEnvironment, Wait } from 'testcontainers'; +import { ARIOToMARIO } from '../helpers.mjs'; // set debug level logs for to get detailed messages Logger.default.setLogLevel('info'); @@ -144,7 +139,7 @@ describe('setup', () => { it('should always be 1 billion ARIO', async () => { const supplyData = await io.getTokenSupply(); assert( - supplyData.total === 1000000000 * 1000000, + supplyData.total === ARIOToMARIO(1000000000), `Total supply is not 1 billion ARIO: ${supplyData.total}`, ); assert( @@ -230,7 +225,7 @@ describe('setup', () => { supplyData.protocolBalance; assert( supplyData.total === computedTotal && - computedTotal === 1000000000 * 1000000, + computedTotal === ARIOToMARIO(1000000000), `Computed total supply (${computedTotal}) is not equal to the sum of protocol balance, circulating, locked, staked, and delegated and withdrawn provided by the contract (${supplyData.total}) and does not match the expected total of 1 billion ARIO`, ); From 2ec34f1ea56e5f6b3198c8b81127b31ddc1c4aa6 Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Fri, 20 Dec 2024 14:14:53 -0600 Subject: [PATCH 72/76] chore(test): fix monitor test and add to CI --- .github/workflows/build.yaml | 7 +++++++ tests/monitor/monitor.test.mjs | 4 +++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 3f9719ee..6e6eccb1 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -82,6 +82,13 @@ jobs: IO_PROCESS_ID: ${{ vars.IO_NETWORK_PROCESS_ID }} AO_CU_URL: ${{ vars.AO_CU_URL }} + monitor: + runs-on: ubuntu-latest + steps: + - run: yarn test:monitor + env: + IO_PROCESS_ID: ${{ 
vars.IO_NETWORK_PROCESS_ID }} + evolve: runs-on: ubuntu-latest needs: [integration, unit, sdk, lint] diff --git a/tests/monitor/monitor.test.mjs b/tests/monitor/monitor.test.mjs index 81456d8a..19061f75 100644 --- a/tests/monitor/monitor.test.mjs +++ b/tests/monitor/monitor.test.mjs @@ -3,11 +3,13 @@ import { connect } from '@permaweb/aoconnect'; import { strict as assert } from 'node:assert'; import { describe, it, before, after } from 'node:test'; import { DockerComposeEnvironment, Wait } from 'testcontainers'; -import { ARIOToMARIO } from '../helpers.mjs'; // set debug level logs for to get detailed messages Logger.default.setLogLevel('info'); +export const mARIOPerARIO = 1_000_000; +export const ARIOToMARIO = (amount) => amount * mARIOPerARIO; + const processId = process.env.IO_PROCESS_ID || ARIO_DEVNET_PROCESS_ID; const io = ARIO.init({ process: new AOProcess({ From 036d9e45568c5afaf0ce0fa655aea384eb2bbd9e Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Fri, 20 Dec 2024 14:16:21 -0600 Subject: [PATCH 73/76] chore(git): copy repo --- .github/workflows/build.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 6e6eccb1..caf6dfa7 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -85,6 +85,13 @@ jobs: monitor: runs-on: ubuntu-latest steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version-file: '.nvmrc' + cache: 'yarn' + + - run: yarn --frozen-lockfile - run: yarn test:monitor env: IO_PROCESS_ID: ${{ vars.IO_NETWORK_PROCESS_ID }} From a60235956d7e78de918c9debe5e38a477bdbd54e Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Fri, 20 Dec 2024 14:17:54 -0600 Subject: [PATCH 74/76] chore(test): fix monitor command in git --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index caf6dfa7..6ac54051 100644 --- 
a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -92,7 +92,7 @@ jobs: cache: 'yarn' - run: yarn --frozen-lockfile - - run: yarn test:monitor + - run: yarn monitor env: IO_PROCESS_ID: ${{ vars.IO_NETWORK_PROCESS_ID }} From 80f27d65589b1cc694ee83637e36bb893bc8905a Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Fri, 20 Dec 2024 14:22:50 -0600 Subject: [PATCH 75/76] chore(git): update env var to ARIO_NETWORK_PROCESS_ID --- .github/workflows/build.yaml | 14 +++++++------- .github/workflows/monitor.yaml | 2 +- package.json | 4 ++-- tests/monitor/monitor.test.mjs | 2 +- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 6ac54051..d71ca876 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -79,7 +79,7 @@ jobs: - run: yarn test:esm # full e2e integration tests working-directory: ./ar-io-sdk env: - IO_PROCESS_ID: ${{ vars.IO_NETWORK_PROCESS_ID }} + ARIO_NETWORK_PROCESS_ID: ${{ vars.ARIO_NETWORK_PROCESS_ID }} AO_CU_URL: ${{ vars.AO_CU_URL }} monitor: @@ -94,7 +94,7 @@ jobs: - run: yarn --frozen-lockfile - run: yarn monitor env: - IO_PROCESS_ID: ${{ vars.IO_NETWORK_PROCESS_ID }} + ARIO_NETWORK_PROCESS_ID: ${{ vars.ARIO_NETWORK_PROCESS_ID }} evolve: runs-on: ubuntu-latest @@ -111,7 +111,7 @@ jobs: - run: yarn evolve env: WALLET: ${{ secrets.WALLET }} - IO_NETWORK_PROCESS_ID: ${{ vars.IO_NETWORK_PROCESS_ID }} + ARIO_NETWORK_PROCESS_ID: ${{ vars.ARIO_NETWORK_PROCESS_ID }} AO_CU_URL: ${{ vars.AO_CU_URL }} - name: Notify Success if: success() @@ -134,12 +134,12 @@ jobs: }, { "title": "Process ID", - "value": "${{ vars.IO_NETWORK_PROCESS_ID }}", + "value": "${{ vars.ARIO_NETWORK_PROCESS_ID }}", "short": true }, { "title": "View on ao.link", - "value": "https://www.ao.link/#/entity/${{ vars.IO_NETWORK_PROCESS_ID }}?tab=source-code", + "value": "https://www.ao.link/#/entity/${{ vars.ARIO_NETWORK_PROCESS_ID }}?tab=source-code", "short": false } , @@ -165,7 
+165,7 @@ jobs: "fallback": "Failed to update IO Process!", "color": "danger", "title": "Details", - "text": 'The IO "${{ github.ref_name == 'main' && 'testnet' || 'devnet' }} Process ( ${{ vars.IO_NETWORK_PROCESS_ID }}) FAILED to update!', + "text": 'The IO "${{ github.ref_name == 'main' && 'testnet' || 'devnet' }} Process ( ${{ vars.ARIO_NETWORK_PROCESS_ID }}) FAILED to update!', "fields": [{ "title": "Network", "value": "${{ github.ref_name == 'main' && 'testnet' || 'devnet' }}", @@ -173,7 +173,7 @@ jobs: }, { "title": "Process ID", - "value": "${{ vars.IO_NETWORK_PROCESS_ID }}", + "value": "${{ vars.ARIO_NETWORK_PROCESS_ID }}", "short": true }, { diff --git a/.github/workflows/monitor.yaml b/.github/workflows/monitor.yaml index b6a21d92..f2976fc8 100644 --- a/.github/workflows/monitor.yaml +++ b/.github/workflows/monitor.yaml @@ -32,7 +32,7 @@ jobs: run: yarn monitor id: monitor env: - IO_PROCESS_ID: ${{ matrix.network == 'testnet' && 'agYcCFJtrMG6cqMuZfskIkFTGvUPddICmtQSBIoPdiA' || 'GaQrvEMKBpkjofgnBi_B3IgIDmY_XYelVLB6GcRGrHc' }} + ARIO_NETWORK_PROCESS_ID: ${{ matrix.network == 'testnet' && 'agYcCFJtrMG6cqMuZfskIkFTGvUPddICmtQSBIoPdiA' || 'GaQrvEMKBpkjofgnBi_B3IgIDmY_XYelVLB6GcRGrHc' }} - name: Notify Failure if: failure() diff --git a/package.json b/package.json index 97cf89af..d519420e 100644 --- a/package.json +++ b/package.json @@ -9,8 +9,8 @@ "test:unit": "rm -rf coverage && mkdir -p coverage && busted . 
&& luacov", "test:coverage": "rm -rf luacov-html && yarn test:unit && luacov --reporter html && open luacov-html/index.html", "monitor": "node --test tests/monitor/monitor.test.mjs", - "monitor:devnet": "IO_PROCESS_ID=GaQrvEMKBpkjofgnBi_B3IgIDmY_XYelVLB6GcRGrHc node --test tests/monitor/monitor.test.mjs", - "monitor:testnet": "IO_PROCESS_ID=agYcCFJtrMG6cqMuZfskIkFTGvUPddICmtQSBIoPdiA node --test tests/monitor/monitor.test.mjs", + "monitor:devnet": "ARIO_NETWORK_PROCESS_ID=GaQrvEMKBpkjofgnBi_B3IgIDmY_XYelVLB6GcRGrHc node --test tests/monitor/monitor.test.mjs", + "monitor:testnet": "ARIO_NETWORK_PROCESS_ID=agYcCFJtrMG6cqMuZfskIkFTGvUPddICmtQSBIoPdiA node --test tests/monitor/monitor.test.mjs", "evolve": "yarn build && node tools/evolve.mjs", "prepare": "husky" }, diff --git a/tests/monitor/monitor.test.mjs b/tests/monitor/monitor.test.mjs index 19061f75..0d4caa7f 100644 --- a/tests/monitor/monitor.test.mjs +++ b/tests/monitor/monitor.test.mjs @@ -10,7 +10,7 @@ Logger.default.setLogLevel('info'); export const mARIOPerARIO = 1_000_000; export const ARIOToMARIO = (amount) => amount * mARIOPerARIO; -const processId = process.env.IO_PROCESS_ID || ARIO_DEVNET_PROCESS_ID; +const processId = process.env.ARIO_NETWORK_PROCESS_ID || ARIO_DEVNET_PROCESS_ID; const io = ARIO.init({ process: new AOProcess({ processId, From 7edaece742599cb7819d691fd83a60bad4ed8aad Mon Sep 17 00:00:00 2001 From: dtfiedler Date: Fri, 20 Dec 2024 14:30:10 -0600 Subject: [PATCH 76/76] chore(evolve): use ARIO_NETWORK_PROCESS_ID in evolve script --- tools/evolve.mjs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/evolve.mjs b/tools/evolve.mjs index f63970a2..a47d10f3 100644 --- a/tools/evolve.mjs +++ b/tools/evolve.mjs @@ -6,7 +6,7 @@ import { execSync } from 'child_process'; const wallet = JSON.parse(process.env.WALLET); const signer = createAoSigner(new ArweaveSigner(wallet)); const networkProcess = new AOProcess({ - processId: process.env.IO_NETWORK_PROCESS_ID, // TODO: Update to
ARIO_NETWORK_PROCESS_ID + processId: process.env.ARIO_NETWORK_PROCESS_ID, ao: connect({ CU_URL: process.env.AO_CU_URL, }),