diff --git a/.solcover.js b/.solcover.js index c42fba8..bccb033 100644 --- a/.solcover.js +++ b/.solcover.js @@ -3,4 +3,5 @@ module.exports = { grep: '[Gg]as.*[Aa]nalysis', invert: true, }, + skipFiles: ['mocks/', 'interfaces/', 'libraries/SedaDataTypes.sol'], }; diff --git a/bun.lockb b/bun.lockb index b2e8442..31fec40 100755 Binary files a/bun.lockb and b/bun.lockb differ diff --git a/contracts/core/SedaCoreV1.sol b/contracts/core/SedaCoreV1.sol index 358cfc4..6e3aaf5 100644 --- a/contracts/core/SedaCoreV1.sol +++ b/contracts/core/SedaCoreV1.sol @@ -3,6 +3,7 @@ pragma solidity ^0.8.24; import {EnumerableSet} from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; +import {PausableUpgradeable} from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; import {UUPSUpgradeable} from "@openzeppelin/contracts-upgradeable/proxy/utils/UUPSUpgradeable.sol"; import {IRequestHandler} from "../interfaces/IRequestHandler.sol"; @@ -15,10 +16,26 @@ import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; /// @title SedaCoreV1 /// @notice Core contract for the Seda protocol, managing requests and results /// @dev Implements ResultHandler and RequestHandler functionalities, and manages active requests -contract SedaCoreV1 is ISedaCore, RequestHandlerBase, ResultHandlerBase, UUPSUpgradeable, OwnableUpgradeable { - using EnumerableSet for EnumerableSet.Bytes32Set; +contract SedaCoreV1 is + ISedaCore, + RequestHandlerBase, + ResultHandlerBase, + UUPSUpgradeable, + OwnableUpgradeable, + PausableUpgradeable +{ + // ============ Types & Constants============ + + struct RequestDetails { + address requestor; + uint256 timestamp; + uint256 requestFee; + uint256 resultFee; + uint256 batchFee; + uint256 gasLimit; + } - // ============ Constants ============ + using EnumerableSet for EnumerableSet.Bytes32Set; // Constant storage slot for the state following the ERC-7201 standard bytes32 private constant CORE_V1_STORAGE_SLOT = @@ -33,14 +50,12 @@ contract SedaCoreV1 is ISedaCore, RequestHandlerBase, ResultHandlerBase, UUPSUpg /// @custom:storage-location erc7201:sedacore.storage.v1 struct SedaCoreStorage { - // Enumerable Set to store the request IDs that are pending - // `pendingRequests` keeps track of all active data requests that have been posted but not yet fulfilled. - // This set is used to manage the lifecycle of requests, allowing easy retrieval and status tracking. - // When a request is posted, it is added to `pendingRequests`. - // When a result is posted and the request is fulfilled, it is removed from `pendingRequests` + // Tracks active data requests to ensure proper lifecycle management and prevent + // duplicate fulfillments. 
Requests are removed only after successful fulfillment
         EnumerableSet.Bytes32Set pendingRequests;
-        // Mapping to store request timestamps for pending DRs
-        mapping(bytes32 => uint256) requestTimestamps;
+        // Associates request IDs with their metadata to enable fee distribution and
+        // timestamp validation during result submission
+        mapping(bytes32 => RequestDetails) requestDetails;
     }
 
     // ============ Constructor & Initializer ============
@@ -54,9 +69,13 @@ contract SedaCoreV1 is ISedaCore, RequestHandlerBase, ResultHandlerBase, UUPSUpg
     /// @param sedaProverAddress The address of the Seda prover contract
     /// @dev This function replaces the constructor for proxy compatibility and can only be called once
     function initialize(address sedaProverAddress) external initializer {
-        __ResultHandler_init(sedaProverAddress);
-        __Ownable_init(msg.sender);
+        // Initialize inherited contracts
         __UUPSUpgradeable_init();
+        __Ownable_init(msg.sender);
+        __Pausable_init();
+
+        // Initialize derived contracts
+        __ResultHandler_init(sedaProverAddress);
     }
 
     // ============ Public Functions ============
@@ -65,11 +84,35 @@ contract SedaCoreV1 is ISedaCore, RequestHandlerBase, ResultHandlerBase, UUPSUpg
     /// @dev Overrides the base implementation to also add the request ID and timestamp to storage
     function postRequest(
         SedaDataTypes.RequestInputs calldata inputs
-    ) public override(RequestHandlerBase, IRequestHandler) returns (bytes32) {
-        bytes32 requestId = super.postRequest(inputs);
+    ) public payable override(RequestHandlerBase, IRequestHandler) whenNotPaused returns (bytes32) {
+        return postRequest(inputs, 0, 0, 0);
+    }
+
+    function postRequest(
+        SedaDataTypes.RequestInputs calldata inputs,
+        uint256 requestFee,
+        uint256 resultFee,
+        uint256 batchFee
+    ) public payable whenNotPaused returns (bytes32) {
+        // Validate that the sent ETH matches exactly the sum of all specified fees
+        // This prevents users from accidentally overpaying or underpaying fees
+        if (msg.value != requestFee + resultFee + batchFee) {
+            revert InvalidFeeAmount();
+        }
+
+        // Call the parent contract's postRequest base implementation
+        bytes32 requestId = RequestHandlerBase.postRequest(inputs);
+
+        // Store pending request and request details
         _addRequest(requestId);
-        // Store the request timestamp
-        _storageV1().requestTimestamps[requestId] = block.timestamp;
+        _storageV1().requestDetails[requestId] = RequestDetails({
+            requestor: msg.sender,
+            timestamp: block.timestamp,
+            requestFee: requestFee,
+            resultFee: resultFee,
+            batchFee: batchFee,
+            gasLimit: inputs.execGasLimit + inputs.tallyGasLimit
+        });
 
         return requestId;
     }
@@ -80,41 +123,155 @@ contract SedaCoreV1 is ISedaCore, RequestHandlerBase, ResultHandlerBase, UUPSUpg
         SedaDataTypes.Result calldata result,
         uint64 batchHeight,
         bytes32[] calldata proof
-    ) public override(ResultHandlerBase, IResultHandler) returns (bytes32) {
-        uint256 requestTimestamp = _storageV1().requestTimestamps[result.drId];
-        // Validate result timestamp comes after request timestamp
-        // Note: requestTimestamp = 0 for requests not tracked by this contract (always passes validation)
-        if (result.blockTimestamp <= requestTimestamp) {
-            revert InvalidResultTimestamp(result.drId, result.blockTimestamp, requestTimestamp);
+    ) public payable override(ResultHandlerBase, IResultHandler) whenNotPaused returns (bytes32) {
+        RequestDetails memory requestDetails = _storageV1().requestDetails[result.drId];
+
+        // Ensures results can't be submitted with timestamps from before the request was made,
+        // preventing potential replay
or front-running attacks + // Note: Validation always passes for non-tracked requests (where requestDetails.timestamp is 0) + if (result.blockTimestamp <= requestDetails.timestamp) { + revert InvalidResultTimestamp(result.drId, result.blockTimestamp, requestDetails.timestamp); } - bytes32 resultId = super.postResult(result, batchHeight, proof); + // Call parent contract's postResult implementation and retrieve both the result ID + // and the batch sender address for subsequent fee distribution logic + (bytes32 resultId, address batchSender) = super.postResultAndGetBatchSender(result, batchHeight, proof); + // Clean up state _removeRequest(result.drId); - delete _storageV1().requestTimestamps[result.drId]; + delete _storageV1().requestDetails[result.drId]; + + // Fee distribution: handles three types of fees (request, result, batch) + // and manages refunds back to the requestor when applicable + + // Amount to refund to requestor + uint256 refundAmount; + + // Request fee distribution: + // - if invalid payback address, send all request fee to requestor + // - if valid payback address, split request fee proportionally based on gas used vs gas limit + if (requestDetails.requestFee > 0) { + address payableAddress = result.paybackAddress.length == 20 + ? address(bytes20(result.paybackAddress)) + : address(0); + + if (payableAddress == address(0)) { + refundAmount += requestDetails.requestFee; + } else { + // Split request fee proportionally based on gas used vs gas limit + uint256 submitterFee = (result.gasUsed * requestDetails.requestFee) / requestDetails.gasLimit; + if (submitterFee > 0) { + _transferFee(payableAddress, submitterFee); + emit FeeDistributed(result.drId, payableAddress, submitterFee, ISedaCore.FeeType.REQUEST); + } + refundAmount += requestDetails.requestFee - submitterFee; + } + } + + // Result fee distribution: + // - send all result fee to `msg.sender` (result sender/solver) + if (requestDetails.resultFee > 0) { + _transferFee(msg.sender, requestDetails.resultFee); + emit FeeDistributed(result.drId, msg.sender, requestDetails.resultFee, ISedaCore.FeeType.RESULT); + } + + // Batch fee distribution: + // - if no batch sender, send all batch fee to requestor + // - if valid batch sender, send batch fee to batch sender + if (requestDetails.batchFee > 0) { + if (batchSender == address(0)) { + // If no batch sender, send all batch fee to requestor + refundAmount += requestDetails.batchFee; + } else { + // Send batch fee to batch sender + _transferFee(batchSender, requestDetails.batchFee); + emit FeeDistributed(result.drId, batchSender, requestDetails.batchFee, ISedaCore.FeeType.BATCH); + } + } + + // Aggregate refund to requestor containing: + // - unused request fees (when gas used < gas limit) + // - full request fee (when invalid payback address) + // - batch fee (when no batch sender) + if (refundAmount > 0) { + _transferFee(requestDetails.requestor, refundAmount); + emit FeeDistributed(result.drId, requestDetails.requestor, refundAmount, ISedaCore.FeeType.REFUND); + } return resultId; } + /// @inheritdoc ISedaCore + /// @dev Allows the owner to increase fees for a pending request + function increaseFees( + bytes32 requestId, + uint256 additionalRequestFee, + uint256 additionalResultFee, + uint256 additionalBatchFee + ) public payable override whenNotPaused { + // Validate ETH payment matches fee sum to prevent over/underpayment + if (msg.value != additionalRequestFee + additionalResultFee + additionalBatchFee) { + revert InvalidFeeAmount(); + } + + RequestDetails storage 
details = _storageV1().requestDetails[requestId]; + if (details.timestamp == 0) { + revert RequestNotFound(requestId); + } + + details.requestFee += additionalRequestFee; + details.resultFee += additionalResultFee; + details.batchFee += additionalBatchFee; + + emit FeesIncreased(requestId, additionalRequestFee, additionalResultFee, additionalBatchFee); + } + + /// @notice Pauses all contract operations + /// @dev Can only be called by the contract owner + /// @dev When paused, all state-modifying functions will revert + function pause() external onlyOwner { + _pause(); + } + + /// @notice Unpauses contract operations + /// @dev Can only be called by the contract owner + /// @dev Restores normal contract functionality after being paused + function unpause() external onlyOwner { + _unpause(); + } + // ============ Public View Functions ============ /// @notice Retrieves a list of active requests /// @dev This function is gas-intensive due to iteration over the pendingRequests array. /// Users should be cautious when using high `limit` values in production environments, as it can result in high gas consumption. + /// @dev This function will revert when the contract is paused /// @param offset The starting index in the pendingRequests array /// @param limit The maximum number of requests to return /// @return An array of SedaDataTypes.Request structs - function getPendingRequests(uint256 offset, uint256 limit) public view returns (SedaDataTypes.Request[] memory) { + function getPendingRequests( + uint256 offset, + uint256 limit + ) public view whenNotPaused returns (PendingRequest[] memory) { uint256 totalRequests = _storageV1().pendingRequests.length(); if (offset >= totalRequests) { - return new SedaDataTypes.Request[](0); + return new PendingRequest[](0); } uint256 actualLimit = (offset + limit > totalRequests) ? totalRequests - offset : limit; - SedaDataTypes.Request[] memory queriedPendingRequests = new SedaDataTypes.Request[](actualLimit); + PendingRequest[] memory queriedPendingRequests = new PendingRequest[](actualLimit); for (uint256 i = 0; i < actualLimit; i++) { bytes32 requestId = _storageV1().pendingRequests.at(offset + i); - queriedPendingRequests[i] = getRequest(requestId); + RequestDetails memory details = _storageV1().requestDetails[requestId]; + + queriedPendingRequests[i] = PendingRequest({ + request: getRequest(requestId), + requestor: details.requestor, + timestamp: details.timestamp, + requestFee: details.requestFee, + resultFee: details.resultFee, + batchFee: details.batchFee + }); } return queriedPendingRequests; @@ -149,6 +306,15 @@ contract SedaCoreV1 is ISedaCore, RequestHandlerBase, ResultHandlerBase, UUPSUpg _storageV1().pendingRequests.remove(requestId); } + /// @dev Helper function to safely transfer fees + /// @param recipient Address to receive the fee + /// @param amount Amount to transfer + function _transferFee(address recipient, uint256 amount) internal { + // Using low-level call instead of transfer() + (bool success, ) = payable(recipient).call{value: amount}(""); + if (!success) revert FeeTransferFailed(); + } + /// @dev Required override for UUPSUpgradeable. Ensures only the owner can upgrade the implementation. 
/// @inheritdoc UUPSUpgradeable /// @param newImplementation Address of the new implementation contract diff --git a/contracts/core/abstract/RequestHandlerBase.sol b/contracts/core/abstract/RequestHandlerBase.sol index de60e99..e08d0e4 100644 --- a/contracts/core/abstract/RequestHandlerBase.sol +++ b/contracts/core/abstract/RequestHandlerBase.sol @@ -35,7 +35,7 @@ abstract contract RequestHandlerBase is IRequestHandler { /// @inheritdoc IRequestHandler function postRequest( SedaDataTypes.RequestInputs calldata inputs - ) public virtual override(IRequestHandler) returns (bytes32) { + ) public payable virtual override(IRequestHandler) returns (bytes32) { if (inputs.replicationFactor == 0) { revert InvalidReplicationFactor(); } diff --git a/contracts/core/abstract/ResultHandlerBase.sol b/contracts/core/abstract/ResultHandlerBase.sol index 285e936..b91eb6d 100644 --- a/contracts/core/abstract/ResultHandlerBase.sol +++ b/contracts/core/abstract/ResultHandlerBase.sol @@ -58,7 +58,8 @@ abstract contract ResultHandlerBase is IResultHandler, Initializable { bytes32[] calldata proof ) external view returns (bytes32) { bytes32 resultId = SedaDataTypes.deriveResultId(result); - if (!_resultHandlerStorage().sedaProver.verifyResultProof(resultId, batchHeight, proof)) { + (bool isValid, ) = _resultHandlerStorage().sedaProver.verifyResultProof(resultId, batchHeight, proof); + if (!isValid) { revert InvalidResultProof(resultId); } @@ -75,36 +76,58 @@ abstract contract ResultHandlerBase is IResultHandler, Initializable { // ============ Public Functions ============ /// @inheritdoc IResultHandler + /// @dev This is left abstract as implementations need to decide how to handle the batch sender + /// @dev See postResultAndGetBatchSender for the core result posting logic function postResult( SedaDataTypes.Result calldata result, uint64 batchHeight, bytes32[] calldata proof - ) public virtual override(IResultHandler) returns (bytes32) { + ) public payable virtual override(IResultHandler) returns (bytes32); + + /// @inheritdoc IResultHandler + function getResult(bytes32 requestId) public view override(IResultHandler) returns (SedaDataTypes.Result memory) { + SedaDataTypes.Result memory result = _resultHandlerStorage().results[requestId]; + if (bytes(result.version).length == 0) { + revert ResultNotFound(requestId); + } + return result; + } + + // ============ Internal Functions ============ + + /// @notice Posts a result and returns both the result ID and batch sender address + /// @dev Similar to postResult but also returns the batch sender address for fee distribution + /// @param result The result data to post + /// @param batchHeight The height of the batch containing the result + /// @param proof The Merkle proof verifying the result + /// @return resultId The unique identifier for the posted result + /// @return batchSender The address of the solver that posted the batch + function postResultAndGetBatchSender( + SedaDataTypes.Result calldata result, + uint64 batchHeight, + bytes32[] calldata proof + ) internal returns (bytes32, address) { bytes32 resultId = SedaDataTypes.deriveResultId(result); if (_resultHandlerStorage().results[result.drId].drId != bytes32(0)) { revert ResultAlreadyExists(resultId); } - if (!_resultHandlerStorage().sedaProver.verifyResultProof(resultId, batchHeight, proof)) { + + (bool isValid, address batchSender) = _resultHandlerStorage().sedaProver.verifyResultProof( + resultId, + batchHeight, + proof + ); + + if (!isValid) { revert InvalidResultProof(resultId); } 
_resultHandlerStorage().results[result.drId] = result; emit ResultPosted(resultId); - return resultId; + return (resultId, batchSender); } - /// @inheritdoc IResultHandler - function getResult(bytes32 requestId) public view override(IResultHandler) returns (SedaDataTypes.Result memory) { - SedaDataTypes.Result memory result = _resultHandlerStorage().results[requestId]; - if (bytes(result.version).length == 0) { - revert ResultNotFound(requestId); - } - return _resultHandlerStorage().results[requestId]; - } - - // ============ Internal Functions ============ - /// @notice Returns the storage struct for the contract /// @dev Uses ERC-7201 storage pattern to access the storage struct at a specific slot /// @return s The storage struct containing the contract's state variables diff --git a/contracts/interfaces/IProver.sol b/contracts/interfaces/IProver.sol index 6adb8e7..754b139 100644 --- a/contracts/interfaces/IProver.sol +++ b/contracts/interfaces/IProver.sol @@ -6,7 +6,7 @@ import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; /// @title IProver Interface /// @notice Interface for the Prover contract in the Seda protocol interface IProver { - event BatchPosted(uint256 indexed batchHeight, bytes32 batchHash); + event BatchPosted(uint256 indexed batchHeight, bytes32 batchHash, address sender); /// @notice Gets the height of the most recently posted batch /// @return uint64 The height of the last batch, 0 if no batches exist @@ -31,5 +31,5 @@ interface IProver { bytes32 resultId, uint64 batchHeight, bytes32[] calldata merkleProof - ) external view returns (bool); + ) external view returns (bool, address); } diff --git a/contracts/interfaces/IRequestHandler.sol b/contracts/interfaces/IRequestHandler.sol index f75afff..b1f42d1 100644 --- a/contracts/interfaces/IRequestHandler.sol +++ b/contracts/interfaces/IRequestHandler.sol @@ -9,6 +9,7 @@ interface IRequestHandler { error InvalidReplicationFactor(); error RequestAlreadyExists(bytes32); error RequestNotFound(bytes32); + error FeeTransferFailed(); event RequestPosted(bytes32 indexed requestId); @@ -20,5 +21,5 @@ interface IRequestHandler { /// @notice Allows users to post a new data request. /// @param inputs The input parameters for the data request. /// @return requestId The unique identifier for the posted request. 
- function postRequest(SedaDataTypes.RequestInputs calldata inputs) external returns (bytes32); + function postRequest(SedaDataTypes.RequestInputs calldata inputs) external payable returns (bytes32); } diff --git a/contracts/interfaces/IResultHandler.sol b/contracts/interfaces/IResultHandler.sol index 6f086b0..f250c7d 100644 --- a/contracts/interfaces/IResultHandler.sol +++ b/contracts/interfaces/IResultHandler.sol @@ -26,7 +26,7 @@ interface IResultHandler { SedaDataTypes.Result calldata result, uint64 batchHeight, bytes32[] memory proof - ) external returns (bytes32); + ) external payable returns (bytes32); /// @notice Returns the address of the Seda prover contract /// @return The address of the Seda prover contract diff --git a/contracts/interfaces/ISedaCore.sol b/contracts/interfaces/ISedaCore.sol index f6c1c18..5cdfe90 100644 --- a/contracts/interfaces/ISedaCore.sol +++ b/contracts/interfaces/ISedaCore.sol @@ -8,9 +8,79 @@ import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; /// @title ISedaCoreV1 /// @notice Interface for the main Seda protocol contract that handles both requests and results interface ISedaCore is IResultHandler, IRequestHandler { + /// @notice Aggregates request data and fees to help solvers evaluate pending requests + /// @dev Used as return type for getPendingRequests() view function, not for storage + struct PendingRequest { + SedaDataTypes.Request request; + address requestor; + uint256 timestamp; + uint256 requestFee; + uint256 resultFee; + uint256 batchFee; + } + + /// @notice Enum representing different types of fee distributions + /// @dev Used to identify fee types in events and fee distribution logic + /// @param REQUEST Fee paid to solver submitting the data request to SEDA network + /// @param RESULT Fee paid to solver submitting the data result from SEDA network + /// @param BATCH Fee paid to solver that submitted the batch containing the result + /// @param REFUND Fee refunded back to the original requestor + enum FeeType { + REQUEST, + RESULT, + BATCH, + REFUND + } + + /// @notice Error thrown when the fee amount is not equal to the sum of the request, result, and batch fees + error InvalidFeeAmount(); + + /// @notice Emitted when fees are distributed for a data request and result + /// @param drId The unique identifier for the data request + /// @param recipient The address receiving the fee distribution + /// @param amount The amount of fees distributed to the recipient + event FeeDistributed(bytes32 indexed drId, address indexed recipient, uint256 amount, FeeType indexed feeType); + + /// @notice Emitted when fees are increased for a data request + /// @param drId The unique identifier for the data request + /// @param additionalRequestFee The additional request fee + /// @param additionalResultFee The additional result fee + /// @param additionalBatchFee The additional batch fee + event FeesIncreased( + bytes32 indexed drId, + uint256 additionalRequestFee, + uint256 additionalResultFee, + uint256 additionalBatchFee + ); + /// @notice Retrieves a paginated list of pending requests /// @param offset The starting position in the list /// @param limit The maximum number of requests to return - /// @return An array of Request structs - function getPendingRequests(uint256 offset, uint256 limit) external view returns (SedaDataTypes.Request[] memory); + /// @return An array of PendingRequest structs + function getPendingRequests(uint256 offset, uint256 limit) external view returns (PendingRequest[] memory); + + /// @notice Posts a request with 
associated fees
+    /// @param inputs The input parameters for the data request
+    /// @param requestFee Fee paid to the solver that submits the data request to the SEDA network
+    /// @param resultFee Fee for result submission
+    /// @param batchFee Fee for batch processing
+    /// @return requestId The unique identifier for the posted request
+    function postRequest(
+        SedaDataTypes.RequestInputs calldata inputs,
+        uint256 requestFee,
+        uint256 resultFee,
+        uint256 batchFee
+    ) external payable returns (bytes32);
+
+    /// @notice Increases the fees for an existing request. Additional fees are added on top of the current fees.
+    /// @param requestId The unique identifier of the request to update
+    /// @param additionalRequestFee Additional fee to add for request submission
+    /// @param additionalResultFee Additional fee to add for result submission
+    /// @param additionalBatchFee Additional fee to add for batch processing
+    function increaseFees(
+        bytes32 requestId,
+        uint256 additionalRequestFee,
+        uint256 additionalResultFee,
+        uint256 additionalBatchFee
+    ) external payable;
 }
diff --git a/contracts/mocks/MockProver.sol b/contracts/mocks/MockProver.sol
new file mode 100644
index 0000000..afcc31a
--- /dev/null
+++ b/contracts/mocks/MockProver.sol
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.24;
+
+import {MerkleProof} from "@openzeppelin/contracts/utils/cryptography/MerkleProof.sol";
+
+import {ProverBase} from "../provers/abstract/ProverBase.sol";
+import {SedaDataTypes} from "../libraries/SedaDataTypes.sol";
+
+/// @title MockProver
+/// @notice A mock implementation of ProverBase for testing purposes
+/// @dev Allows any batch to be posted without signature verification
+contract MockProver is ProverBase {
+    // ============ Storage ============
+    struct BatchData {
+        bytes32 resultsRoot;
+        address sender;
+    }
+
+    uint64 private _lastBatchHeight;
+    mapping(uint64 => BatchData) private _batches;
+
+    // ============ Constructor ============
+
+    /// @notice Constructor that sets the initial batch
+    /// @param initialBatch The initial batch data
+    constructor(SedaDataTypes.Batch memory initialBatch) {
+        _lastBatchHeight = initialBatch.batchHeight;
+        _batches[initialBatch.batchHeight] = BatchData({resultsRoot: initialBatch.resultsRoot, sender: address(0)});
+    }
+
+    // ============ External Functions ============
+
+    /// @notice Posts a new batch without any verification
+    /// @dev Ignores signatures and validator proofs, only checks batch height
+    function postBatch(
+        SedaDataTypes.Batch calldata newBatch,
+        bytes[] calldata, // signatures (ignored)
+        SedaDataTypes.ValidatorProof[] calldata // validatorProofs (ignored)
+    ) external override {
+        if (newBatch.batchHeight <= _lastBatchHeight) {
+            revert InvalidBatchHeight();
+        }
+
+        _lastBatchHeight = newBatch.batchHeight;
+        _batches[newBatch.batchHeight] = BatchData({resultsRoot: newBatch.resultsRoot, sender: msg.sender});
+
+        emit BatchPosted(newBatch.batchHeight, SedaDataTypes.deriveBatchId(newBatch), msg.sender);
+    }
+
+    /// @notice Verifies a result proof against a batch's results root
+    /// @dev For testing purposes, returns true only for existing batches with stored roots
+    function verifyResultProof(
+        bytes32 resultId,
+        uint64 batchHeight,
+        bytes32[] calldata merkleProof
+    ) external view override returns (bool, address) {
+        // Only return true if we have a stored batch at this height
+        BatchData memory batch = _batches[batchHeight];
+        bytes32 leaf = keccak256(abi.encodePacked(RESULT_DOMAIN_SEPARATOR, resultId));
+        return MerkleProof.verify(merkleProof,
batch.resultsRoot, leaf) ? (true, batch.sender) : (false, address(0)); + } + + /// @notice Returns the last batch height + function getLastBatchHeight() external view override returns (uint64) { + return _lastBatchHeight; + } +} diff --git a/contracts/mocks/Secp256k1ProverResettable.sol b/contracts/mocks/Secp256k1ProverResettable.sol index 2a374fe..2ec3191 100644 --- a/contracts/mocks/Secp256k1ProverResettable.sol +++ b/contracts/mocks/Secp256k1ProverResettable.sol @@ -19,9 +19,9 @@ contract Secp256k1ProverResettable is Secp256k1ProverV1 { function resetProverState(SedaDataTypes.Batch memory batch) external onlyOwner { // Reset storage to zero values Secp256k1ProverStorage storage s = _storageV1(); - s.batchToResultsRoot[batch.batchHeight] = batch.resultsRoot; + s.batches[batch.batchHeight] = BatchData({resultsRoot: batch.resultsRoot, sender: address(0)}); s.lastBatchHeight = batch.batchHeight; s.lastValidatorsRoot = batch.validatorsRoot; - emit BatchPosted(batch.batchHeight, SedaDataTypes.deriveBatchId(batch)); + emit BatchPosted(batch.batchHeight, SedaDataTypes.deriveBatchId(batch), msg.sender); } } diff --git a/contracts/mocks/SedaPermissioned.sol b/contracts/mocks/SedaPermissioned.sol index 8c7ffa5..e2301f6 100644 --- a/contracts/mocks/SedaPermissioned.sol +++ b/contracts/mocks/SedaPermissioned.sol @@ -25,6 +25,10 @@ contract SedaPermissioned is ISedaCore, RequestHandlerBase, AccessControl, Pausa bytes32 public constant RELAYER_ROLE = keccak256("RELAYER_ROLE"); bytes32 public constant ADMIN_ROLE = keccak256("ADMIN_ROLE"); + // ============ Errors ============ + + error FeesNotImplemented(); + // ============ State Variables ============ uint16 public maxReplicationFactor; @@ -64,7 +68,7 @@ contract SedaPermissioned is ISedaCore, RequestHandlerBase, AccessControl, Pausa SedaDataTypes.Result calldata result, uint64, bytes32[] calldata - ) external override onlyRole(RELAYER_ROLE) whenNotPaused returns (bytes32) { + ) external payable override(IResultHandler) onlyRole(RELAYER_ROLE) whenNotPaused returns (bytes32) { bytes32 resultId = SedaDataTypes.deriveResultId(result); if (results[result.drId].drId != bytes32(0)) { revert ResultAlreadyExists(resultId); @@ -95,12 +99,31 @@ contract SedaPermissioned is ISedaCore, RequestHandlerBase, AccessControl, Pausa // ============ Public Functions ============ + /// @inheritdoc IRequestHandler + /// @notice Posts a new request without any fees + /// @param inputs The request inputs containing the request parameters + /// @return The unique identifier of the posted request + function postRequest( + SedaDataTypes.RequestInputs calldata inputs + ) public payable override(RequestHandlerBase, IRequestHandler) returns (bytes32) { + return postRequest(inputs, 0, 0, 0); + } + + /// @inheritdoc ISedaCore /// @notice Posts a new request /// @param inputs The request inputs /// @return requestId The ID of the posted request function postRequest( - SedaDataTypes.RequestInputs calldata inputs - ) public override(IRequestHandler, RequestHandlerBase) whenNotPaused returns (bytes32) { + SedaDataTypes.RequestInputs calldata inputs, + uint256, + uint256, + uint256 + ) public payable override(ISedaCore) whenNotPaused returns (bytes32) { + // Check if amount is greater than 0 + if (msg.value != 0) { + revert FeesNotImplemented(); + } + // Check max replication factor first if (inputs.replicationFactor > maxReplicationFactor) { revert InvalidReplicationFactor(); @@ -115,26 +138,41 @@ contract SedaPermissioned is ISedaCore, RequestHandlerBase, AccessControl, Pausa return 
requestId; } + /// @inheritdoc ISedaCore /// @notice Retrieves a list of pending request IDs /// @param offset The starting index in the pendingRequests set /// @param limit The maximum number of request IDs to return /// @return An array of pending request IDs - function getPendingRequests(uint256 offset, uint256 limit) public view returns (SedaDataTypes.Request[] memory) { + function getPendingRequests(uint256 offset, uint256 limit) public view returns (PendingRequest[] memory) { uint256 totalRequests = pendingRequests.length(); if (offset >= totalRequests) { - return new SedaDataTypes.Request[](0); + return new PendingRequest[](0); } uint256 actualLimit = (offset + limit > totalRequests) ? totalRequests - offset : limit; - SedaDataTypes.Request[] memory queriedPendingRequests = new SedaDataTypes.Request[](actualLimit); + PendingRequest[] memory queriedPendingRequests = new PendingRequest[](actualLimit); for (uint256 i = 0; i < actualLimit; i++) { bytes32 requestId = pendingRequests.at(offset + i); - queriedPendingRequests[i] = getRequest(requestId); + + queriedPendingRequests[i] = PendingRequest({ + request: getRequest(requestId), + requestor: address(0), + timestamp: 0, + requestFee: 0, + resultFee: 0, + batchFee: 0 + }); } return queriedPendingRequests; } + /// @inheritdoc ISedaCore + /// @dev This is a mock implementation that does nothing + function increaseFees(bytes32, uint256, uint256, uint256) external payable override(ISedaCore) { + revert FeesNotImplemented(); + } + // ============ Admin Functions ============ /// @notice Adds a relayer diff --git a/contracts/provers/Secp256k1ProverV1.sol b/contracts/provers/Secp256k1ProverV1.sol index 6400639..df5cecb 100644 --- a/contracts/provers/Secp256k1ProverV1.sol +++ b/contracts/provers/Secp256k1ProverV1.sol @@ -5,6 +5,7 @@ import {ECDSA} from "@openzeppelin/contracts/utils/cryptography/ECDSA.sol"; import {Initializable} from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; import {MerkleProof} from "@openzeppelin/contracts/utils/cryptography/MerkleProof.sol"; import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; +import {PausableUpgradeable} from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; import {UUPSUpgradeable} from "@openzeppelin/contracts-upgradeable/proxy/utils/UUPSUpgradeable.sol"; import {ProverBase} from "./abstract/ProverBase.sol"; @@ -17,27 +18,38 @@ import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; /// - Increasing batch and block heights /// - Valid validator proofs and signatures /// - Sufficient voting power to meet the consensus threshold -contract Secp256k1ProverV1 is ProverBase, Initializable, UUPSUpgradeable, OwnableUpgradeable { +contract Secp256k1ProverV1 is ProverBase, Initializable, UUPSUpgradeable, OwnableUpgradeable, PausableUpgradeable { // ============ Constants ============ // The percentage of voting power required for consensus (66.666666%, represented as parts per 100,000,000) uint32 public constant CONSENSUS_PERCENTAGE = 66_666_666; + // Domain separator for Secp256k1 Merkle Tree leaves bytes1 internal constant SECP256K1_DOMAIN_SEPARATOR = 0x01; + // Constant storage slot for the state following the ERC-7201 standard bytes32 private constant PROVER_V1_STORAGE_SLOT = keccak256(abi.encode(uint256(keccak256("secp256k1prover.storage.v1")) - 1)) & ~bytes32(uint256(0xff)); // ============ Errors ============ + error ConsensusNotReached(); // ============ Storage ============ + struct BatchData { + bytes32 
resultsRoot;
+        address sender;
+    }
+
     /// @custom:storage-location secp256k1prover.storage.v1
     struct Secp256k1ProverStorage {
+        // Height of the most recently processed batch to ensure strictly increasing batch order
         uint64 lastBatchHeight;
+        // Merkle root of the current validator set, used to verify validator proofs in subsequent batches
         bytes32 lastValidatorsRoot;
-        mapping(uint64 => bytes32) batchToResultsRoot;
+        // Mapping of batch heights to batch data, including results root and sender address
+        mapping(uint64 => BatchData) batches;
     }
 
     // ============ Constructor & Initializer ============
@@ -54,13 +66,13 @@ contract Secp256k1ProverV1 is ProverBase, Initializable, UUPSUpgradeable, Ownabl
         // Initialize inherited contracts
         __Ownable_init(msg.sender);
         __UUPSUpgradeable_init();
-
+        __Pausable_init();
         // Existing initialization code
         Secp256k1ProverStorage storage s = _storageV1();
-        s.batchToResultsRoot[initialBatch.batchHeight] = initialBatch.resultsRoot;
+        s.batches[initialBatch.batchHeight] = BatchData({resultsRoot: initialBatch.resultsRoot, sender: address(0)});
         s.lastBatchHeight = initialBatch.batchHeight;
         s.lastValidatorsRoot = initialBatch.validatorsRoot;
-        emit BatchPosted(initialBatch.batchHeight, SedaDataTypes.deriveBatchId(initialBatch));
+        emit BatchPosted(initialBatch.batchHeight, SedaDataTypes.deriveBatchId(initialBatch), address(0));
     }
 
     // ============ External Functions ============
@@ -80,41 +92,59 @@ contract Secp256k1ProverV1 is ProverBase, Initializable, UUPSUpgradeable, Ownabl
         SedaDataTypes.Batch calldata newBatch,
         bytes[] calldata signatures,
         SedaDataTypes.ValidatorProof[] calldata validatorProofs
-    ) external override(ProverBase) {
+    ) external override(ProverBase) whenNotPaused {
         Secp256k1ProverStorage storage s = _storageV1();
-        // Check that new batch invariants hold
+        // Prevents replay attacks via strictly ordered batches
         if (newBatch.batchHeight <= s.lastBatchHeight) {
             revert InvalidBatchHeight();
         }
+        // Each signature needs a validator Merkle Proof
         if (signatures.length != validatorProofs.length) {
            revert MismatchedSignaturesAndProofs();
         }
-        // Derive Batch Id
         bytes32 batchId = SedaDataTypes.deriveBatchId(newBatch);
-        // Check that all validator proofs are valid and accumulate voting power
+        // Accumulate voting power from valid validators to ensure sufficient consensus
+        // Each validator must prove membership and provide a valid signature
         uint64 votingPower = 0;
         for (uint256 i = 0; i < validatorProofs.length; i++) {
+            // Verify validator is part of the current validator set using Merkle proof
             if (!_verifyValidatorProof(validatorProofs[i], s.lastValidatorsRoot)) {
                 revert InvalidValidatorProof();
             }
+            // Verify signature is valid and signed by the validator
             if (!_verifySignature(batchId, signatures[i], validatorProofs[i].signer)) {
                 revert InvalidSignature();
             }
             votingPower += validatorProofs[i].votingPower;
         }
-        // Check voting power consensus
+        // Check that voting power meets or exceeds the consensus threshold (2/3)
         if (votingPower < CONSENSUS_PERCENTAGE) {
             revert ConsensusNotReached();
         }
-        // Update current batch
+        // After consensus is reached, commit the new batch and update validator set
+        // This establishes the new state for future batch validations
         s.lastBatchHeight = newBatch.batchHeight;
         s.lastValidatorsRoot = newBatch.validatorsRoot;
-        s.batchToResultsRoot[newBatch.batchHeight] = newBatch.resultsRoot;
-        emit BatchPosted(newBatch.batchHeight, batchId);
+        s.batches[newBatch.batchHeight] = BatchData({resultsRoot: newBatch.resultsRoot, sender:
msg.sender}); + emit BatchPosted(newBatch.batchHeight, batchId, msg.sender); + } + + /// @notice Pauses all contract operations + /// @dev Can only be called by the contract owner + /// @dev When paused, all state-modifying functions will revert + function pause() external onlyOwner { + _pause(); + } + + /// @notice Unpauses contract operations + /// @dev Can only be called by the contract owner + /// @dev Restores normal contract functionality after being paused + function unpause() external onlyOwner { + _unpause(); } // ============ External View Functions ============ @@ -128,10 +158,10 @@ contract Secp256k1ProverV1 is ProverBase, Initializable, UUPSUpgradeable, Ownabl bytes32 resultId, uint64 batchHeight, bytes32[] calldata merkleProof - ) external view override(ProverBase) returns (bool) { - Secp256k1ProverStorage storage s = _storageV1(); + ) external view override(ProverBase) returns (bool, address) { + BatchData memory batch = _storageV1().batches[batchHeight]; bytes32 leaf = keccak256(abi.encodePacked(RESULT_DOMAIN_SEPARATOR, resultId)); - return MerkleProof.verify(merkleProof, s.batchToResultsRoot[batchHeight], leaf); + return MerkleProof.verify(merkleProof, batch.resultsRoot, leaf) ? (true, batch.sender) : (false, address(0)); } /// @notice Returns the last processed batch height @@ -146,13 +176,6 @@ contract Secp256k1ProverV1 is ProverBase, Initializable, UUPSUpgradeable, Ownabl return _storageV1().lastValidatorsRoot; } - /// @notice Returns the results root for a specific batch height - /// @param batchHeight The batch height to query - /// @return The results root for the specified batch - function getBatchResultsRoot(uint64 batchHeight) external view returns (bytes32) { - return _storageV1().batchToResultsRoot[batchHeight]; - } - // ============ Internal Functions ============ /// @notice Returns the storage struct for the contract diff --git a/contracts/provers/abstract/ProverBase.sol b/contracts/provers/abstract/ProverBase.sol index ddbf85e..f741ce0 100644 --- a/contracts/provers/abstract/ProverBase.sol +++ b/contracts/provers/abstract/ProverBase.sol @@ -34,7 +34,7 @@ abstract contract ProverBase is IProver { bytes32 resultId, uint64 batchHeight, bytes32[] calldata merkleProof - ) external view virtual override(IProver) returns (bool); + ) external view virtual override(IProver) returns (bool, address); /// @inheritdoc IProver function getLastBatchHeight() external view virtual override(IProver) returns (uint64); diff --git a/package.json b/package.json index 7f7c5b0..13590db 100644 --- a/package.json +++ b/package.json @@ -41,7 +41,7 @@ "prettier": "^3.4.2", "prettier-plugin-solidity": "^1.4.1", "rimraf": "^6.0.1", - "solhint": "^5.0.3", + "solhint": "^5.0.4", "valibot": "^0.42.1" }, "files": [ diff --git a/test/core/SedaCoreV1.test.ts b/test/core/SedaCoreV1.test.ts index 9a8626f..93dd129 100644 --- a/test/core/SedaCoreV1.test.ts +++ b/test/core/SedaCoreV1.test.ts @@ -3,40 +3,96 @@ import { SimpleMerkleTree } from '@openzeppelin/merkle-tree'; import { expect } from 'chai'; import { ethers, upgrades } from 'hardhat'; -import { compareRequests, compareResults, convertToRequestInputs } from '../helpers'; -import { computeResultLeafHash, deriveDataResultId, deriveRequestId, generateDataFixtures } from '../utils'; +import type { Secp256k1ProverV1, SedaCoreV1 } from '../../typechain-types'; +import { compareRequests, compareResults, convertPendingToRequestInputs } from '../helpers'; +import { + computeResultLeafHash, + computeValidatorLeafHash, + deriveBatchId, + 
deriveDataResultId, + deriveRequestId, + generateDataFixtures, +} from '../utils'; describe('SedaCoreV1', () => { async function deployCoreFixture() { - // Generate test fixtures and modify the last result's timestamp to be 1 second (1 unix timestamp) - // This simulates an invalid result with a timestamp from 1970-01-01T00:00:01Z + // Generate test fixtures const { requests, results } = generateDataFixtures(10); + + // Modify the last result's timestamp to be 1 second (1 unix timestamp) + // This simulates an invalid result with a timestamp from 1970-01-01T00:00:01Z results[results.length - 1].blockTimestamp = 1; + // Modify results to have: + // - a zero payback address + // - a non-zero payback address + // - a longer non-EVM-compatible payback address (40 bytes) + // - a shorter non-EVM-compatible payback address (10 bytes) + results[0].paybackAddress = ethers.ZeroAddress; + results[1].paybackAddress = '0x0123456789012345678901234567890123456789'; + results[2].paybackAddress = '0x01234567890123456789012345678901234567890123456789012345678901234567890123456789'; + results[3].paybackAddress = '0x01234567890123456789'; + + // Modify results to have different gas used + results[1].gasUsed = 500000; // 1/4 of the gas limit + results[4].gasUsed = 0; + results[4].paybackAddress = '0x0123456789012345678901234567890123456789'; + results[5].paybackAddress = '0x0123456789012345678901234567890123456789'; + results[5].gasUsed = BigInt(requests[5].execGasLimit) + BigInt(requests[5].tallyGasLimit); + const leaves = results.map(deriveDataResultId).map(computeResultLeafHash); // Create merkle tree and proofs - const tree = SimpleMerkleTree.of(leaves, { sortLeaves: true }); - const proofs = results.map((_, index) => tree.getProof(index)); + const resultsTree = SimpleMerkleTree.of(leaves, { sortLeaves: true }); + const proofs = results.map((_, index) => resultsTree.getProof(index)); - const data = { requests, results, proofs }; + // Create 2 validators + const wallets = Array.from({ length: 2 }, (_, i) => { + const seed = ethers.id(`validator${i}`); + return new ethers.Wallet(seed.slice(2, 66)); + }); + + const validators = wallets.map((wallet) => wallet.address); + const votingPowers = Array(wallets.length).fill(10_000_000); + votingPowers[0] = 90_000_000; // 90% voting power + + const validatorLeaves = validators.map((validator, index) => + computeValidatorLeafHash(validator, votingPowers[index]), + ); + + // Validators: Create merkle tree and proofs + const validatorsTree = SimpleMerkleTree.of(validatorLeaves, { + sortLeaves: true, + }); + const validatorProofs = validators.map((signer, index) => { + const proof = validatorsTree.getProof(index); + return { + signer, + votingPower: votingPowers[index], + merkleProof: proof, + }; + }); const initialBatch = { batchHeight: 0, blockHeight: 0, - validatorsRoot: ethers.ZeroHash, - resultsRoot: tree.root, + validatorsRoot: validatorsTree.root, + resultsRoot: resultsTree.root, provingMetadata: ethers.ZeroHash, }; const ProverFactory = await ethers.getContractFactory('Secp256k1ProverV1'); - const prover = await upgrades.deployProxy(ProverFactory, [initialBatch], { initializer: 'initialize' }); + const prover = await upgrades.deployProxy(ProverFactory, [initialBatch], { + initializer: 'initialize', + }); await prover.waitForDeployment(); const CoreFactory = await ethers.getContractFactory('SedaCoreV1'); const core = await upgrades.deployProxy(CoreFactory, [await prover.getAddress()], { initializer: 'initialize' }); await core.waitForDeployment(); + const data = 
{ requests, results, proofs, wallets, initialBatch, validatorProofs }; + return { prover, core, data }; } @@ -91,7 +147,7 @@ describe('SedaCoreV1', () => { await core.postRequest(data.requests[0]); let requests = await core.getPendingRequests(0, 1); expect(requests.length).to.equal(1); - compareRequests(requests[0], data.requests[0]); + compareRequests(requests[0].request, data.requests[0]); await core.postResult(data.results[0], 0, data.proofs[0]); requests = await core.getPendingRequests(0, 1); @@ -147,7 +203,7 @@ describe('SedaCoreV1', () => { const allRequests = await core.getPendingRequests(0, data.requests.length); for (let i = 0; i < data.requests.length; i++) { - compareRequests(allRequests[i], data.requests[i]); + compareRequests(allRequests[i].request, data.requests[i]); } }); @@ -195,7 +251,7 @@ describe('SedaCoreV1', () => { const gasUsed = await core.postResult.estimateGas(data.results[2], 0, data.proofs[2]); // This is rough esimate - expect(gasUsed).to.be.lessThan(300000); + expect(gasUsed).to.be.lessThan(500000); }); it('should maintain pending requests (with removals)', async () => { @@ -212,7 +268,7 @@ describe('SedaCoreV1', () => { // Verify that all requests are pending // (order should be preserved because there are no removals) - let pending = (await core.getPendingRequests(0, 10)).map(convertToRequestInputs); + let pending = (await core.getPendingRequests(0, 10)).map(convertPendingToRequestInputs); expect(pending.length).to.equal(5); expect(pending).to.deep.include.members(requests); @@ -224,7 +280,7 @@ describe('SedaCoreV1', () => { const expectedPending = [requests[1], requests[3], requests[4]]; // Retrieve pending requests (order is not preserved because there were 2 removals) - pending = (await core.getPendingRequests(0, 10)).map(convertToRequestInputs); + pending = (await core.getPendingRequests(0, 10)).map(convertPendingToRequestInputs); expect(pending.length).to.equal(3); expect(pending).to.deep.include.members(expectedPending); @@ -235,7 +291,7 @@ describe('SedaCoreV1', () => { const finalPending = [requests[1], requests[3]]; // Retrieve final pending requests - pending = (await core.getPendingRequests(0, 10)).map(convertToRequestInputs); + pending = (await core.getPendingRequests(0, 10)).map(convertPendingToRequestInputs); expect(pending.length).to.equal(2); expect(pending).to.deep.include.members(finalPending); }); @@ -261,8 +317,427 @@ describe('SedaCoreV1', () => { expect(pendingRequests).to.not.include(data.requests[2]); // Verify the remaining requests are still in order - compareRequests(pendingRequests[0], data.requests[0]); - compareRequests(pendingRequests[1], data.requests[1]); + compareRequests(pendingRequests[0].request, data.requests[0]); + compareRequests(pendingRequests[1].request, data.requests[1]); + }); + }); + + describe('fee management', () => { + describe('basic fee scenarios', () => { + it('should enforce exact fee payment', async () => { + const { core, data } = await loadFixture(deployCoreFixture); + const fees = { + request: ethers.parseEther('1.0'), + result: ethers.parseEther('2.0'), + batch: ethers.parseEther('3.0'), + }; + const totalFee = fees.request + fees.result + fees.batch; + + await expect( + core.postRequest(data.requests[0], fees.request, fees.result, fees.batch, { + value: totalFee - ethers.parseEther('0.5'), + }), + ).to.be.revertedWithCustomError(core, 'InvalidFeeAmount'); + + await expect(core.postRequest(data.requests[0], fees.request, fees.result, fees.batch, { value: totalFee })).to + .not.be.reverted; + }); + + 
it('should distribute request fees based on gas used', async () => { + const { core, data } = await loadFixture(deployCoreFixture); + const requestFee = ethers.parseEther('10'); + const [requestor] = await ethers.getSigners(); + const paybackAddress = data.results[1].paybackAddress; + + await core.postRequest(data.requests[1], requestFee, 0, 0, { value: requestFee }); + + const totalGas = BigInt(data.requests[1].execGasLimit) + BigInt(data.requests[1].tallyGasLimit); + const expectedPayback = (requestFee * BigInt(data.results[1].gasUsed)) / totalGas; + const expectedRefund = requestFee - expectedPayback; + + await expect(core.postResult(data.results[1], 0, data.proofs[1])) + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[1].drId, paybackAddress, expectedPayback, 0) + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[1].drId, requestor.address, expectedRefund, 3); + }); + + it('should pay result fees to result submitter', async () => { + const { core, data } = await loadFixture(deployCoreFixture); + const resultFee = ethers.parseEther('2.0'); + const [, resultSubmitter] = await ethers.getSigners(); + + await core.postRequest(data.requests[0], 0, resultFee, 0, { value: resultFee }); + + await expect((core.connect(resultSubmitter) as SedaCoreV1).postResult(data.results[0], 0, data.proofs[0])) + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[0].drId, resultSubmitter.address, resultFee, 1); + }); + + it('should pay batch fees to batch submitter', async () => { + const { core, prover, data } = await loadFixture(deployCoreFixture); + const batchFee = ethers.parseEther('3.0'); + const [, batchSender] = await ethers.getSigners(); + + await core.postRequest(data.requests[0], 0, 0, batchFee, { value: batchFee }); + + const batch = { ...data.initialBatch, batchHeight: 1 }; + const signatures = [await data.wallets[0].signingKey.sign(deriveBatchId(batch)).serialized]; + await (prover.connect(batchSender) as Secp256k1ProverV1).postBatch(batch, signatures, [ + data.validatorProofs[0], + ]); + + await expect(core.postResult(data.results[0], 1, data.proofs[0])) + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[0].drId, batchSender.address, batchFee, 2); + }); + + it('should refund batch fee if no batch is used', async () => { + const { core, data } = await loadFixture(deployCoreFixture); + const batchFee = ethers.parseEther('3.0'); + const [requestor] = await ethers.getSigners(); + + await core.postRequest(data.requests[0], 0, 0, batchFee, { value: batchFee }); + + await expect(core.postResult(data.results[0], 0, data.proofs[0])) + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[0].drId, requestor.address, batchFee, 3); + }); + }); + + describe('edge cases', () => { + it('should handle invalid payback addresses', async () => { + const { core, data } = await loadFixture(deployCoreFixture); + const requestFee = ethers.parseEther('1.0'); + const [requestor] = await ethers.getSigners(); + + // Test zero address + await core.postRequest(data.requests[0], requestFee, 0, 0, { value: requestFee }); + await expect(core.postResult(data.results[0], 0, data.proofs[0])) + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[0].drId, requestor.address, requestFee, 3); + }); + + it('should handle zero fees gracefully', async () => { + const { core, data } = await loadFixture(deployCoreFixture); + await core.postRequest(data.requests[0], 0, 0, 0, { value: 0 }); + await expect(core.postResult(data.results[0], 0, data.proofs[0])).to.not.be.reverted; + }); + }); + + 
describe('comprehensive fee scenarios', () => { + it('should handle all fee types in a single transaction', async () => { + const { core, prover, data } = await loadFixture(deployCoreFixture); + const [requestor, resultSubmitter, batchSubmitter] = await ethers.getSigners(); + + // Set up fees + const fees = { + request: ethers.parseEther('1.0'), + result: ethers.parseEther('2.0'), + batch: ethers.parseEther('3.0'), + }; + const totalFee = fees.request + fees.result + fees.batch; + + // Record initial balances + const initialBalances = { + requestor: await ethers.provider.getBalance(requestor.address), + resultSubmitter: await ethers.provider.getBalance(resultSubmitter.address), + batchSubmitter: await ethers.provider.getBalance(batchSubmitter.address), + payback: await ethers.provider.getBalance(data.results[1].paybackAddress.toString()), + }; + + // Post request with all fees + await core.postRequest(data.requests[1], fees.request, fees.result, fees.batch, { value: totalFee }); + + // Submit batch + const batch = { ...data.initialBatch, batchHeight: 1 }; + const signatures = [await data.wallets[0].signingKey.sign(deriveBatchId(batch)).serialized]; + await (prover.connect(batchSubmitter) as Secp256k1ProverV1).postBatch(batch, signatures, [ + data.validatorProofs[0], + ]); + + // Calculate expected request fee distribution + const totalGas = BigInt(data.requests[1].execGasLimit) + BigInt(data.requests[1].tallyGasLimit); + const expectedPayback = (fees.request * BigInt(data.results[1].gasUsed)) / totalGas; + const expectedRefund = fees.request - expectedPayback; + + // Submit result and verify all fee distributions + await expect((core.connect(resultSubmitter) as SedaCoreV1).postResult(data.results[1], 1, data.proofs[1])) + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[1].drId, data.results[1].paybackAddress, expectedPayback, 0) // Request fee to executor + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[1].drId, resultSubmitter.address, fees.result, 1) // Result fee to result submitter + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[1].drId, batchSubmitter.address, fees.batch, 2) // Batch fee to batch submitter + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[1].drId, requestor.address, expectedRefund, 3); // Remaining request fee refund + + // Verify final balances (accounting for gas costs with approximate checks) + const finalBalances = { + requestor: await ethers.provider.getBalance(requestor.address), + resultSubmitter: await ethers.provider.getBalance(resultSubmitter.address), + batchSubmitter: await ethers.provider.getBalance(batchSubmitter.address), + payback: await ethers.provider.getBalance(data.results[1].paybackAddress.toString()), + }; + + expect(finalBalances.payback - initialBalances.payback).to.equal(expectedPayback); + expect(finalBalances.resultSubmitter - initialBalances.resultSubmitter).to.be.closeTo( + fees.result, + ethers.parseEther('0.01'), // Allow for gas costs + ); + expect(finalBalances.batchSubmitter - initialBalances.batchSubmitter).to.be.closeTo( + fees.batch, + ethers.parseEther('0.01'), // Allow for gas costs + ); + expect(initialBalances.requestor - finalBalances.requestor).to.be.closeTo( + totalFee - expectedRefund, + ethers.parseEther('0.01'), // Allow for gas costs + ); + }); + }); + + describe('payback address handling', () => { + it('should handle non-standard payback address lengths', async () => { + const { core, data } = await loadFixture(deployCoreFixture); + const requestFee = ethers.parseEther('1.0'); 
+ const [requestor] = await ethers.getSigners(); + + // Test short payback address (10 bytes) + await core.postRequest(data.requests[3], requestFee, 0, 0, { value: requestFee }); + await expect(core.postResult(data.results[3], 0, data.proofs[3])) + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[3].drId, requestor.address, requestFee, 3); + + // Test long payback address (40 bytes) + await core.postRequest(data.requests[2], requestFee, 0, 0, { value: requestFee }); + await expect(core.postResult(data.results[2], 0, data.proofs[2])) + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[2].drId, requestor.address, requestFee, 3); + }); + }); + + describe('gas usage edge cases', () => { + it('should handle zero gas usage', async () => { + const { core, data } = await loadFixture(deployCoreFixture); + const requestFee = ethers.parseEther('1.0'); + const [requestor] = await ethers.getSigners(); + + await core.postRequest(data.requests[4], requestFee, 0, 0, { value: requestFee }); + await expect(core.postResult(data.results[4], 0, data.proofs[4])) + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[4].drId, requestor.address, requestFee, 3); + }); + + it('should handle gas usage equal to limit', async () => { + const { core, data } = await loadFixture(deployCoreFixture); + const [, resultSolver] = await ethers.getSigners(); + const requestFee = ethers.parseEther('1.0'); + const resultFee = ethers.parseEther('1.0'); + + await core.postRequest(data.requests[5], requestFee, resultFee, 0, { value: requestFee + resultFee }); + await expect((core.connect(resultSolver) as SedaCoreV1).postResult(data.results[5], 0, data.proofs[5])) + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[5].drId, data.results[5].paybackAddress, requestFee, 0) + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[5].drId, resultSolver.address, resultFee, 1); + }); + }); + + describe('fee increase', () => { + it('should allow increasing fees for pending requests', async () => { + const { prover, core, data } = await loadFixture(deployCoreFixture); + const fees = { + request: ethers.parseEther('1.0'), + result: ethers.parseEther('2.0'), + batch: ethers.parseEther('3.0'), + }; + const totalFee = fees.request + fees.result + fees.batch; + const additionalFees = { + request: ethers.parseEther('0.5'), + result: ethers.parseEther('1.0'), + batch: ethers.parseEther('1.5'), + }; + const totalAdditionalFee = additionalFees.request + additionalFees.result + additionalFees.batch; + const [requestor, resultSubmitter, batchSubmitter] = await ethers.getSigners(); + + // Post request with all fees + await core.postRequest(data.requests[1], fees.request, fees.result, fees.batch, { value: totalFee }); + + // Increase fees + await core.increaseFees( + data.results[1].drId, + additionalFees.request, + additionalFees.result, + additionalFees.batch, + { value: totalAdditionalFee }, + ); + + // Submit batch + const batch = { ...data.initialBatch, batchHeight: 1 }; + const signatures = [await data.wallets[0].signingKey.sign(deriveBatchId(batch)).serialized]; + await (prover.connect(batchSubmitter) as Secp256k1ProverV1).postBatch(batch, signatures, [ + data.validatorProofs[0], + ]); + + // Calculate expected request fee distribution + const totalGas = BigInt(data.requests[1].execGasLimit) + BigInt(data.requests[1].tallyGasLimit); + const expectedPayback = ((fees.request + additionalFees.request) * BigInt(data.results[1].gasUsed)) / totalGas; + const expectedRefund = fees.request + additionalFees.request - 
expectedPayback; + + // Submit result and verify all fee distributions + await expect((core.connect(resultSubmitter) as SedaCoreV1).postResult(data.results[1], 1, data.proofs[1])) + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[1].drId, data.results[1].paybackAddress, expectedPayback, 0) // Request fee to executor + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[1].drId, resultSubmitter.address, fees.result + additionalFees.result, 1) // Result fee to result submitter + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[1].drId, batchSubmitter.address, fees.batch + additionalFees.batch, 2) // Batch fee to batch submitter + .to.emit(core, 'FeeDistributed') + .withArgs(data.results[1].drId, requestor.address, expectedRefund, 3); // Remaining request fee refund + }); + + it('should reject fee increase for non-existent request', async () => { + const { core } = await loadFixture(deployCoreFixture); + const nonExistentRequestId = ethers.randomBytes(32); + + await expect( + core.increaseFees( + nonExistentRequestId, + ethers.parseEther('1.0'), + ethers.parseEther('1.0'), + ethers.parseEther('1.0'), + { value: ethers.parseEther('3.0') }, + ), + ).to.be.revertedWithCustomError(core, 'RequestNotFound'); + }); + + it('should reject fee increase with incorrect payment amount', async () => { + const { core, data } = await loadFixture(deployCoreFixture); + + // First post a request + await core.postRequest(data.requests[0], 0, 0, 0); + const requestId = await deriveRequestId(data.requests[0]); + + // Try to increase fees with incorrect amount + await expect( + core.increaseFees( + requestId, + ethers.parseEther('1.0'), + ethers.parseEther('1.0'), + ethers.parseEther('1.0'), + { value: ethers.parseEther('2.0') }, // Sending less than total additional fees + ), + ).to.be.revertedWithCustomError(core, 'InvalidFeeAmount'); + }); + }); + }); + + describe('pause functionality', () => { + it('should allow owner to pause and unpause', async () => { + const { core } = await loadFixture(deployCoreFixture); + const [owner] = await ethers.getSigners(); + + expect(await core.paused()).to.be.false; + + await expect((core.connect(owner) as SedaCoreV1).pause()) + .to.emit(core, 'Paused') + .withArgs(owner.address); + + expect(await core.paused()).to.be.true; + + await expect((core.connect(owner) as SedaCoreV1).unpause()) + .to.emit(core, 'Unpaused') + .withArgs(owner.address); + + expect(await core.paused()).to.be.false; + }); + + it('should prevent non-owner from pausing/unpausing', async () => { + const { core } = await loadFixture(deployCoreFixture); + const [, nonOwner] = await ethers.getSigners(); + + await expect((core.connect(nonOwner) as SedaCoreV1).pause()).to.be.revertedWithCustomError( + core, + 'OwnableUnauthorizedAccount', + ); + + await expect((core.connect(nonOwner) as SedaCoreV1).unpause()).to.be.revertedWithCustomError( + core, + 'OwnableUnauthorizedAccount', + ); + }); + + it('should prevent operations while paused', async () => { + const { core, data } = await loadFixture(deployCoreFixture); + const [owner] = await ethers.getSigners(); + + // Pause the contract + await (core.connect(owner) as SedaCoreV1).pause(); + + // Test postRequest + await expect(core.postRequest(data.requests[0])).to.be.revertedWithCustomError(core, 'EnforcedPause'); + + // Test postRequest with fees + await expect( + core.postRequest(data.requests[0], ethers.parseEther('1'), 0, 0, { value: ethers.parseEther('1') }), + ).to.be.revertedWithCustomError(core, 'EnforcedPause'); + + // Test postResult 
+ await expect(core.postResult(data.results[0], 0, data.proofs[0])).to.be.revertedWithCustomError( + core, + 'EnforcedPause', + ); + + // Test increaseFees + await expect( + core.increaseFees(data.results[0].drId, ethers.parseEther('1'), 0, 0, { value: ethers.parseEther('1') }), + ).to.be.revertedWithCustomError(core, 'EnforcedPause'); + }); + + it('should revert getPendingRequests while paused', async () => { + const { core, data } = await loadFixture(deployCoreFixture); + const [owner] = await ethers.getSigners(); + + // Post some requests + await core.postRequest(data.requests[0]); + await core.postRequest(data.requests[1]); + + // Verify requests are visible + let requests = await core.getPendingRequests(0, 10); + expect(requests.length).to.equal(2); + + // Pause the contract + await (core.connect(owner) as SedaCoreV1).pause(); + + // Verify getPendingRequests reverts while paused + await expect(core.getPendingRequests(0, 10)).to.be.revertedWithCustomError(core, 'EnforcedPause'); + + // Unpause and verify requests are visible again + await (core.connect(owner) as SedaCoreV1).unpause(); + requests = await core.getPendingRequests(0, 10); + expect(requests.length).to.equal(2); + }); + + it('should resume operations after unpausing', async () => { + const { core, data } = await loadFixture(deployCoreFixture); + const [owner] = await ethers.getSigners(); + + // Pause the contract + await (core.connect(owner) as SedaCoreV1).pause(); + + // Unpause the contract + await (core.connect(owner) as SedaCoreV1).unpause(); + + // Should now be able to perform operations + await expect(core.postRequest(data.requests[0])) + .to.emit(core, 'RequestPosted') + .withArgs(await deriveRequestId(data.requests[0])); + + await expect(core.postResult(data.results[0], 0, data.proofs[0])).to.emit(core, 'ResultPosted'); + + // Verify the request was removed after posting result + const requests = await (core.connect(owner) as SedaCoreV1).getPendingRequests(0, 10); + expect(requests.length).to.equal(0); }); }); }); diff --git a/test/helpers.ts b/test/helpers.ts index db56584..e0cf5f0 100644 --- a/test/helpers.ts +++ b/test/helpers.ts @@ -1,23 +1,32 @@ import { expect } from 'chai'; import type { CoreRequestTypes, CoreResultTypes, ProverDataTypes } from '../ts-types'; -// Function to convert an unformatted tuple result to a formatted struct -export function convertToRequestInputs( +// Function to convert an unformatted tuple result to a formatted struct. 
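+// The tuple is the raw value returned by `getPendingRequests` for a single pending request.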
+// The unformated pending request is a tuple of the following format: +// ``` +// [ +// [version, execProgramId, execInputs, execGasLimit, tallyProgramId, tallyInputs, tallyGasLimit, replicationFactor, consensusFilter, gasPrice, memo] +// [requestor, timestamp, requestFee, resultFee, batchFee] +// ] +// ``` +// @param pending - The unformatted tuple result to convert +// @returns The formatted struct +export function convertPendingToRequestInputs( // biome-ignore lint/suspicious/noExplicitAny: Explicit any type is necessary to handle the unformatted tuple result - request: any, + pending: any, ): CoreRequestTypes.RequestInputsStruct { return { - //version: unformatted[0], - execProgramId: request[1], - execInputs: request[2], - execGasLimit: request[3], - tallyProgramId: request[4], - tallyInputs: request[5].toString(), - tallyGasLimit: request[6], - replicationFactor: Number(request[7]), - consensusFilter: request[8].toString(), - gasPrice: request[9], - memo: request[10], + //version: unformatted[0][0], + execProgramId: pending[0][1], + execInputs: pending[0][2], + execGasLimit: pending[0][3], + tallyProgramId: pending[0][4], + tallyInputs: pending[0][5].toString(), + tallyGasLimit: pending[0][6], + replicationFactor: Number(pending[0][7]), + consensusFilter: pending[0][8].toString(), + gasPrice: pending[0][9], + memo: pending[0][10], }; } diff --git a/test/mocks/SedaPermissioned.test.ts b/test/mocks/SedaPermissioned.test.ts index ef0d43e..400f282 100644 --- a/test/mocks/SedaPermissioned.test.ts +++ b/test/mocks/SedaPermissioned.test.ts @@ -1,7 +1,7 @@ import { loadFixture } from '@nomicfoundation/hardhat-network-helpers'; import { expect } from 'chai'; import { ethers } from 'hardhat'; -import { compareRequests, compareResults, convertToRequestInputs } from '../helpers'; +import { compareRequests, compareResults, convertPendingToRequestInputs } from '../helpers'; import { deriveDataResultId, deriveRequestId, generateDataFixtures } from '../utils'; describe('SedaPermissioned', () => { @@ -37,7 +37,7 @@ describe('SedaPermissioned', () => { // Verify the request is in the pending list const pendingRequests = await core.getPendingRequests(0, 10); expect(pendingRequests).to.have.lengthOf(1); - compareRequests(pendingRequests[0], requests[0]); + compareRequests(pendingRequests[0].request, requests[0]); // Verify the request details const storedRequest = await core.getRequest(expectedRequestId); @@ -99,20 +99,20 @@ describe('SedaPermissioned', () => { requestIds.push(requestId); } - let pending = (await core.getPendingRequests(0, 10)).map(convertToRequestInputs); + let pending = (await core.getPendingRequests(0, 10)).map(convertPendingToRequestInputs); expect(pending.length).to.equal(5); expect(pending).to.deep.include.members(requests); await core.connect(signers.relayer).postResult(results[0], 0, []); await core.connect(signers.relayer).postResult(results[2], 0, []); - pending = (await core.getPendingRequests(0, 10)).map(convertToRequestInputs); + pending = (await core.getPendingRequests(0, 10)).map(convertPendingToRequestInputs); expect(pending.length).to.equal(3); expect(pending).to.deep.include.members([requests[1], requests[3], requests[4]]); await core.connect(signers.relayer).postResult(results[4], 0, []); - pending = (await core.getPendingRequests(0, 10)).map(convertToRequestInputs); + pending = (await core.getPendingRequests(0, 10)).map(convertPendingToRequestInputs); expect(pending).to.have.lengthOf(2); expect(pending).to.deep.include.members([requests[1], requests[3]]); }); diff 
--git a/test/prover/Secp256k1ProverV1.test.ts b/test/prover/Secp256k1ProverV1.test.ts index ef6a60f..00c324e 100644 --- a/test/prover/Secp256k1ProverV1.test.ts +++ b/test/prover/Secp256k1ProverV1.test.ts @@ -4,6 +4,7 @@ import { expect } from 'chai'; import type { Wallet } from 'ethers'; import { ethers, upgrades } from 'hardhat'; import type { ProverDataTypes } from '../../ts-types'; +import type { Secp256k1ProverV1 } from '../../typechain-types'; import { computeResultLeafHash, computeValidatorLeafHash, @@ -132,9 +133,12 @@ describe('Secp256k1ProverV1', () => { const { newBatch, signatures, newBatchId } = await generateAndSignBatch(wallets, data.initialBatch, [0]); - await expect(prover.postBatch(newBatch, signatures, [data.validatorProofs[0]])) + const [batchSender] = await ethers.getSigners(); + await expect( + (prover.connect(batchSender) as Secp256k1ProverV1).postBatch(newBatch, signatures, [data.validatorProofs[0]]), + ) .to.emit(prover, 'BatchPosted') - .withArgs(newBatch.batchHeight, newBatchId); + .withArgs(newBatch.batchHeight, newBatchId, batchSender.address); }); it('should fail to update a batch with 1 validator (25% voting power)', async () => { @@ -238,11 +242,14 @@ describe('Secp256k1ProverV1', () => { }; const newBatchId = deriveBatchId(batch); const signatures = [await wallets[0].signingKey.sign(newBatchId).serialized]; - await prover.postBatch(batch, signatures, [data.validatorProofs[0]]); + + const [batchSender] = await ethers.getSigners(); + await (prover.connect(batchSender) as Secp256k1ProverV1).postBatch(batch, signatures, [data.validatorProofs[0]]); // Verify a valid proof - const isValid = await prover.verifyResultProof(resultIds[1], 1, resultsTree.getProof(1)); + const [isValid, sender] = await prover.verifyResultProof(resultIds[1], 1, resultsTree.getProof(1)); expect(isValid).to.be.true; + expect(sender).to.equal(batchSender.address); }); it('should reject an invalid result proof', async () => { @@ -265,8 +272,9 @@ describe('Secp256k1ProverV1', () => { await prover.postBatch(batch, signatures, [data.validatorProofs[0]]); // Verify an invalid proof - const isValid = await prover.verifyResultProof(resultIds[0], 1, resultsTree.getProof(1)); + const [isValid, batchSender] = await prover.verifyResultProof(resultIds[0], 1, resultsTree.getProof(1)); expect(isValid).to.be.false; + expect(batchSender).to.equal(ethers.ZeroAddress); }); }); @@ -291,7 +299,7 @@ describe('Secp256k1ProverV1', () => { await prover.postBatch(batch, signatures, [data.validatorProofs[0]]); // Verify a valid proof - const resultBatch = await prover.verifyResultProof(resultIds[0], batch.batchHeight, resultsTree.getProof(0)); + const [resultBatch] = await prover.verifyResultProof(resultIds[0], batch.batchHeight, resultsTree.getProof(0)); expect(resultBatch).to.be.true; }); @@ -315,8 +323,13 @@ describe('Secp256k1ProverV1', () => { await prover.postBatch(batch1, signatures1, [data.validatorProofs[0]]); // Verify an invalid proof - const resultBatch1 = await prover.verifyResultProof(resultIds[0], batch1.batchHeight, resultsTree.getProof(0)); + const [resultBatch1, batchSender] = await prover.verifyResultProof( + resultIds[0], + batch1.batchHeight, + resultsTree.getProof(0), + ); expect(resultBatch1).to.be.false; + expect(batchSender).to.equal(ethers.ZeroAddress); }); }); @@ -369,4 +382,78 @@ describe('Secp256k1ProverV1', () => { expect(prover).to.emit(prover, 'BatchPosted').withArgs(testBatch.batchHeight, expectedBatchId); }); }); + + describe('pause functionality', () => { + it('should allow owner 
to pause and unpause', async () => { + const { prover } = await loadFixture(deployProverFixture); + const [owner] = await ethers.getSigners(); + + expect(await prover.paused()).to.be.false; + + await expect((prover.connect(owner) as Secp256k1ProverV1).pause()) + .to.emit(prover, 'Paused') + .withArgs(owner.address); + + expect(await prover.paused()).to.be.true; + + await expect((prover.connect(owner) as Secp256k1ProverV1).unpause()) + .to.emit(prover, 'Unpaused') + .withArgs(owner.address); + + expect(await prover.paused()).to.be.false; + }); + + it('should prevent non-owner from pausing/unpausing', async () => { + const { prover } = await loadFixture(deployProverFixture); + const [, nonOwner] = await ethers.getSigners(); + + await expect((prover.connect(nonOwner) as Secp256k1ProverV1).pause()).to.be.revertedWithCustomError( + prover, + 'OwnableUnauthorizedAccount', + ); + + await expect((prover.connect(nonOwner) as Secp256k1ProverV1).unpause()).to.be.revertedWithCustomError( + prover, + 'OwnableUnauthorizedAccount', + ); + }); + + it('should prevent postBatch while paused', async () => { + const { prover, wallets, data } = await loadFixture(deployProverFixture); + const [owner] = await ethers.getSigners(); + + // Pause the contract + await (prover.connect(owner) as Secp256k1ProverV1).pause(); + + // Try to post a batch while paused + const { newBatch, signatures } = await generateAndSignBatch(wallets, data.initialBatch, [0]); + await expect(prover.postBatch(newBatch, signatures, [data.validatorProofs[0]])).to.be.revertedWithCustomError( + prover, + 'EnforcedPause', + ); + }); + + it('should resume operations after unpausing', async () => { + const { prover, wallets, data } = await loadFixture(deployProverFixture); + const [owner] = await ethers.getSigners(); + + // Pause the contract + await (prover.connect(owner) as Secp256k1ProverV1).pause(); + + // Try to post a batch while paused + const { newBatch, signatures } = await generateAndSignBatch(wallets, data.initialBatch, [0]); + await expect(prover.postBatch(newBatch, signatures, [data.validatorProofs[0]])).to.be.revertedWithCustomError( + prover, + 'EnforcedPause', + ); + + // Unpause the contract + await (prover.connect(owner) as Secp256k1ProverV1).unpause(); + + // Should now be able to post batch + await expect(prover.postBatch(newBatch, signatures, [data.validatorProofs[0]])) + .to.emit(prover, 'BatchPosted') + .withArgs(newBatch.batchHeight, deriveBatchId(newBatch), owner.address); + }); + }); }); diff --git a/test/utils.ts b/test/utils.ts index 02e8ff7..21ab135 100644 --- a/test/utils.ts +++ b/test/utils.ts @@ -106,7 +106,7 @@ export function generateDataFixtures(length: number): { result: ethers.keccak256(ethers.toUtf8Bytes('SUCCESS')), blockHeight: 0, blockTimestamp: Math.floor(Date.now() / 1000) + 3600, - gasUsed: 0, + gasUsed: 1000000n, paybackAddress: ethers.ZeroAddress, sedaPayload: ethers.ZeroHash, };
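Note on the fee arithmetic asserted in the SedaCoreV1 tests above: the request fee is expected to be split pro rata by reported gas usage, with the executor's payback address receiving requestFee * gasUsed / (execGasLimit + tallyGasLimit) and the requestor refunded the remainder, while result and batch fees go in full to the result and batch submitters. A minimal TypeScript sketch of the request-fee split, mirroring the expectedPayback / expectedRefund calculations in the tests (the helper name expectedFeeSplit is illustrative and not part of this changeset):

function expectedFeeSplit(requestFee: bigint, gasUsed: bigint, execGasLimit: bigint, tallyGasLimit: bigint) {
  // Total gas budget for the request, matching `totalGas` in the tests above
  const totalGas = execGasLimit + tallyGasLimit;
  // Pro-rata payout to the executor's payback address; bigint division floors the result
  const payback = (requestFee * gasUsed) / totalGas;
  // Whatever is not paid to the executor is refunded to the requestor (FeeDistributed type 3)
  const refund = requestFee - payback;
  return { payback, refund };
}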