diff --git a/.prettierrc b/.prettierrc new file mode 100644 index 0000000..3a9c8a9 --- /dev/null +++ b/.prettierrc @@ -0,0 +1,15 @@ +{ + "printWidth": 120, + "singleQuote": true, + "trailingComma": "all", + "arrowParens": "avoid", + "overrides": [ + { + "files": "*.sol", + "options": { + "singleQuote": false + } + } + ], + "plugins": ["prettier-plugin-solidity"] +} \ No newline at end of file diff --git a/bun.lockb b/bun.lockb index 578eb77..b2e8442 100755 Binary files a/bun.lockb and b/bun.lockb differ diff --git a/contracts/abstract/RequestHandlerBase.sol b/contracts/abstract/RequestHandlerBase.sol deleted file mode 100644 index 56e5c2e..0000000 --- a/contracts/abstract/RequestHandlerBase.sol +++ /dev/null @@ -1,22 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.9; - -import {IRequestHandler} from "../interfaces/IRequestHandler.sol"; -import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; - -abstract contract RequestHandlerBase is IRequestHandler { - /// @inheritdoc IRequestHandler - function postRequest( - SedaDataTypes.RequestInputs calldata inputs - ) external virtual override(IRequestHandler) returns (bytes32); - - /// @inheritdoc IRequestHandler - function getRequest( - bytes32 requestId - ) - external - view - virtual - override(IRequestHandler) - returns (SedaDataTypes.Request memory); -} diff --git a/contracts/abstract/ResultHandlerBase.sol b/contracts/abstract/ResultHandlerBase.sol deleted file mode 100644 index b30ebed..0000000 --- a/contracts/abstract/ResultHandlerBase.sol +++ /dev/null @@ -1,29 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.9; - -import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; -import {IProver} from "../interfaces/IProver.sol"; -import {IResultHandler} from "../interfaces/IResultHandler.sol"; - -abstract contract ResultHandlerBase is IResultHandler { - IProver public sedaProver; - - /// @notice Initializes the ResultHandlerBase contract - /// @dev Sets the address of the SEDA Prover contract - /// @param sedaProverAddress The address of the SEDA Prover contract - constructor(address sedaProverAddress) { - sedaProver = IProver(sedaProverAddress); - } - - /// @inheritdoc IResultHandler - function postResult( - SedaDataTypes.Result calldata result, - uint64 batchHeight, - bytes32[] calldata proof - ) external virtual override(IResultHandler) returns (bytes32); - - /// @inheritdoc IResultHandler - function getResult( - bytes32 requestId - ) external view virtual override(IResultHandler) returns (SedaDataTypes.Result memory); -} diff --git a/contracts/core/RequestHandler.sol b/contracts/core/RequestHandler.sol deleted file mode 100644 index fd0a245..0000000 --- a/contracts/core/RequestHandler.sol +++ /dev/null @@ -1,70 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.9; - -import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; -import {RequestHandlerBase} from "../abstract/RequestHandlerBase.sol"; - -/// @title RequestHandler -/// @notice Implements the RequestHandlerBase for managing Seda protocol requests -contract RequestHandler is RequestHandlerBase { - // Mapping of request IDs to Request structs - mapping(bytes32 => SedaDataTypes.Request) public requests; - - /// @inheritdoc RequestHandlerBase - function postRequest( - SedaDataTypes.RequestInputs calldata inputs - ) public virtual override(RequestHandlerBase) returns (bytes32) { - if (inputs.replicationFactor == 0) { - revert InvalidReplicationFactor(); - } - - bytes32 requestId = SedaDataTypes.deriveRequestId(inputs); - if 
(bytes(requests[requestId].version).length != 0) { - revert RequestAlreadyExists(requestId); - } - - requests[requestId] = SedaDataTypes.Request({ - version: SedaDataTypes.VERSION, - execProgramId: inputs.execProgramId, - execInputs: inputs.execInputs, - execGasLimit: inputs.execGasLimit, - tallyProgramId: inputs.tallyProgramId, - tallyInputs: inputs.tallyInputs, - tallyGasLimit: inputs.tallyGasLimit, - replicationFactor: inputs.replicationFactor, - consensusFilter: inputs.consensusFilter, - gasPrice: inputs.gasPrice, - memo: inputs.memo - }); - - emit RequestPosted(requestId); - return requestId; - } - - /// @inheritdoc RequestHandlerBase - function getRequest( - bytes32 requestId - ) - external - view - override(RequestHandlerBase) - returns (SedaDataTypes.Request memory) - { - SedaDataTypes.Request memory request = requests[requestId]; - // Version field is always set - if (bytes(request.version).length == 0) { - revert RequestNotFound(requestId); - } - - return requests[requestId]; - } - - /// @notice Derives a request ID from the given inputs - /// @param inputs The request inputs - /// @return The derived request ID - function deriveRequestId( - SedaDataTypes.RequestInputs calldata inputs - ) public pure returns (bytes32) { - return SedaDataTypes.deriveRequestId(inputs); - } -} diff --git a/contracts/core/ResultHandler.sol b/contracts/core/ResultHandler.sol deleted file mode 100644 index 0e2d7da..0000000 --- a/contracts/core/ResultHandler.sol +++ /dev/null @@ -1,83 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.9; - -import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; -import {ResultHandlerBase} from "../abstract/ResultHandlerBase.sol"; - -/// @title ResultHandler -/// @notice Implements the ResultHandlerBase for managing Seda protocol results -contract ResultHandler is ResultHandlerBase { - // Mapping of request IDs to Result structs - mapping(bytes32 => SedaDataTypes.Result) public results; - - /// @notice Initializes the ResultHandler contract - /// @dev Sets up the contract with the provided Seda prover address - /// @param sedaProverAddress The address of the Seda prover contract - constructor( - address sedaProverAddress - ) ResultHandlerBase(sedaProverAddress) {} - - /// @inheritdoc ResultHandlerBase - function postResult( - SedaDataTypes.Result calldata result, - uint64 batchHeight, - bytes32[] calldata proof - ) public virtual override(ResultHandlerBase) returns (bytes32) { - bytes32 resultId = SedaDataTypes.deriveResultId(result); - if (results[result.drId].drId != bytes32(0)) { - revert ResultAlreadyExists(resultId); - } - if (!sedaProver.verifyResultProof(resultId, batchHeight, proof)) { - revert InvalidResultProof(resultId); - } - - results[result.drId] = result; - - emit ResultPosted(resultId); - return resultId; - } - - /// @inheritdoc ResultHandlerBase - function getResult( - bytes32 requestId - ) - public - view - override(ResultHandlerBase) - returns (SedaDataTypes.Result memory) - { - SedaDataTypes.Result memory result = results[requestId]; - if (bytes(result.version).length == 0) { - revert ResultNotFound(requestId); - } - - return results[requestId]; - } - - /// @notice Verifies the result without storing it - /// @param result The result to verify - /// @param batchHeight The height of the batch the result belongs to - /// @param proof The proof associated with the result - /// @return A boolean indicating whether the result is valid - function verifyResult( - SedaDataTypes.Result calldata result, - uint64 batchHeight, - bytes32[] 
calldata proof - ) public view returns (bytes32) { - bytes32 resultId = SedaDataTypes.deriveResultId(result); - if (!sedaProver.verifyResultProof(resultId, batchHeight, proof)) { - revert InvalidResultProof(resultId); - } - - return resultId; - } - - /// @notice Derives a result ID from the given result - /// @param result The result data - /// @return The derived result ID - function deriveResultId( - SedaDataTypes.Result calldata result - ) public pure returns (bytes32) { - return SedaDataTypes.deriveResultId(result); - } -} diff --git a/contracts/core/Secp256k1Prover.sol b/contracts/core/Secp256k1Prover.sol deleted file mode 100644 index 8d0b696..0000000 --- a/contracts/core/Secp256k1Prover.sol +++ /dev/null @@ -1,145 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.9; - -import {ECDSA} from "@openzeppelin/contracts/utils/cryptography/ECDSA.sol"; -import {MerkleProof} from "@openzeppelin/contracts/utils/cryptography/MerkleProof.sol"; -import {ProverBase} from "../abstract/ProverBase.sol"; -import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; - -/// @title Secp256k1Prover -/// @notice Implements the ProverBase for Secp256k1 signature verification in the Seda protocol -/// @dev This contract manages batch updates and result proof verification using Secp256k1 signatures. -/// Batch validity is determined by consensus among validators, requiring: -/// - Increasing batch and block heights -/// - Valid validator proofs and signatures -/// - Sufficient voting power to meet the consensus threshold -contract Secp256k1Prover is ProverBase { - error ConsensusNotReached(); - - // The percentage of voting power required for consensus (66.666666%, represented as parts per 100,000,000) - uint32 public constant CONSENSUS_PERCENTAGE = 66_666_666; - // Domain separator for Secp256k1 Merkle Tree leaves - bytes1 internal constant SECP256K1_DOMAIN_SEPARATOR = 0x01; - - // Mapping to store results roots by their batch height - mapping(uint64 => bytes32) public batchToResultsRoot; - - // The height of the last processed batch - uint64 public lastBatchHeight; - // The validator root of the last processed batch - bytes32 public lastValidatorsRoot; - - /// @notice Initializes the contract with a predefined batch size - /// @param initialBatch The initial batch data - constructor(SedaDataTypes.Batch memory initialBatch) { - batchToResultsRoot[initialBatch.batchHeight] = initialBatch.resultsRoot; - lastBatchHeight = initialBatch.batchHeight; - lastValidatorsRoot = initialBatch.validatorsRoot; - emit BatchPosted( - initialBatch.batchHeight, - SedaDataTypes.deriveBatchId(initialBatch) - ); - } - - /// @inheritdoc ProverBase - /// @notice Posts a new batch with new data, ensuring validity through consensus - /// @dev Validates a new batch by checking: - /// 1. Higher batch height than the current batch - /// 2. Matching number of signatures and validator proofs - /// 3. Valid validator proofs (verified against the batch's validator root) - /// 4. Valid signatures (signed by the corresponding validators) - /// 5. 
Sufficient voting power to meet or exceed the consensus threshold - /// @param newBatch The new batch data to be validated and set as current - /// @param signatures Array of signatures from validators approving the new batch - /// @param validatorProofs Array of validator proofs corresponding to the signatures - function postBatch( - SedaDataTypes.Batch calldata newBatch, - bytes[] calldata signatures, - SedaDataTypes.ValidatorProof[] calldata validatorProofs - ) public override { - // Check that new batch invariants hold - if (newBatch.batchHeight <= lastBatchHeight) { - revert InvalidBatchHeight(); - } - if (signatures.length != validatorProofs.length) { - revert MismatchedSignaturesAndProofs(); - } - - // Derive Batch Id - bytes32 batchId = SedaDataTypes.deriveBatchId(newBatch); - - // Check that all validator proofs are valid and accumulate voting power - uint64 votingPower = 0; - for (uint256 i = 0; i < validatorProofs.length; i++) { - if ( - !_verifyValidatorProof(validatorProofs[i], lastValidatorsRoot) - ) { - revert InvalidValidatorProof(); - } - if ( - !_verifySignature( - batchId, - signatures[i], - validatorProofs[i].signer - ) - ) { - revert InvalidSignature(); - } - votingPower += validatorProofs[i].votingPower; - } - - // Check voting power consensus - if (votingPower < CONSENSUS_PERCENTAGE) { - revert ConsensusNotReached(); - } - - // Update current batch - lastBatchHeight = newBatch.batchHeight; - lastValidatorsRoot = newBatch.validatorsRoot; - batchToResultsRoot[newBatch.batchHeight] = newBatch.resultsRoot; - emit BatchPosted(newBatch.batchHeight, batchId); - } - - /// @inheritdoc ProverBase - function verifyResultProof( - bytes32 resultId, - uint64 batchHeight, - bytes32[] calldata merkleProof - ) public view override returns (bool) { - bytes32 leaf = keccak256( - abi.encodePacked(RESULT_DOMAIN_SEPARATOR, resultId) - ); - return MerkleProof.verify(merkleProof, batchToResultsRoot[batchHeight], leaf); - } - - /// @notice Verifies a validator proof - /// @param proof The validator proof to verify - /// @return bool Returns true if the proof is valid, false otherwise - function _verifyValidatorProof( - SedaDataTypes.ValidatorProof memory proof, - bytes32 validatorsRoot - ) internal pure returns (bool) { - bytes32 leaf = keccak256( - abi.encodePacked( - SECP256K1_DOMAIN_SEPARATOR, - proof.signer, - proof.votingPower - ) - ); - - return MerkleProof.verify(proof.merkleProof, validatorsRoot, leaf); - } - - /// @notice Verifies a signature against a message hash and its address - /// @param messageHash The hash of the message that was signed - /// @param signature The signature to verify - /// @param signer The validator Secp256k1 address signer - /// @return bool Returns true if the signature is valid, false otherwise - function _verifySignature( - bytes32 messageHash, - bytes calldata signature, - address signer - ) internal pure returns (bool) { - return ECDSA.recover(messageHash, signature) == signer; - } -} diff --git a/contracts/core/SedaCorePermissioned.sol b/contracts/core/SedaCorePermissioned.sol index 9c1e4fd..df24297 100644 --- a/contracts/core/SedaCorePermissioned.sol +++ b/contracts/core/SedaCorePermissioned.sol @@ -1,35 +1,33 @@ // SPDX-License-Identifier: MIT -pragma solidity ^0.8.9; +pragma solidity ^0.8.24; import {AccessControl} from "@openzeppelin/contracts/access/AccessControl.sol"; import {EnumerableSet} from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; import {Pausable} from "@openzeppelin/contracts/utils/Pausable.sol"; import {IResultHandler} 
from "../interfaces/IResultHandler.sol"; -import {RequestHandlerBase} from "../abstract/RequestHandlerBase.sol"; +import {RequestHandlerBase} from "./abstract/RequestHandlerBase.sol"; import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; /// @title SedaCorePermissioned /// @notice Core contract for the Seda protocol with permissioned access, managing requests and results -/// @dev Implements RequestHandlerBase, IResultHandler, AccessControl, Pausable, and ReentrancyGuard functionalities -contract SedaCorePermissioned is - RequestHandlerBase, - IResultHandler, - AccessControl, - Pausable -{ +/// @dev Implements RequestHandlerBase, IResultHandler, AccessControl, and Pausable functionalities +contract SedaCorePermissioned is RequestHandlerBase, IResultHandler, AccessControl, Pausable { using EnumerableSet for EnumerableSet.Bytes32Set; - // Constants + // ============ Constants ============ + bytes32 public constant RELAYER_ROLE = keccak256("RELAYER_ROLE"); bytes32 public constant ADMIN_ROLE = keccak256("ADMIN_ROLE"); - // State variables + // ============ State Variables ============ + uint16 public maxReplicationFactor; - mapping(bytes32 => SedaDataTypes.Request) public requests; mapping(bytes32 => SedaDataTypes.Result) public results; EnumerableSet.Bytes32Set private pendingRequests; + // ============ Constructor ============ + /// @notice Contract constructor /// @param relayers The initial list of relayer addresses to be granted the RELAYER_ROLE /// @param initialMaxReplicationFactor The initial maximum replication factor @@ -47,57 +45,19 @@ contract SedaCorePermissioned is maxReplicationFactor = initialMaxReplicationFactor; } + // ============ External Functions ============ + /// @notice Sets the maximum replication factor that can be used for requests /// @param newMaxReplicationFactor The new maximum replication factor - function setMaxReplicationFactor( - uint16 newMaxReplicationFactor - ) external onlyRole(ADMIN_ROLE) { + function setMaxReplicationFactor(uint16 newMaxReplicationFactor) external onlyRole(ADMIN_ROLE) { maxReplicationFactor = newMaxReplicationFactor; } - /// @notice Posts a new request - /// @param inputs The request inputs - /// @return requestId The ID of the posted request - function postRequest( - SedaDataTypes.RequestInputs calldata inputs - ) external override whenNotPaused returns (bytes32) { - uint16 replicationFactor = inputs.replicationFactor; - if ( - replicationFactor > maxReplicationFactor || replicationFactor == 0 - ) { - revert InvalidReplicationFactor(); - } - - bytes32 requestId = SedaDataTypes.deriveRequestId(inputs); - if (bytes(requests[requestId].version).length != 0) { - revert RequestAlreadyExists(requestId); - } - - requests[requestId] = SedaDataTypes.Request({ - version: SedaDataTypes.VERSION, - execProgramId: inputs.execProgramId, - execInputs: inputs.execInputs, - execGasLimit: inputs.execGasLimit, - tallyProgramId: inputs.tallyProgramId, - tallyInputs: inputs.tallyInputs, - tallyGasLimit: inputs.tallyGasLimit, - replicationFactor: inputs.replicationFactor, - consensusFilter: inputs.consensusFilter, - gasPrice: inputs.gasPrice, - memo: inputs.memo - }); - - _addPendingRequest(requestId); - - emit RequestPosted(requestId); - return requestId; - } - /// @notice Posts a result for a request /// @param result The result data function postResult( SedaDataTypes.Result calldata result, - uint64 , + uint64, bytes32[] calldata ) external override onlyRole(RELAYER_ROLE) whenNotPaused returns (bytes32) { bytes32 resultId = 
SedaDataTypes.deriveResultId(result); @@ -110,26 +70,10 @@ contract SedaCorePermissioned is return resultId; } - /// @notice Retrieves a stored request - /// @param requestId The ID of the request to retrieve - /// @return The requested data - function getRequest( - bytes32 requestId - ) external view override returns (SedaDataTypes.Request memory) { - SedaDataTypes.Request memory request = requests[requestId]; - if (bytes(request.version).length == 0) { - revert RequestNotFound(requestId); - } - - return requests[requestId]; - } - /// @notice Retrieves a result by its ID /// @param requestId The unique identifier of the result /// @return The result data associated with the given ID - function getResult( - bytes32 requestId - ) external view override returns (SedaDataTypes.Result memory) { + function getResult(bytes32 requestId) external view override returns (SedaDataTypes.Result memory) { if (results[requestId].drId == bytes32(0)) { revert ResultNotFound(requestId); } @@ -137,33 +81,50 @@ contract SedaCorePermissioned is return results[requestId]; } + // ============ Public Functions ============ + + /// @notice Posts a new request + /// @param inputs The request inputs + /// @return requestId The ID of the posted request + function postRequest( + SedaDataTypes.RequestInputs calldata inputs + ) public override(RequestHandlerBase) whenNotPaused returns (bytes32) { + // Check max replication factor first + if (inputs.replicationFactor > maxReplicationFactor) { + revert InvalidReplicationFactor(); + } + + // Call parent implementation which handles the rest + bytes32 requestId = super.postRequest(inputs); + + // Add to pending requests (unique to this implementation) + _addPendingRequest(requestId); + + return requestId; + } + /// @notice Retrieves a list of pending request IDs /// @param offset The starting index in the pendingRequests set /// @param limit The maximum number of request IDs to return /// @return An array of pending request IDs - function getPendingRequests( - uint256 offset, - uint256 limit - ) public view returns (SedaDataTypes.Request[] memory) { + function getPendingRequests(uint256 offset, uint256 limit) public view returns (SedaDataTypes.Request[] memory) { uint256 totalRequests = pendingRequests.length(); if (offset >= totalRequests) { return new SedaDataTypes.Request[](0); } - uint256 actualLimit = (offset + limit > totalRequests) - ? totalRequests - offset - : limit; - SedaDataTypes.Request[] memory queriedPendingRequests = new SedaDataTypes.Request[]( - actualLimit - ); + uint256 actualLimit = (offset + limit > totalRequests) ? 
totalRequests - offset : limit; + SedaDataTypes.Request[] memory queriedPendingRequests = new SedaDataTypes.Request[](actualLimit); for (uint256 i = 0; i < actualLimit; i++) { - bytes32 requestId = pendingRequests.at(offset + i); // Get request ID - queriedPendingRequests[i] = requests[requestId]; + bytes32 requestId = pendingRequests.at(offset + i); + queriedPendingRequests[i] = getRequest(requestId); } return queriedPendingRequests; } + // ============ Admin Functions ============ + /// @notice Adds a relayer /// @param account The address of the relayer to add function addRelayer(address account) external onlyRole(ADMIN_ROLE) { @@ -186,6 +147,8 @@ contract SedaCorePermissioned is _unpause(); } + // ============ Internal Functions ============ + /// @notice Adds a request ID to the pendingRequests set /// @param requestId The ID of the request to add function _addPendingRequest(bytes32 requestId) internal { diff --git a/contracts/core/SedaCoreV1.sol b/contracts/core/SedaCoreV1.sol index a908027..4841683 100644 --- a/contracts/core/SedaCoreV1.sol +++ b/contracts/core/SedaCoreV1.sol @@ -1,88 +1,144 @@ // SPDX-License-Identifier: MIT -pragma solidity ^0.8.9; +pragma solidity ^0.8.24; import {EnumerableSet} from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; +import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; +import {UUPSUpgradeable} from "@openzeppelin/contracts-upgradeable/proxy/utils/UUPSUpgradeable.sol"; + +import {IRequestHandler} from "../interfaces/IRequestHandler.sol"; +import {IResultHandler} from "../interfaces/IResultHandler.sol"; +import {ISedaCore} from "../interfaces/ISedaCore.sol"; +import {RequestHandlerBase} from "./abstract/RequestHandlerBase.sol"; +import {ResultHandlerBase} from "./abstract/ResultHandlerBase.sol"; import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; -import {ResultHandler} from "./ResultHandler.sol"; -import {RequestHandler} from "./RequestHandler.sol"; /// @title SedaCoreV1 /// @notice Core contract for the Seda protocol, managing requests and results /// @dev Implements ResultHandler and RequestHandler functionalities, and manages active requests -contract SedaCoreV1 is RequestHandler, ResultHandler { +contract SedaCoreV1 is ISedaCore, RequestHandlerBase, ResultHandlerBase, UUPSUpgradeable, OwnableUpgradeable { using EnumerableSet for EnumerableSet.Bytes32Set; - // Enumerable Set to store the request IDs that are pending - // `pendingRequests` keeps track of all active data requests that have been posted but not yet fulfilled. - // This set is used to manage the lifecycle of requests, allowing easy retrieval and status tracking. - // When a request is posted, it is added to `pendingRequests`. - // When a result is posted and the request is fulfilled, it is removed from `pendingRequests` - EnumerableSet.Bytes32Set private pendingRequests; + // ============ Constants ============ - /// @notice Initializes the SedaCoreV1 contract - /// @param sedaProverAddress The address of the Seda prover contract - constructor(address sedaProverAddress) ResultHandler(sedaProverAddress) {} + // Constant storage slot for the state following the ERC-7201 standard + bytes32 private constant CORE_V1_STORAGE_SLOT = + keccak256(abi.encode(uint256(keccak256("sedacore.storage.v1")) - 1)) & ~bytes32(uint256(0xff)); - /// @notice Retrieves a list of active requests - /// @dev This function is gas-intensive due to iteration over the pendingRequests array. 
- /// Users should be cautious when using high `limit` values in production environments, as it can result in high gas consumption. - /// @param offset The starting index in the pendingRequests array - /// @param limit The maximum number of requests to return - /// @return An array of SedaDataTypes.Request structs - function getPendingRequests( - uint256 offset, - uint256 limit - ) public view returns (SedaDataTypes.Request[] memory) { - uint256 totalRequests = pendingRequests.length(); - if (offset >= totalRequests) { - return new SedaDataTypes.Request[](0); - } + // ============ Errors ============ - uint256 actualLimit = (offset + limit > totalRequests) - ? totalRequests - offset - : limit; - SedaDataTypes.Request[] - memory queriedPendingRequests = new SedaDataTypes.Request[]( - actualLimit - ); - for (uint256 i = 0; i < actualLimit; i++) { - bytes32 requestId = pendingRequests.at(offset + i); - queriedPendingRequests[i] = requests[requestId]; - } + // Error thrown when a result is posted with a timestamp before the corresponding request + error InvalidResultTimestamp(bytes32 drId, uint256 resultTimestamp, uint256 requestTimestamp); - return queriedPendingRequests; + // ============ Storage ============ + + /// @custom:storage-location erc7201:sedacore.storage.v1 + struct SedaCoreStorage { + // Enumerable Set to store the request IDs that are pending + // `pendingRequests` keeps track of all active data requests that have been posted but not yet fulfilled. + // This set is used to manage the lifecycle of requests, allowing easy retrieval and status tracking. + // When a request is posted, it is added to `pendingRequests`. + // When a result is posted and the request is fulfilled, it is removed from `pendingRequests` + EnumerableSet.Bytes32Set pendingRequests; + // Mapping to store request timestamps for pending DRs + mapping(bytes32 => uint256) requestTimestamps; + } + + // ============ Constructor & Initializer ============ + + /// @custom:oz-upgrades-unsafe-allow constructor + constructor() { + _disableInitializers(); } - /// @inheritdoc RequestHandler - /// @dev Overrides the base implementation to also add the request ID to the pendingRequests array + /// @notice Initializes the SedaCoreV1 contract + /// @param sedaProverAddress The address of the Seda prover contract + /// @dev This function replaces the constructor for proxy compatibility and can only be called once + function initialize(address sedaProverAddress) public initializer { + __ResultHandler_init(sedaProverAddress); + __Ownable_init(msg.sender); + __UUPSUpgradeable_init(); + } + + // ============ External Functions ============ + + /// @inheritdoc RequestHandlerBase + /// @dev Overrides the base implementation to also add the request ID and timestamp to storage function postRequest( SedaDataTypes.RequestInputs calldata inputs - ) public override(RequestHandler) returns (bytes32) { + ) public override(RequestHandlerBase, IRequestHandler) returns (bytes32) { bytes32 requestId = super.postRequest(inputs); _addRequest(requestId); + // Store the request timestamp + _storageV1().requestTimestamps[requestId] = block.timestamp; return requestId; } - /// @inheritdoc ResultHandler - /// @dev Overrides the base implementation to also remove the request ID from the pendingRequests array if it exists + /// @inheritdoc ResultHandlerBase + /// @dev Overrides the base implementation to validate result timestamp and clean up storage function postResult( SedaDataTypes.Result calldata result, uint64 batchHeight, bytes32[] calldata proof 
- ) public override(ResultHandler) returns (bytes32) { + ) public override(ResultHandlerBase, IResultHandler) returns (bytes32) { + uint256 requestTimestamp = _storageV1().requestTimestamps[result.drId]; + // Validate result timestamp comes after request timestamp + // Note: requestTimestamp = 0 for requests not tracked by this contract (always passes validation) + if (result.blockTimestamp <= requestTimestamp) { + revert InvalidResultTimestamp(result.drId, result.blockTimestamp, requestTimestamp); + } + bytes32 resultId = super.postResult(result, batchHeight, proof); + _removeRequest(result.drId); + delete _storageV1().requestTimestamps[result.drId]; return resultId; } + // ============ Public View Functions ============ + + /// @notice Retrieves a list of active requests + /// @dev This function is gas-intensive due to iteration over the pendingRequests array. + /// Users should be cautious when using high `limit` values in production environments, as it can result in high gas consumption. + /// @param offset The starting index in the pendingRequests array + /// @param limit The maximum number of requests to return + /// @return An array of SedaDataTypes.Request structs + function getPendingRequests(uint256 offset, uint256 limit) public view returns (SedaDataTypes.Request[] memory) { + uint256 totalRequests = _storageV1().pendingRequests.length(); + if (offset >= totalRequests) { + return new SedaDataTypes.Request[](0); + } + + uint256 actualLimit = (offset + limit > totalRequests) ? totalRequests - offset : limit; + SedaDataTypes.Request[] memory queriedPendingRequests = new SedaDataTypes.Request[](actualLimit); + for (uint256 i = 0; i < actualLimit; i++) { + bytes32 requestId = _storageV1().pendingRequests.at(offset + i); + queriedPendingRequests[i] = getRequest(requestId); + } + + return queriedPendingRequests; + } + + // ============ Internal Functions ============ + + /// @notice Returns the storage struct for the contract + /// @dev Uses ERC-7201 storage pattern to access the storage struct at a specific slot + /// @return s The storage struct containing the contract's state variables + function _storageV1() internal pure returns (SedaCoreStorage storage s) { + bytes32 slot = CORE_V1_STORAGE_SLOT; + // solhint-disable-next-line no-inline-assembly + assembly { + s.slot := slot + } + } + /// @notice Adds a request ID to the pendingRequests set /// @dev This function is internal to ensure that only the contract's internal logic can add requests, /// preventing unauthorized additions and maintaining proper state management. /// @param requestId The ID of the request to add function _addRequest(bytes32 requestId) internal { - pendingRequests.add(requestId); + _storageV1().pendingRequests.add(requestId); } /// @notice Removes a request ID from the pendingRequests set if it exists @@ -90,6 +146,18 @@ contract SedaCoreV1 is RequestHandler, ResultHandler { /// maintaining proper state transitions and preventing unauthorized removals. /// @param requestId The ID of the request to remove function _removeRequest(bytes32 requestId) internal { - pendingRequests.remove(requestId); + _storageV1().pendingRequests.remove(requestId); } + + /// @dev Required override for UUPSUpgradeable. Ensures only the owner can upgrade the implementation. 
+ /// @inheritdoc UUPSUpgradeable + /// @param newImplementation Address of the new implementation contract + function _authorizeUpgrade( + address newImplementation + ) + internal + virtual + override + onlyOwner // solhint-disable-next-line no-empty-blocks + {} } diff --git a/contracts/core/abstract/RequestHandlerBase.sol b/contracts/core/abstract/RequestHandlerBase.sol new file mode 100644 index 0000000..5bd1adc --- /dev/null +++ b/contracts/core/abstract/RequestHandlerBase.sol @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.24; + +import {SedaDataTypes} from "../../libraries/SedaDataTypes.sol"; +import {IRequestHandler} from "../../interfaces/IRequestHandler.sol"; + +/// @title RequestHandler +/// @notice Implements the RequestHandlerBase for managing Seda protocol requests +abstract contract RequestHandlerBase is IRequestHandler { + // ============ Constants ============ + + // Define a unique storage slot for RequestHandlerBase + bytes32 private constant REQUEST_HANDLER_STORAGE_SLOT = + keccak256(abi.encode(uint256(keccak256("seda.requesthandler.storage")) - 1)) & ~bytes32(uint256(0xff)); + + // ============ Storage ============ + + /// @custom:storage-location erc7201:seda.requesthandler.storage + struct RequestHandlerStorage { + // Mapping of request IDs to Request structs + mapping(bytes32 => SedaDataTypes.Request) requests; + } + + // ============ External Functions ============ + + /// @inheritdoc IRequestHandler + function postRequest( + SedaDataTypes.RequestInputs calldata inputs + ) public virtual override(IRequestHandler) returns (bytes32) { + if (inputs.replicationFactor == 0) { + revert InvalidReplicationFactor(); + } + + bytes32 requestId = SedaDataTypes.deriveRequestId(inputs); + if (bytes(_requestHandlerStorage().requests[requestId].version).length != 0) { + revert RequestAlreadyExists(requestId); + } + + _requestHandlerStorage().requests[requestId] = SedaDataTypes.Request({ + version: SedaDataTypes.VERSION, + execProgramId: inputs.execProgramId, + execInputs: inputs.execInputs, + execGasLimit: inputs.execGasLimit, + tallyProgramId: inputs.tallyProgramId, + tallyInputs: inputs.tallyInputs, + tallyGasLimit: inputs.tallyGasLimit, + replicationFactor: inputs.replicationFactor, + consensusFilter: inputs.consensusFilter, + gasPrice: inputs.gasPrice, + memo: inputs.memo + }); + + emit RequestPosted(requestId); + return requestId; + } + + /// @inheritdoc IRequestHandler + function getRequest( + bytes32 requestId + ) public view virtual override(IRequestHandler) returns (SedaDataTypes.Request memory) { + SedaDataTypes.Request memory request = _requestHandlerStorage().requests[requestId]; + // Version field is always set + if (bytes(request.version).length == 0) { + revert RequestNotFound(requestId); + } + + return _requestHandlerStorage().requests[requestId]; + } + + /// @notice Derives a request ID from the given inputs + /// @param inputs The request inputs + /// @return The derived request ID + function deriveRequestId(SedaDataTypes.RequestInputs calldata inputs) public pure returns (bytes32) { + return SedaDataTypes.deriveRequestId(inputs); + } + + // ============ Internal Functions ============ + + /// @notice Returns the storage struct for the contract + /// @dev Uses ERC-7201 storage pattern to access the storage struct at a specific slot + /// @return s The storage struct containing the contract's state variables + function _requestHandlerStorage() internal pure returns (RequestHandlerStorage storage s) { + bytes32 slot = 
REQUEST_HANDLER_STORAGE_SLOT; + // solhint-disable-next-line no-inline-assembly + assembly { + s.slot := slot + } + } +} diff --git a/contracts/core/abstract/ResultHandlerBase.sol b/contracts/core/abstract/ResultHandlerBase.sol new file mode 100644 index 0000000..f9e9c5e --- /dev/null +++ b/contracts/core/abstract/ResultHandlerBase.sol @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.24; + +import {Initializable} from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; + +import {SedaDataTypes} from "../../libraries/SedaDataTypes.sol"; +import {IProver} from "../../interfaces/IProver.sol"; +import {IResultHandler} from "../../interfaces/IResultHandler.sol"; + +/// @title ResultHandler +/// @notice Implements the ResultHandlerBase for managing Seda protocol results +abstract contract ResultHandlerBase is IResultHandler, Initializable { + // ============ Constants ============ + + // Define a unique storage slot for ResultHandlerBase + bytes32 private constant RESULT_HANDLER_STORAGE_SLOT = + keccak256(abi.encode(uint256(keccak256("seda.resulthandler.storage")) - 1)) & ~bytes32(uint256(0xff)); + + // ============ Storage ============ + + /// @custom:storage-location erc7201:seda.resulthandler.storage + struct ResultHandlerStorage { + IProver sedaProver; + // Mapping of request IDs to Result structs + mapping(bytes32 => SedaDataTypes.Result) results; + } + + // ============ Constructor & Initializer ============ + + /// @custom:oz-upgrades-unsafe-allow constructor + constructor() { + _disableInitializers(); + } + + /// @notice Initializes the ResultHandler contract + /// @dev Sets up the contract with the provided Seda prover address + /// @param sedaProverAddress The address of the Seda prover contract + // solhint-disable-next-line func-name-mixedcase + function __ResultHandler_init(address sedaProverAddress) internal onlyInitializing { + _resultHandlerStorage().sedaProver = IProver(sedaProverAddress); + } + + // ============ External Functions ============ + + /// @inheritdoc IResultHandler + function postResult( + SedaDataTypes.Result calldata result, + uint64 batchHeight, + bytes32[] calldata proof + ) public virtual override(IResultHandler) returns (bytes32) { + bytes32 resultId = SedaDataTypes.deriveResultId(result); + if (_resultHandlerStorage().results[result.drId].drId != bytes32(0)) { + revert ResultAlreadyExists(resultId); + } + if (!_resultHandlerStorage().sedaProver.verifyResultProof(resultId, batchHeight, proof)) { + revert InvalidResultProof(resultId); + } + + _resultHandlerStorage().results[result.drId] = result; + + emit ResultPosted(resultId); + return resultId; + } + + // ============ Public View Functions ============ + + /// @inheritdoc IResultHandler + function getResult(bytes32 requestId) public view override(IResultHandler) returns (SedaDataTypes.Result memory) { + SedaDataTypes.Result memory result = _resultHandlerStorage().results[requestId]; + if (bytes(result.version).length == 0) { + revert ResultNotFound(requestId); + } + return _resultHandlerStorage().results[requestId]; + } + + /// @notice Returns the address of the Seda prover contract + /// @return The address of the Seda prover contract + function getSedaProver() public view returns (address) { + return address(_resultHandlerStorage().sedaProver); + } + + /// @notice Verifies the result without storing it + /// @param result The result to verify + /// @param batchHeight The height of the batch the result belongs to + /// @param proof The proof associated with the 
result + /// @return The derived result ID if the proof is valid (reverts otherwise) + function verifyResult( + SedaDataTypes.Result calldata result, + uint64 batchHeight, + bytes32[] calldata proof + ) public view returns (bytes32) { + bytes32 resultId = SedaDataTypes.deriveResultId(result); + if (!_resultHandlerStorage().sedaProver.verifyResultProof(resultId, batchHeight, proof)) { + revert InvalidResultProof(resultId); + } + + return resultId; + } + + /// @notice Derives a result ID from the given result + /// @param result The result data + /// @return The derived result ID + function deriveResultId(SedaDataTypes.Result calldata result) public pure returns (bytes32) { + return SedaDataTypes.deriveResultId(result); + } + + // ============ Internal Functions ============ + + /// @notice Returns the storage struct for the contract + /// @dev Uses ERC-7201 storage pattern to access the storage struct at a specific slot + /// @return s The storage struct containing the contract's state variables + function _resultHandlerStorage() private pure returns (ResultHandlerStorage storage s) { + bytes32 slot = RESULT_HANDLER_STORAGE_SLOT; + // solhint-disable-next-line no-inline-assembly + assembly { + s.slot := slot + } + } +} diff --git a/contracts/interfaces/IProver.sol b/contracts/interfaces/IProver.sol index 9db48db..6adb8e7 100644 --- a/contracts/interfaces/IProver.sol +++ b/contracts/interfaces/IProver.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity ^0.8.9; +pragma solidity ^0.8.24; import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; @@ -8,6 +8,10 @@ import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; interface IProver { event BatchPosted(uint256 indexed batchHeight, bytes32 batchHash); + /// @notice Gets the height of the most recently posted batch + /// @return uint64 The height of the last batch, 0 if no batches exist + function getLastBatchHeight() external view returns (uint64); + /// @notice Posts a new batch with new data and validator proofs /// @param newBatch The new batch data to be posted /// @param signatures Array of signatures validating the new batch diff --git a/contracts/interfaces/IRequestHandler.sol b/contracts/interfaces/IRequestHandler.sol index e57c996..f75afff 100644 --- a/contracts/interfaces/IRequestHandler.sol +++ b/contracts/interfaces/IRequestHandler.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity ^0.8.9; +pragma solidity ^0.8.24; import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; @@ -12,17 +12,13 @@ interface IRequestHandler { event RequestPosted(bytes32 indexed requestId); - /// @notice Allows users to post a new data request. - /// @param inputs The input parameters for the data request. - /// @return requestId The unique identifier for the posted request. - function postRequest( - SedaDataTypes.RequestInputs calldata inputs - ) external returns (bytes32); - /// @notice Retrieves a stored data request by its unique identifier. /// @param id The unique identifier of the request to retrieve. /// @return request The details of the requested data. - function getRequest( - bytes32 id - ) external view returns (SedaDataTypes.Request memory); + function getRequest(bytes32 id) external view returns (SedaDataTypes.Request memory); + + /// @notice Allows users to post a new data request. + /// @param inputs The input parameters for the data request. + /// @return requestId The unique identifier for the posted request.
+ function postRequest(SedaDataTypes.RequestInputs calldata inputs) external returns (bytes32); } diff --git a/contracts/interfaces/IResultHandler.sol b/contracts/interfaces/IResultHandler.sol index 7bddd0c..0bb7a7f 100644 --- a/contracts/interfaces/IResultHandler.sol +++ b/contracts/interfaces/IResultHandler.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity ^0.8.9; +pragma solidity ^0.8.24; import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; @@ -12,20 +12,19 @@ interface IResultHandler { event ResultPosted(bytes32 indexed resultId); + /// @notice Retrieves a result by its ID + /// @param requestId The unique identifier of the request + /// @return The result data associated with the given ID + function getResult(bytes32 requestId) external view returns (SedaDataTypes.Result memory); + /// @notice Posts a new result with a proof /// @param inputs The result data to be posted /// @param batchHeight The height of the batch the result belongs to /// @param proof The proof associated with the result + /// @return resultId The unique identifier of the posted result function postResult( SedaDataTypes.Result calldata inputs, uint64 batchHeight, bytes32[] memory proof ) external returns (bytes32); - - /// @notice Retrieves a result by its ID - /// @param requestId The unique identifier of the request - /// @return The result data associated with the given ID - function getResult( - bytes32 requestId - ) external view returns (SedaDataTypes.Result memory); } diff --git a/contracts/interfaces/ISedaCore.sol b/contracts/interfaces/ISedaCore.sol new file mode 100644 index 0000000..f6c1c18 --- /dev/null +++ b/contracts/interfaces/ISedaCore.sol @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.24; + +import {IResultHandler} from "./IResultHandler.sol"; +import {IRequestHandler} from "./IRequestHandler.sol"; +import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; + +/// @title ISedaCoreV1 +/// @notice Interface for the main Seda protocol contract that handles both requests and results +interface ISedaCore is IResultHandler, IRequestHandler { + /// @notice Retrieves a paginated list of pending requests + /// @param offset The starting position in the list + /// @param limit The maximum number of requests to return + /// @return An array of Request structs + function getPendingRequests(uint256 offset, uint256 limit) external view returns (SedaDataTypes.Request[] memory); +} diff --git a/contracts/libraries/SedaDataTypes.sol b/contracts/libraries/SedaDataTypes.sol index f9ac0b5..f8731f9 100644 --- a/contracts/libraries/SedaDataTypes.sol +++ b/contracts/libraries/SedaDataTypes.sol @@ -1,12 +1,12 @@ // SPDX-License-Identifier: MIT -pragma solidity ^0.8.9; +pragma solidity ^0.8.24; // TODO: Rearrange struct fields to minimize storage gaps and optimize packing /// @title SedaDataTypes Library /// @notice Contains data structures and utility functions for the SEDA protocol library SedaDataTypes { - string public constant VERSION = "0.0.1"; + string internal constant VERSION = "0.0.1"; /// @notice Input parameters for creating a data request struct RequestInputs { @@ -105,7 +105,7 @@ library SedaDataTypes { /// @notice Derives a unique batch ID from a Batch struct /// @param batch The Batch struct to derive the ID from /// @return The derived batch ID - function deriveBatchId(Batch calldata batch) public pure returns (bytes32) { + function deriveBatchId(Batch memory batch) internal pure returns (bytes32) { return keccak256( bytes.concat( @@ -121,9 +121,7 @@ 
library SedaDataTypes { /// @notice Derives a unique result ID from a Result struct /// @param result The Result struct to derive the ID from /// @return The derived result ID - function deriveResultId( - SedaDataTypes.Result calldata result - ) public pure returns (bytes32) { + function deriveResultId(Result memory result) internal pure returns (bytes32) { return keccak256( bytes.concat( @@ -144,9 +142,7 @@ library SedaDataTypes { /// @notice Derives a unique request ID from RequestInputs /// @param inputs The RequestInputs struct to derive the ID from /// @return The derived request ID - function deriveRequestId( - SedaDataTypes.RequestInputs calldata inputs - ) public pure returns (bytes32) { + function deriveRequestId(RequestInputs memory inputs) internal pure returns (bytes32) { return keccak256( bytes.concat( diff --git a/contracts/mocks/MockSecp256k1ProverV2.sol b/contracts/mocks/MockSecp256k1ProverV2.sol new file mode 100644 index 0000000..3b98574 --- /dev/null +++ b/contracts/mocks/MockSecp256k1ProverV2.sol @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.24; + +import {Secp256k1ProverV1} from "../provers/Secp256k1ProverV1.sol"; + +/// @title MockSecp256k1ProverV2 +/// @notice Mock version of Secp256k1Prover for testing purposes +/// @dev This contract is a mock and should not be used in production +contract MockSecp256k1ProverV2 is Secp256k1ProverV1 { + // ============ Constants ============ + bytes32 private constant PROVER_V2_STORAGE_SLOT = + keccak256(abi.encode(uint256(keccak256("secp256k1prover.storage.v2")) - 1)) & ~bytes32(uint256(0xff)); + + // ============ Errors ============ + error ContractNotUpgradeable(); + + // ============ Storage ============ + /// @custom:storage-location secp256k1prover.storage.v2 + struct V2Storage { + string version; + } + + // ============ Constructor & Initializer ============ + /// @custom:oz-upgrades-unsafe-allow constructor + constructor() { + _disableInitializers(); + } + + function initialize() external reinitializer(2) onlyOwner { + V2Storage storage s = _storageV2(); + s.version = "2.0.0"; + } + + // ============ External Functions ============ + /// @notice Returns the version string from V2 storage + /// @return version The version string + function getVersion() external view returns (string memory) { + return _storageV2().version; + } + + // ============ Internal Functions ============ + function _storageV2() internal pure returns (V2Storage storage s) { + bytes32 slot = PROVER_V2_STORAGE_SLOT; + // solhint-disable-next-line no-inline-assembly + assembly { + s.slot := slot + } + } + + // /// @dev Override the _authorizeUpgrade function + // function _authorizeUpgrade(address) internal virtual override onlyOwner { + // revert ContractNotUpgradeable(); + // } +} diff --git a/contracts/mocks/MockSedaCoreV2.sol b/contracts/mocks/MockSedaCoreV2.sol new file mode 100644 index 0000000..6432f98 --- /dev/null +++ b/contracts/mocks/MockSedaCoreV2.sol @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.24; + +import {SedaCoreV1} from "../core/SedaCoreV1.sol"; + +/// @title MockSedaCoreV2 +/// @notice Mock version of SedaCore for testing purposes +/// @dev This contract is a mock and should not be used in production +contract MockSedaCoreV2 is SedaCoreV1 { + // ============ Constants ============ + bytes32 private constant CORE_V2_STORAGE_SLOT = + keccak256(abi.encode(uint256(keccak256("sedacore.storage.v2")) - 1)) & ~bytes32(uint256(0xff)); + + // ============ Errors ============ + error 
ContractNotUpgradeable(); + + // ============ Storage ============ + /// @custom:storage-location sedacore.storage.v2 + struct V2Storage { + string version; + } + + // ============ Constructor & Initializer ============ + /// @custom:oz-upgrades-unsafe-allow constructor + constructor() { + _disableInitializers(); + } + + function initialize() external reinitializer(2) onlyOwner { + V2Storage storage s = _storageV2(); + s.version = "2.0.0"; + } + + // ============ External Functions ============ + /// @notice Returns the version string from V2 storage + /// @return version The version string + function getVersion() external view returns (string memory) { + return _storageV2().version; + } + + // ============ Internal Functions ============ + function _storageV2() internal pure returns (V2Storage storage s) { + bytes32 slot = CORE_V2_STORAGE_SLOT; + // solhint-disable-next-line no-inline-assembly + assembly { + s.slot := slot + } + } + + // /// @dev Override the _authorizeUpgrade function + // function _authorizeUpgrade(address) internal virtual override onlyOwner { + // revert ContractNotUpgradeable(); + // } +} diff --git a/contracts/provers/Secp256k1ProverV1.sol b/contracts/provers/Secp256k1ProverV1.sol new file mode 100644 index 0000000..91bb065 --- /dev/null +++ b/contracts/provers/Secp256k1ProverV1.sol @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.24; + +import {ECDSA} from "@openzeppelin/contracts/utils/cryptography/ECDSA.sol"; +import {Initializable} from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; +import {MerkleProof} from "@openzeppelin/contracts/utils/cryptography/MerkleProof.sol"; +import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; +import {UUPSUpgradeable} from "@openzeppelin/contracts-upgradeable/proxy/utils/UUPSUpgradeable.sol"; + +import {ProverBase} from "./abstract/ProverBase.sol"; +import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; + +/// @title Secp256k1ProverV1 +/// @notice Implements the ProverBase for Secp256k1 signature verification in the Seda protocol +/// @dev This contract manages batch updates and result proof verification using Secp256k1 signatures. 
+/// Batch validity is determined by consensus among validators, requiring: +/// - Increasing batch and block heights +/// - Valid validator proofs and signatures +/// - Sufficient voting power to meet the consensus threshold +contract Secp256k1ProverV1 is ProverBase, Initializable, UUPSUpgradeable, OwnableUpgradeable { + // ============ Constants ============ + + // The percentage of voting power required for consensus (66.666666%, represented as parts per 100,000,000) + uint32 public constant CONSENSUS_PERCENTAGE = 66_666_666; + // Domain separator for Secp256k1 Merkle Tree leaves + bytes1 internal constant SECP256K1_DOMAIN_SEPARATOR = 0x01; + // Constant storage slot for the state following the ERC-7201 standard + bytes32 private constant PROVER_V1_STORAGE_SLOT = + keccak256(abi.encode(uint256(keccak256("secp256k1prover.storage.v1")) - 1)) & ~bytes32(uint256(0xff)); + + // ============ Errors ============ + error ConsensusNotReached(); + + // ============ Storage ============ + + /// @custom:storage-location secp256k1prover.storage.v1 + struct Secp256k1ProverStorage { + uint64 lastBatchHeight; + bytes32 lastValidatorsRoot; + mapping(uint64 => bytes32) batchToResultsRoot; + } + + // ============ Constructor & Initializer ============ + + /// @custom:oz-upgrades-unsafe-allow constructor + constructor() { + _disableInitializers(); + } + + /// @notice Initializes the contract with initial batch data + /// @dev Sets up the contract's initial state and initializes inherited contracts + /// @param initialBatch The initial batch data containing height, validators root, and results root + function initialize(SedaDataTypes.Batch memory initialBatch) public initializer { + // Initialize inherited contracts + __Ownable_init(msg.sender); + __UUPSUpgradeable_init(); + + // Existing initialization code + Secp256k1ProverStorage storage s = _storageV1(); + s.batchToResultsRoot[initialBatch.batchHeight] = initialBatch.resultsRoot; + s.lastBatchHeight = initialBatch.batchHeight; + s.lastValidatorsRoot = initialBatch.validatorsRoot; + emit BatchPosted(initialBatch.batchHeight, SedaDataTypes.deriveBatchId(initialBatch)); + } + + // ============ External Functions ============ + + /// @inheritdoc ProverBase + /// @notice Posts a new batch with new data, ensuring validity through consensus + /// @dev Validates a new batch by checking: + /// 1. Higher batch height than the current batch + /// 2. Matching number of signatures and validator proofs + /// 3. Valid validator proofs (verified against the batch's validator root) + /// 4. Valid signatures (signed by the corresponding validators) + /// 5. 
Sufficient voting power to meet or exceed the consensus threshold + /// @param newBatch The new batch data to be validated and set as current + /// @param signatures Array of signatures from validators approving the new batch + /// @param validatorProofs Array of validator proofs corresponding to the signatures + function postBatch( + SedaDataTypes.Batch calldata newBatch, + bytes[] calldata signatures, + SedaDataTypes.ValidatorProof[] calldata validatorProofs + ) public override { + Secp256k1ProverStorage storage s = _storageV1(); + // Check that new batch invariants hold + if (newBatch.batchHeight <= s.lastBatchHeight) { + revert InvalidBatchHeight(); + } + if (signatures.length != validatorProofs.length) { + revert MismatchedSignaturesAndProofs(); + } + + // Derive Batch Id + bytes32 batchId = SedaDataTypes.deriveBatchId(newBatch); + + // Check that all validator proofs are valid and accumulate voting power + uint64 votingPower = 0; + for (uint256 i = 0; i < validatorProofs.length; i++) { + if (!_verifyValidatorProof(validatorProofs[i], s.lastValidatorsRoot)) { + revert InvalidValidatorProof(); + } + if (!_verifySignature(batchId, signatures[i], validatorProofs[i].signer)) { + revert InvalidSignature(); + } + votingPower += validatorProofs[i].votingPower; + } + + // Check voting power consensus + if (votingPower < CONSENSUS_PERCENTAGE) { + revert ConsensusNotReached(); + } + + // Update current batch + s.lastBatchHeight = newBatch.batchHeight; + s.lastValidatorsRoot = newBatch.validatorsRoot; + s.batchToResultsRoot[newBatch.batchHeight] = newBatch.resultsRoot; + emit BatchPosted(newBatch.batchHeight, batchId); + } + + // ============ Public View Functions ============ + + /// @notice Verifies a result proof against a batch's results root + /// @param resultId The ID of the result to verify + /// @param batchHeight The height of the batch containing the result + /// @param merkleProof The Merkle proof for the result + /// @return bool Returns true if the proof is valid, false otherwise + function verifyResultProof( + bytes32 resultId, + uint64 batchHeight, + bytes32[] calldata merkleProof + ) public view override returns (bool) { + Secp256k1ProverStorage storage s = _storageV1(); + bytes32 leaf = keccak256(abi.encodePacked(RESULT_DOMAIN_SEPARATOR, resultId)); + return MerkleProof.verify(merkleProof, s.batchToResultsRoot[batchHeight], leaf); + } + + /// @notice Returns the last processed batch height + /// @return The height of the last batch + function getLastBatchHeight() public view override returns (uint64) { + return _storageV1().lastBatchHeight; + } + + /// @notice Returns the last validators root hash + /// @return The Merkle root of the last validator set + function getLastValidatorsRoot() public view returns (bytes32) { + return _storageV1().lastValidatorsRoot; + } + + /// @notice Returns the results root for a specific batch height + /// @param batchHeight The batch height to query + /// @return The results root for the specified batch + function getBatchResultsRoot(uint64 batchHeight) public view returns (bytes32) { + return _storageV1().batchToResultsRoot[batchHeight]; + } + + // ============ Internal Functions ============ + + /// @notice Returns the storage struct for the contract + /// @dev Uses ERC-7201 storage pattern to access the storage struct at a specific slot + /// @return s The storage struct containing the contract's state variables + function _storageV1() internal pure returns (Secp256k1ProverStorage storage s) { + bytes32 slot = PROVER_V1_STORAGE_SLOT; + // 
solhint-disable-next-line no-inline-assembly + assembly { + s.slot := slot + } + } + + /// @notice Verifies a validator proof against the validators root + /// @dev Constructs a leaf using SECP256K1_DOMAIN_SEPARATOR and verifies it against the validators root + /// @param proof The validator proof containing signer, voting power, and Merkle proof + /// @param validatorsRoot The root hash to verify against + /// @return bool Returns true if the proof is valid, false otherwise + function _verifyValidatorProof( + SedaDataTypes.ValidatorProof memory proof, + bytes32 validatorsRoot + ) internal pure returns (bool) { + bytes32 leaf = keccak256(abi.encodePacked(SECP256K1_DOMAIN_SEPARATOR, proof.signer, proof.votingPower)); + + return MerkleProof.verify(proof.merkleProof, validatorsRoot, leaf); + } + + /// @notice Verifies a signature against a message hash and its address + /// @param messageHash The hash of the message that was signed + /// @param signature The signature to verify + /// @param signer The validator Secp256k1 address signer + /// @return bool Returns true if the signature is valid, false otherwise + function _verifySignature( + bytes32 messageHash, + bytes calldata signature, + address signer + ) internal pure returns (bool) { + return ECDSA.recover(messageHash, signature) == signer; + } + + /// @dev Required override for UUPSUpgradeable. Ensures only the owner can upgrade the implementation. + /// @inheritdoc UUPSUpgradeable + /// @param newImplementation Address of the new implementation contract + function _authorizeUpgrade( + address newImplementation + ) + internal + virtual + override + onlyOwner // solhint-disable-next-line no-empty-blocks + {} +} diff --git a/contracts/abstract/ProverBase.sol b/contracts/provers/abstract/ProverBase.sol similarity index 51% rename from contracts/abstract/ProverBase.sol rename to contracts/provers/abstract/ProverBase.sol index 592689c..3b5019d 100644 --- a/contracts/abstract/ProverBase.sol +++ b/contracts/provers/abstract/ProverBase.sol @@ -1,16 +1,26 @@ // SPDX-License-Identifier: MIT -pragma solidity ^0.8.9; +pragma solidity ^0.8.24; -import {IProver} from "../interfaces/IProver.sol"; -import {SedaDataTypes} from "../libraries/SedaDataTypes.sol"; +import {IProver} from "../../interfaces/IProver.sol"; +import {SedaDataTypes} from "../../libraries/SedaDataTypes.sol"; +/// @title ProverBase +/// @notice Base contract for implementing proof verification logic +/// @dev This abstract contract defines the basic structure and error handling for proof verification abstract contract ProverBase is IProver { + // ============ Constants ============ + + /// @notice Domain separator used to prevent cross-domain replay attacks when hashing result IDs + bytes1 internal constant RESULT_DOMAIN_SEPARATOR = 0x00; + + // ============ Errors ============ + error InvalidBatchHeight(); error InvalidSignature(); error InvalidValidatorProof(); error MismatchedSignaturesAndProofs(); - bytes1 internal constant RESULT_DOMAIN_SEPARATOR = 0x00; + // ============ External Functions ============ /// @inheritdoc IProver function postBatch( @@ -25,4 +35,7 @@ abstract contract ProverBase is IProver { uint64 batchHeight, bytes32[] calldata merkleProof ) public view virtual override(IProver) returns (bool); + + /// @inheritdoc IProver + function getLastBatchHeight() external view virtual override(IProver) returns (uint64); } diff --git a/deployments/parameters.json b/deployments/parameters.json new file mode 100644 index 0000000..2033034 --- /dev/null +++ 
b/deployments/parameters.json @@ -0,0 +1,14 @@ +{ + "SedaCoreV1": { + "sedaProverAddress": "0xe2E938Ec34C2f03C6D2Aaa851861eE72891177F0" + }, + "Secp256k1ProverV1": { + "initialBatch": { + "batchHeight": 3, + "blockHeight": 31, + "validatorsRoot": "0x6b8a7c6cd54c814f4e30b89b5f2e91b9d96860e24eb39366f4c260400fcb47db", + "resultsRoot": "0x56c4f39b7564ea6a32877fc98743652998753c4cd8a4b455c26dcb3b92774b73", + "provingMetadata": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + } +} diff --git a/hardhat.config.ts b/hardhat.config.ts index 6d52d63..8420fcc 100644 --- a/hardhat.config.ts +++ b/hardhat.config.ts @@ -1,7 +1,10 @@ -import type { HardhatUserConfig } from 'hardhat/config'; import '@nomicfoundation/hardhat-toolbox'; +import '@openzeppelin/hardhat-upgrades'; +import type { HardhatUserConfig } from 'hardhat/config'; import { getEtherscanConfig, getNetworksConfig } from './config'; +import './tasks'; + const gasReporterConfig = { currency: 'USD', gasPrice: 20, @@ -14,7 +17,7 @@ const config: HardhatUserConfig = { enabled: false, }, solidity: { - version: '0.8.25', + version: '0.8.24', settings: { optimizer: { enabled: true, diff --git a/ignition/modules/SedaCorePermissioned.ts b/ignition/modules/SedaCorePermissioned.ts index 3bcd01d..71058a9 100644 --- a/ignition/modules/SedaCorePermissioned.ts +++ b/ignition/modules/SedaCorePermissioned.ts @@ -5,13 +5,8 @@ const SedaProverModule = buildModule('SedaCorePermissioned', (m) => { const relayers = [m.getAccount(0)]; const maxReplicationFactor = m.getParameter('maxReplicationFactor'); - // Deploy SedaDataTypes library - const dataTypesLib = m.library('SedaDataTypes'); - // Deploy SedaCorePermissioned contract with the library - const coreContract = m.contract('SedaCorePermissioned', [relayers, maxReplicationFactor], { - libraries: { SedaDataTypes: dataTypesLib }, - }); + const coreContract = m.contract('SedaCorePermissioned', [relayers, maxReplicationFactor]); return { coreContract }; }); diff --git a/ignition/modules/SedaCoreV1.ts b/ignition/modules/SedaCoreV1.ts deleted file mode 100644 index 192f568..0000000 --- a/ignition/modules/SedaCoreV1.ts +++ /dev/null @@ -1,28 +0,0 @@ -import { buildModule } from '@nomicfoundation/hardhat-ignition/modules'; -import { ethers } from 'hardhat'; - -const SedaCoreV1Module = buildModule('SedaCoreV1', (m) => { - // Constructor arguments - const initialBatch = m.getParameter('initialBatch'); - - // Deploy SedaDataTypes library - const dataTypesLib = m.library('SedaDataTypes'); - - // Deploy Secp256k1Prover contract - const proverContract = m.contract('Secp256k1Prover', [initialBatch], { - libraries: { - SedaDataTypes: dataTypesLib, - }, - }); - - // Deploy SedaCoreV1 contract - const coreV1Contract = m.contract('SedaCoreV1', [proverContract], { - libraries: { - SedaDataTypes: dataTypesLib, - }, - }); - - return { dataTypesLib, proverContract, coreV1Contract }; -}); - -export default SedaCoreV1Module; diff --git a/ignition/modules/parameters.json b/ignition/modules/parameters.json index 407181a..9bc7384 100644 --- a/ignition/modules/parameters.json +++ b/ignition/modules/parameters.json @@ -1,13 +1,4 @@ { - "SedaCoreV1": { - "initialBatch": { - "batchHeight": 0, - "blockHeight": 0, - "validatorsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "resultsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "provingMetadata": "0x0000000000000000000000000000000000000000000000000000000000000000" - } - }, "SedaCorePermissioned": { 
"maxReplicationFactor": 1 } diff --git a/package.json b/package.json index f08874c..649f198 100644 --- a/package.json +++ b/package.json @@ -3,10 +3,12 @@ "version": "0.0.4", "main": "index.js", "scripts": { + "check": "bun run lint && bun run format:sol", "clean": "rimraf artifacts cache typechain-types", "compile": "hardhat compile", "deploy:core:permissioned": "hardhat ignition deploy ignition/modules/SedaCorePermissioned.ts --parameters ignition/modules/parameters.json", - "deploy:core:v1": "hardhat ignition deploy ignition/modules/SedaCoreV1.ts --parameters ignition/modules/parameters.json", + "format:sol": "prettier --check \"**/*.sol\"", + "format:sol:fix": "prettier --write \"**/*.sol\"", "gen:testvectors": "bun scripts/generateTestVectors.ts", "lint": "bun run lint:ts && bun run lint:sol", "lint:sol": "solhint 'contracts/**/*.sol' --ignore-path .solhintignore", @@ -21,14 +23,19 @@ "description": "", "devDependencies": { "@biomejs/biome": "^1.9.4", - "@openzeppelin/merkle-tree": "^1.0.7", "@nomicfoundation/hardhat-toolbox": "^5.0.0", - "hardhat": "^2.22.16", - "dotenv": "^16.4.5", + "@openzeppelin/hardhat-upgrades": "^3.7.0", + "@openzeppelin/merkle-tree": "^1.0.7", + "dotenv": "^16.4.7", + "hardhat": "^2.22.17", + "prettier": "^3.4.2", + "prettier-plugin-solidity": "^1.4.1", "rimraf": "^6.0.1", - "solhint": "^5.0.3" + "solhint": "^5.0.3", + "valibot": "^0.42.1" }, "dependencies": { - "@openzeppelin/contracts": "5.1.0" + "@openzeppelin/contracts": "5.1.0", + "@openzeppelin/contracts-upgradeable": "5.1.0" } } diff --git a/tasks/common/config.ts b/tasks/common/config.ts new file mode 100644 index 0000000..ec22493 --- /dev/null +++ b/tasks/common/config.ts @@ -0,0 +1,28 @@ +export const CONFIG = { + DEPLOYMENTS: { + FOLDER: 'deployments', + FILES: { + ADDRESSES: 'addresses.json', + ARTIFACTS: 'artifacts', + }, + }, + LOGGER: { + ICONS: { + info: '•', + success: '✓', + error: '✗', + warn: '⚠️', + }, + SECTION_ICONS: { + config: '🔧', + deploy: '🚀', + files: '📝', + test: '🧪', + verify: '🔍', + params: '📜', + default: '🔹', + meta: '🌟', + }, + META_BORDER: '━', + }, +} as const; diff --git a/tasks/common/io.ts b/tasks/common/io.ts new file mode 100644 index 0000000..773b507 --- /dev/null +++ b/tasks/common/io.ts @@ -0,0 +1,60 @@ +import * as fs from 'node:fs/promises'; +import * as path from 'node:path'; +import * as readline from 'node:readline'; +import { logger } from './logger'; + +// Add this helper function at the top level +export async function prompt(question: string): Promise { + const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout, + }); + + return new Promise((resolve) => { + rl.question(question, (answer) => { + rl.close(); + resolve(answer); + }); + }); +} + +export async function readFile(filePath: string): Promise { + try { + return await fs.readFile(filePath, 'utf8'); + } catch (error: unknown) { + const errorMessage = error instanceof Error ? error.message : String(error); + console.error(`Failed to read file ${filePath}: ${errorMessage}`); + throw new Error(`File read failed: ${errorMessage}`); + } +} + +export async function writeFile(filePath: string, data: object): Promise { + try { + const relativePath = path.relative(process.cwd(), filePath); + await fs.writeFile(filePath, JSON.stringify(data, null, 2)); + logger.success(`Updated ${relativePath}`); + } catch (error: unknown) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + throw new Error(`Failed to write ${path.basename(filePath)}: ${errorMessage}`); + } +} + +export async function ensureDirectoryExists(dirPath: string): Promise<void> { + try { + await fs.access(dirPath); + } catch { + logger.info(`Creating directory: ${dirPath}`); + await fs.mkdir(dirPath, { recursive: true }); + } +} + +export async function pathExists(path: string): Promise<boolean> { + try { + await fs.access(path); + return true; + } catch { + return false; + } +} + +export { path }; diff --git a/tasks/common/logger.ts b/tasks/common/logger.ts new file mode 100644 index 0000000..f0f6e3c --- /dev/null +++ b/tasks/common/logger.ts @@ -0,0 +1,50 @@ +import { CONFIG } from './config'; + +type LogLevel = 'info' | 'success' | 'error' | 'warn'; +type SectionType = 'config' | 'deploy' | 'files' | 'test' | 'verify' | 'default' | 'params' | 'meta'; + +class Logger { + private prefix?: string; + + constructor(prefix?: string) { + this.prefix = prefix; + } + + private log(level: LogLevel, message: string) { + const icon = CONFIG.LOGGER.ICONS[level]; + const prefix = this.prefix ? `[${this.prefix}] ` : ''; + console.log(`${icon} ${prefix}${message}`); + } + + section(message: string, type: SectionType = 'default'): void { + const icon = CONFIG.LOGGER.SECTION_ICONS[type]; + if (type === 'meta') { + const border = CONFIG.LOGGER.META_BORDER.repeat(40); + console.log(`\n${border}\n${icon} ${message.toUpperCase()}\n${border}`); + } else { + console.log(`\n${icon} ${message.toUpperCase()}\n${'-'.repeat(60)}`); + } + } + + info(message: string): void { + this.log('info', message); + } + + success(message: string): void { + this.log('success', message); + } + + error(message: string): void { + this.log('error', message); + } + + warn(message: string): void { + this.log('warn', message); + } + + withPrefix(prefix: string): Logger { + return new Logger(prefix); + } +} + +export const logger = new Logger(); diff --git a/tasks/common/params.ts b/tasks/common/params.ts new file mode 100644 index 0000000..80a6eb1 --- /dev/null +++ b/tasks/common/params.ts @@ -0,0 +1,35 @@ +import * as v from 'valibot'; +import { readFile } from './io'; + +const HexString = v.pipe(v.string(), v.regex(/^0x[0-9a-fA-F]*$/, 'Invalid hex string')); + +const ParamsSchema = v.object({ + SedaCoreV1: v.object({ + sedaProverAddress: HexString, + }), + Secp256k1ProverV1: v.object({ + initialBatch: v.object({ + batchHeight: v.number(), + blockHeight: v.number(), + validatorsRoot: HexString, + resultsRoot: HexString, + provingMetadata: HexString, + }), + }), +}); + +export async function readParams(filePath: string): Promise<v.InferOutput<typeof ParamsSchema>> { + try { + const fileContent = await readFile(filePath); + const parsedJson = JSON.parse(fileContent); + + return v.parse(ParamsSchema, parsedJson); + } catch (error: unknown) { + if (error instanceof v.ValiError) { + throw new Error(`Failed to read or parse params file: ${v.flatten(error.issues)}`); + } + + const errorMessage = error instanceof Error ? 
error.message : String(error); + throw new Error(`Failed to read or parse params file: ${errorMessage}`); + } +} diff --git a/tasks/common/reports.ts b/tasks/common/reports.ts new file mode 100644 index 0000000..1a02290 --- /dev/null +++ b/tasks/common/reports.ts @@ -0,0 +1,102 @@ +import type { Artifact, BuildInfo } from 'hardhat/types'; +import type { HardhatRuntimeEnvironment } from 'hardhat/types'; +import { CONFIG } from './config'; +import { path, ensureDirectoryExists, pathExists, readFile, writeFile } from './io'; + +const DEPLOYMENTS_FOLDER = CONFIG.DEPLOYMENTS.FOLDER; +const ADDRESSES_FILE = CONFIG.DEPLOYMENTS.FILES.ADDRESSES; + +// Define the type for the addresses object +type Addresses = { + [networkName: string]: { + [contractName: string]: { + proxy: string; + implementation: string; + gitCommitHash: string; + }; + }; +}; + +export async function updateAddressesFile( + hre: HardhatRuntimeEnvironment, + contractName: string, + proxyAddress: string, + implAddress: string, +) { + const addressesPath = path.join(DEPLOYMENTS_FOLDER, ADDRESSES_FILE); + let addresses: Addresses = {}; + + if (await pathExists(addressesPath)) { + const content = await readFile(addressesPath); + if (content.trim()) { + addresses = JSON.parse(content) as Addresses; + } + } + + const networkName = `${hre.network.name}-${(await hre.ethers.provider.getNetwork()).chainId.toString()}`; + if (!addresses[networkName]) { + addresses[networkName] = {}; + } + + const gitCommitHash = require('node:child_process').execSync('git rev-parse HEAD').toString().trim(); + + addresses[networkName][contractName] = { + proxy: proxyAddress, + implementation: implAddress, + gitCommitHash, + }; + + await writeFile(addressesPath, addresses); +} + +export async function updateDeployment(hre: HardhatRuntimeEnvironment, contractName: string) { + const deploymentsDir = path.join(process.cwd(), CONFIG.DEPLOYMENTS.FOLDER); + await ensureDirectoryExists(deploymentsDir); + + const networkId = `${hre.network.name}-${(await hre.ethers.provider.getNetwork()).chainId.toString()}`; + const networkDeployDir = path.join(deploymentsDir, networkId); + const artifactsDir = path.join(networkDeployDir, CONFIG.DEPLOYMENTS.FILES.ARTIFACTS); + + await Promise.all([ensureDirectoryExists(networkDeployDir), ensureDirectoryExists(artifactsDir)]); + + await Promise.all([ + writeBuildInfoToFile(hre, contractName, networkDeployDir), + writeArtifactToFile(hre, contractName, artifactsDir), + ]); +} + +async function writeArtifactToFile(hre: HardhatRuntimeEnvironment, contractName: string, artifactDir: string) { + try { + ensureDirectoryExists(artifactDir); + const artifact: Artifact = await hre.artifacts.readArtifact(contractName); + const artifactPath = path.join(artifactDir, `${contractName}.json`); + await writeFile(artifactPath, artifact); + } catch (error: unknown) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + throw new Error(`Artifact generation failed: ${errorMessage}`); + } +} + +async function writeBuildInfoToFile(hre: HardhatRuntimeEnvironment, contractName: string, folderPath: string) { + ensureDirectoryExists(folderPath); + + const buildInfo: BuildInfo | undefined = await hre.artifacts.getBuildInfo(await findBuildInfoPath(hre, contractName)); + + if (!buildInfo) { + throw new Error(`Build info not found for ${contractName}`); + } + + const buildInfoPath = path.join(folderPath, `${contractName}.buildinfo`); + await writeFile(buildInfoPath, buildInfo); +} + +async function findBuildInfoPath(hre: HardhatRuntimeEnvironment, contractName: string): Promise<string> { + const fullNames = await hre.artifacts.getAllFullyQualifiedNames(); + const contractPath = fullNames.find((name) => name.endsWith(`${contractName}.sol:${contractName}`)); + + if (!contractPath) { + throw new Error(`Contract ${contractName} not found in artifacts`); + } + + return contractPath; +} diff --git a/tasks/common/uupsProxy.ts b/tasks/common/uupsProxy.ts new file mode 100644 index 0000000..e0b4302 --- /dev/null +++ b/tasks/common/uupsProxy.ts @@ -0,0 +1,33 @@ +import type { Signer } from 'ethers'; +import type { HardhatRuntimeEnvironment } from 'hardhat/types'; +import type { ProverDataTypes } from '../../ts-types'; + +type Contracts = { + Secp256k1ProverV1: { + constructorArgs: [ProverDataTypes.BatchStruct]; + }; + SedaCoreV1: { + constructorArgs: [string]; + }; +}; + +export async function deployProxyContract<T extends keyof Contracts>( + hre: HardhatRuntimeEnvironment, + contractName: T, + constructorArgs: Contracts[T]['constructorArgs'], + signer: Signer, +) { + const ContractFactory = await hre.ethers.getContractFactory(contractName, signer); + const contract = await hre.upgrades.deployProxy(ContractFactory, constructorArgs, { + initializer: 'initialize', + kind: 'uups', + }); + await contract.waitForDeployment(); + + const contractImplAddress = await hre.upgrades.erc1967.getImplementationAddress(await contract.getAddress()); + + return { + contract, + contractImplAddress, + }; +} diff --git a/tasks/deployAll.ts b/tasks/deployAll.ts new file mode 100644 index 0000000..1413d63 --- /dev/null +++ b/tasks/deployAll.ts @@ -0,0 +1,29 @@ +import type { HardhatRuntimeEnvironment } from 'hardhat/types'; +import { logger } from './common/logger'; +import { deploySedaCore } from './deployCore'; +import { deploySecp256k1Prover } from './deployProver'; + +export async function deployAll( + hre: HardhatRuntimeEnvironment, + options: { + params: string; + reset?: boolean; + verify?: boolean; + }, +) { + // 1. Deploy Secp256k1Prover + logger.section('1. Deploy Secp256k1Prover contracts', 'meta'); + const { contractAddress } = await deploySecp256k1Prover(hre, { + params: options.params, + verify: options.verify, + reset: options.reset, + }); + + // 2. Deploy SedaCore using the prover address + logger.section('2. 
Deploy SedaCoreV1 contracts', 'meta'); + await deploySedaCore(hre, { + proverAddress: contractAddress, + verify: options.verify, + reset: options.reset, + }); +} diff --git a/tasks/deployCore.ts b/tasks/deployCore.ts new file mode 100644 index 0000000..be40568 --- /dev/null +++ b/tasks/deployCore.ts @@ -0,0 +1,108 @@ +import type { HardhatRuntimeEnvironment } from 'hardhat/types'; +import { CONFIG } from './common/config'; +import { pathExists } from './common/io'; +import { prompt } from './common/io'; +import { logger } from './common/logger'; +import { readParams } from './common/params'; +import { updateAddressesFile, updateDeployment } from './common/reports'; +import { deployProxyContract } from './common/uupsProxy'; + +export async function deploySedaCore( + hre: HardhatRuntimeEnvironment, + options: { + params?: string; + proverAddress?: string; + reset?: boolean; + verify?: boolean; + }, +) { + const { params, proverAddress, verify } = options; + const contractName = 'SedaCoreV1'; + + // Add Contract Parameters section + logger.section('Contract Parameters', 'params'); + // Check for conflicting parameters + if (params && proverAddress) { + throw new Error('Both params file and proverAddress cannot be provided simultaneously.'); + } + + // Validate parameters + let sedaProverAddress: string; + if (params) { + logger.info(`Using parameters file: ${params}`); + const sedaProverParams = await readParams(params); + sedaProverAddress = sedaProverParams.SedaCoreV1.sedaProverAddress; + logger.info(`File content: \n ${JSON.stringify(sedaProverParams, null, 2).replace(/\n/g, '\n ')}`); + } else if (proverAddress) { + // Use the directly provided prover address + sedaProverAddress = proverAddress; + logger.info(`Using provided prover address parameter: ${sedaProverAddress}`); + } else { + // Try to read from deployments/addresses.json + try { + const networkKey = `${hre.network.name}-${hre.network.config.chainId}`; + const addressesPath = `../../${CONFIG.DEPLOYMENTS.FOLDER}/${CONFIG.DEPLOYMENTS.FILES.ADDRESSES}`; + const addresses = require(addressesPath); + const proverDeployment = addresses[networkKey]?.Secp256k1ProverV1; + + if (!proverDeployment?.proxy) { + throw new Error(`No Secp256k1ProverV1 proxy address found in ${CONFIG.DEPLOYMENTS.FILES.ADDRESSES}`); + } + + sedaProverAddress = proverDeployment.proxy; + logger.info(`Using prover address from deployments/addresses.json: ${sedaProverAddress}`); + } catch { + throw new Error('Either params file or proverAddress must be provided, or Secp256k1ProverV1 must be deployed'); + } + } + + // Configuration + logger.section('Deployment Configuration', 'config'); + logger.info(`Contract: ${contractName}`); + logger.info(`Network: ${hre.network.name}`); + logger.info(`Chain ID: ${hre.network.config.chainId}`); + const [owner] = await hre.ethers.getSigners(); + const balance = hre.ethers.formatEther(await owner.provider.getBalance(owner.address)); + logger.info(`Deployer: ${owner.address} (${balance} ETH)`); + + // Deploy + logger.section('Deploying Contracts', 'deploy'); + const networkKey = `${hre.network.name}-${hre.network.config.chainId}`; + if (!options.reset && (await pathExists(`${CONFIG.DEPLOYMENTS.FOLDER}/${networkKey}`))) { + const confirmation = await prompt(`Deployments folder for ${networkKey} already exists. 
Type "yes" to continue: `); + if (confirmation !== 'yes') { + logger.error('Deployment aborted.'); + return; + } + } + + const { contract, contractImplAddress } = await deployProxyContract(hre, contractName, [sedaProverAddress], owner); + const contractAddress = await contract.getAddress(); + logger.success(`Proxy address: ${contractAddress}`); + logger.success(`Impl. address: ${contractImplAddress}`); + + // Update deployment files + logger.section('Updating Deployment Files', 'files'); + await updateDeployment(hre, contractName); + await updateAddressesFile(hre, contractName, contractAddress, contractImplAddress); + + if (verify) { + logger.section('Verifying Contracts', 'verify'); + try { + await hre.run('verify:verify', { + address: contractAddress, + }); + logger.success('Contract verified successfully'); + } catch (error) { + // Check if the error is "Already Verified" + const errorMessage = error instanceof Error ? error.message : String(error); + if (errorMessage.includes('Already Verified')) { + logger.success('Contract is already verified on block explorer'); + } else { + logger.warn(`Verification failed: ${error}`); + } + } + } + + return { contract, contractAddress, contractImplAddress }; +} diff --git a/tasks/deployProver.ts b/tasks/deployProver.ts new file mode 100644 index 0000000..2cd39cf --- /dev/null +++ b/tasks/deployProver.ts @@ -0,0 +1,80 @@ +import type { HardhatRuntimeEnvironment } from 'hardhat/types'; +import { CONFIG } from './common/config'; +import { pathExists } from './common/io'; +import { prompt } from './common/io'; +import { logger } from './common/logger'; +import { readParams } from './common/params'; +import { updateAddressesFile, updateDeployment } from './common/reports'; +import { deployProxyContract } from './common/uupsProxy'; + +export async function deploySecp256k1Prover( + hre: HardhatRuntimeEnvironment, + options: { + params: string; + reset?: boolean; + verify?: boolean; + }, +): Promise<{ contractAddress: string; contractImplAddress: string }> { + const { params: paramsFilePath, verify } = options; + const contractName = 'Secp256k1ProverV1'; + + // Contract Parameters + logger.section('Contract Parameters', 'params'); + logger.info(`Using parameters file: ${paramsFilePath}`); + const proverParams = await readParams(paramsFilePath); + logger.info(`File Content: \n ${JSON.stringify(proverParams, null, 2).replace(/\n/g, '\n ')}`); + + // Configuration + logger.section('Deployment Configuration', 'config'); + logger.info(`Contract: ${contractName}`); + logger.info(`Network: ${hre.network.name}`); + logger.info(`Chain ID: ${hre.network.config.chainId}`); + const [owner] = await hre.ethers.getSigners(); + const balance = hre.ethers.formatEther(await owner.provider.getBalance(owner.address)); + logger.info(`Deployer: ${owner.address} (${balance} ETH)`); + + // Deploy + logger.section('Deploying Contracts', 'deploy'); + const networkKey = `${hre.network.name}-${hre.network.config.chainId}`; + if (!options.reset && (await pathExists(`${CONFIG.DEPLOYMENTS.FOLDER}/${networkKey}`))) { + const confirmation = await prompt('Deployments folder already exists. 
Type "yes" to continue: '); + if (confirmation !== 'yes') { + logger.error('Deployment aborted.'); + throw new Error('Deployment aborted: User cancelled the operation'); + } + } + const { contract, contractImplAddress } = await deployProxyContract( + hre, + contractName, + [proverParams.Secp256k1ProverV1.initialBatch], + owner, + ); + const contractAddress = await contract.getAddress(); + logger.success(`Proxy address: ${contractAddress}`); + logger.success(`Impl. address: ${contractImplAddress}`); + + // Update deployment files + logger.section('Updating Deployment Files', 'files'); + await updateDeployment(hre, contractName); + await updateAddressesFile(hre, contractName, contractAddress, contractImplAddress); + + if (verify) { + logger.section('Verifying Contracts', 'verify'); + try { + await hre.run('verify:verify', { + address: contractAddress, + }); + logger.success('Contract verified successfully'); + } catch (error) { + // Check if the error is "Already Verified" + const errorMessage = error instanceof Error ? error.message : String(error); + if (errorMessage.includes('Already Verified')) { + logger.info('Contract is already verified on block explorer'); + } else { + logger.warn(`Verification failed: ${error}`); + } + } + } + + return { contractAddress, contractImplAddress }; +} diff --git a/tasks/index.ts b/tasks/index.ts new file mode 100644 index 0000000..cd37897 --- /dev/null +++ b/tasks/index.ts @@ -0,0 +1,40 @@ +import { scope, types } from 'hardhat/config'; + +/** + * Defines the scope for SEDA-related tasks. + */ +export const sedaScope = scope('seda', 'Deploy and interact with SEDA contracts'); + +import './postRequest'; + +import { deployAll } from './deployAll'; +import { deploySedaCore } from './deployCore'; +import { deploySecp256k1Prover } from './deployProver'; + +sedaScope + .task('deploy:core', 'Deploy the SedaCoreV1 contract') + .addOptionalParam('params', 'The parameters file to use', undefined, types.string) + .addOptionalParam('proverAddress', 'Direct SedaProver contract address', undefined, types.string) + .addFlag('verify', 'Verify the contract on etherscan') + .addFlag('reset', 'Replace existing deployment files') + .setAction(async (taskArgs, hre) => { + await deploySedaCore(hre, taskArgs); + }); + +sedaScope + .task('deploy:prover', 'Deploy the Secp256k1ProverV1 contract') + .addParam('params', 'The parameters file to use', undefined, types.string) + .addFlag('verify', 'Verify the contract on etherscan') + .addFlag('reset', 'Replace existing deployment files') + .setAction(async (taskArgs, hre) => { + await deploySecp256k1Prover(hre, taskArgs); + }); + +sedaScope + .task('deploy:all', 'Deploy the Secp256k1ProverV1 and SedaCoreV1 contracts') + .addParam('params', 'The parameters file to use', undefined, types.string) + .addFlag('verify', 'Verify the contract on etherscan') + .addFlag('reset', 'Replace existing deployment files') + .setAction(async (taskArgs, hre) => { + await deployAll(hre, taskArgs); + }); diff --git a/tasks/postRequest.ts b/tasks/postRequest.ts new file mode 100644 index 0000000..504e771 --- /dev/null +++ b/tasks/postRequest.ts @@ -0,0 +1,37 @@ +import { sedaScope } from '.'; +import { logger } from './common/logger'; + +sedaScope + .task('post-request', 'Posts a data request to the SedaProver contract') + .addParam('core', 'The address of the SedaCore contract') + .setAction(async (taskArgs, hre) => { + logger.section('Post Data Request', 'deploy'); + + const core = await hre.ethers.getContractAt('ISedaCore', taskArgs.core); + 
logger.info(`SedaCore address: ${taskArgs.core}`); + + const timestamp = Math.floor(Date.now() / 1000).toString(16); + const request = { + execProgramId: '0x541d1faf3b6e167ea5369928a24a0019f4167ca430da20a271c5a7bc5fa2657a', + execInputs: '0x1234', + execGasLimit: 100000n, + tallyProgramId: '0x541d1faf3b6e167ea5369928a24a0019f4167ca430da20a271c5a7bc5fa2657a', + tallyInputs: '0x5678', + tallyGasLimit: 100000n, + replicationFactor: 1, + consensusFilter: '0x00', + gasPrice: 1000n, + memo: `0x${timestamp}`, + }; + + logger.info(`Posting DR with memo: ${request.memo}`); + const tx = await core.postRequest(request); + const receipt = await tx.wait(); + logger.success('Data request posted successfully!'); + + // Get requestId from event logs + const logs = await core.queryFilter(core.filters.RequestPosted(), receipt?.blockNumber, receipt?.blockNumber); + const requestId = logs[0].args[0]; + + logger.info(`Data Request ID: ${requestId}`); + }); diff --git a/test/ResultHandler.test.ts b/test/ResultHandler.test.ts deleted file mode 100644 index 95e6f26..0000000 --- a/test/ResultHandler.test.ts +++ /dev/null @@ -1,193 +0,0 @@ -import { loadFixture } from '@nomicfoundation/hardhat-toolbox/network-helpers'; -import { SimpleMerkleTree } from '@openzeppelin/merkle-tree'; -import { expect } from 'chai'; -import { ethers } from 'hardhat'; - -import { compareResults } from './helpers'; -import { computeResultLeafHash, deriveDataResultId, generateDataFixtures } from './utils'; - -describe('ResultHandler', () => { - async function deployResultHandlerFixture() { - const { requests, results } = generateDataFixtures(2); - const leaves = results.map(deriveDataResultId).map(computeResultLeafHash); - - // Create merkle tree and proofs - const tree = SimpleMerkleTree.of(leaves, { sortLeaves: true }); - const proofs = results.map((_, index) => { - return tree.getProof(index); - }); - - const data = { - requests, - results, - proofs, - }; - - // Create initial batch with data results - const initialBatch = { - batchHeight: 0, - blockHeight: 0, - validatorsRoot: ethers.ZeroHash, - resultsRoot: tree.root, - provingMetadata: ethers.ZeroHash, - }; - - // Deploy the SedaDataTypes library first - const DataTypesFactory = await ethers.getContractFactory('SedaDataTypes'); - const dataTypes = await DataTypesFactory.deploy(); - - // Deploy the contract - const ProverFactory = await ethers.getContractFactory('Secp256k1Prover', { - libraries: { - SedaDataTypes: await dataTypes.getAddress(), - }, - }); - const prover = await ProverFactory.deploy(initialBatch); - - const ResultHandlerFactory = await ethers.getContractFactory('ResultHandler', { - libraries: { - SedaDataTypes: await dataTypes.getAddress(), - }, - }); - const handler = await ResultHandlerFactory.deploy(prover.getAddress()); - - return { handler, data }; - } - - describe('deriveResultId', () => { - it('should generate consistent data result IDs', async () => { - const { handler, data } = await loadFixture(deployResultHandlerFixture); - - const resultIdFromUtils = deriveDataResultId(data.results[0]); - const resultId = await handler.deriveResultId.staticCall(data.results[0]); - - expect(resultId).to.equal(resultIdFromUtils); - }); - - it('should generate different IDs for different results', async () => { - const { handler, data } = await loadFixture(deployResultHandlerFixture); - - const id1 = await handler.deriveResultId.staticCall(data.results[0]); - const id2 = await handler.deriveResultId.staticCall(data.results[1]); - - expect(id1).to.not.equal(id2); - }); - }); - 
- describe('postResult', () => { - it('should successfully post a result and read it back', async () => { - const { handler, data } = await loadFixture(deployResultHandlerFixture); - - await handler.postResult(data.results[0], 0, data.proofs[0]); - - const postedResult = await handler.getResult(data.results[0].drId); - compareResults(postedResult, data.results[0]); - }); - - it('should fail to post a result that already exists', async () => { - const { handler, data } = await loadFixture(deployResultHandlerFixture); - - await handler.postResult(data.results[0], 0, data.proofs[0]); - - const resultId = deriveDataResultId(data.results[0]); - await expect(handler.postResult(data.results[0], 0, data.proofs[0])) - .to.be.revertedWithCustomError(handler, 'ResultAlreadyExists') - .withArgs(resultId); - }); - - it('should fail to post a result with invalid proof', async () => { - const { handler, data } = await loadFixture(deployResultHandlerFixture); - - const resultId = deriveDataResultId(data.results[1]); - await expect(handler.postResult(data.results[1], 0, data.proofs[0])) - .to.be.revertedWithCustomError(handler, 'InvalidResultProof') - .withArgs(resultId); - }); - - it('should emit a ResultPosted event', async () => { - const { handler, data } = await loadFixture(deployResultHandlerFixture); - - await expect(handler.postResult(data.results[0], 0, data.proofs[0])) - .to.emit(handler, 'ResultPosted') - .withArgs(deriveDataResultId(data.results[0])); - }); - - it('should fail to post a result with empty proof', async () => { - const { handler, data } = await loadFixture(deployResultHandlerFixture); - - const resultId = deriveDataResultId(data.results[0]); - await expect(handler.postResult(data.results[0], 0, [])) - .to.be.revertedWithCustomError(handler, 'InvalidResultProof') - .withArgs(resultId); - }); - - it('should fail to post a result with invalid drId', async () => { - const { handler, data } = await loadFixture(deployResultHandlerFixture); - - const invalidResult = { ...data.results[0], drId: ethers.ZeroHash }; - const resultId = deriveDataResultId(invalidResult); - await expect(handler.postResult(invalidResult, 0, data.proofs[0])) - .to.be.revertedWithCustomError(handler, 'InvalidResultProof') - .withArgs(resultId); - }); - }); - - describe('getResult', () => { - it('should revert with ResultNotFound for non-existent result id', async () => { - const { handler } = await loadFixture(deployResultHandlerFixture); - - const nonExistentId = ethers.ZeroHash; - await expect(handler.getResult(nonExistentId)) - .to.be.revertedWithCustomError(handler, 'ResultNotFound') - .withArgs(nonExistentId); - }); - - it('should return the correct result for an existing result id', async () => { - const { handler, data } = await loadFixture(deployResultHandlerFixture); - - await handler.postResult(data.results[0], 0, data.proofs[0]); - const retrievedResult = await handler.getResult(data.results[0].drId); - - compareResults(retrievedResult, data.results[0]); - }); - - it('should return the correct result for multiple posted results', async () => { - const { handler, data } = await loadFixture(deployResultHandlerFixture); - - // Post two results - await handler.postResult(data.results[0], 0, data.proofs[0]); - await handler.postResult(data.results[1], 0, data.proofs[1]); - - // Retrieve and verify both results - const retrievedResult1 = await handler.getResult(data.results[0].drId); - const retrievedResult2 = await handler.getResult(data.results[1].drId); - - compareResults(retrievedResult1, 
data.results[0]); - compareResults(retrievedResult2, data.results[1]); - - // Try to get a non-existent result - const nonExistentId = ethers.randomBytes(32); - await expect(handler.getResult(nonExistentId)) - .to.be.revertedWithCustomError(handler, 'ResultNotFound') - .withArgs(nonExistentId); - }); - }); - - describe('verifyResult', () => { - it('should successfully verify a valid result', async () => { - const { handler, data } = await loadFixture(deployResultHandlerFixture); - - const resultId = await handler.verifyResult(data.results[0], 0, data.proofs[0]); - expect(resultId).to.equal(deriveDataResultId(data.results[0])); - }); - - it('should fail to verify a result with invalid proof', async () => { - const { handler, data } = await loadFixture(deployResultHandlerFixture); - - const resultId = deriveDataResultId(data.results[1]); - await expect(handler.verifyResult(data.results[1], 0, data.proofs[0])) - .to.be.revertedWithCustomError(handler, 'InvalidResultProof') - .withArgs(resultId); - }); - }); -}); diff --git a/test/RequestHandler.test.ts b/test/core/RequestHandler.test.ts similarity index 90% rename from test/RequestHandler.test.ts rename to test/core/RequestHandler.test.ts index f973923..0e1bb06 100644 --- a/test/RequestHandler.test.ts +++ b/test/core/RequestHandler.test.ts @@ -2,23 +2,15 @@ import { loadFixture } from '@nomicfoundation/hardhat-toolbox/network-helpers'; import { expect } from 'chai'; import { ethers } from 'hardhat'; -import { compareRequests } from './helpers'; -import { deriveRequestId, generateDataFixtures } from './utils'; +import { compareRequests } from '../helpers'; +import { deriveRequestId, generateDataFixtures } from '../utils'; describe('RequestHandler', () => { async function deployRequestHandlerFixture() { const { requests } = generateDataFixtures(4); - // Deploy the SedaDataTypes library first - const DataTypesFactory = await ethers.getContractFactory('SedaDataTypes'); - const dataTypes = await DataTypesFactory.deploy(); - // Deploy the RequestHandler contract - const RequestHandlerFactory = await ethers.getContractFactory('RequestHandler', { - libraries: { - SedaDataTypes: await dataTypes.getAddress(), - }, - }); + const RequestHandlerFactory = await ethers.getContractFactory('SedaCoreV1'); const handler = await RequestHandlerFactory.deploy(); return { handler, requests }; diff --git a/test/core/ResultHandler.test.ts b/test/core/ResultHandler.test.ts new file mode 100644 index 0000000..9ac175c --- /dev/null +++ b/test/core/ResultHandler.test.ts @@ -0,0 +1,183 @@ +import { loadFixture } from '@nomicfoundation/hardhat-toolbox/network-helpers'; +import { SimpleMerkleTree } from '@openzeppelin/merkle-tree'; +import { expect } from 'chai'; +import { ethers, upgrades } from 'hardhat'; + +import { compareResults } from '../helpers'; +import { computeResultLeafHash, deriveDataResultId, generateDataFixtures } from '../utils'; + +describe('ResultHandler', () => { + async function deployResultHandlerFixture() { + const { requests, results } = generateDataFixtures(2); + const leaves = results.map(deriveDataResultId).map(computeResultLeafHash); + + // Create merkle tree and proofs + const tree = SimpleMerkleTree.of(leaves, { sortLeaves: true }); + const proofs = results.map((_, index) => { + return tree.getProof(index); + }); + + const data = { + requests, + results, + proofs, + }; + + // Create initial batch with data results + const initialBatch = { + batchHeight: 0, + blockHeight: 0, + validatorsRoot: ethers.ZeroHash, + resultsRoot: tree.root, + 
provingMetadata: ethers.ZeroHash, + }; + + // Deploy the contract + const ProverFactory = await ethers.getContractFactory('Secp256k1ProverV1'); + const prover = await upgrades.deployProxy(ProverFactory, [initialBatch], { initializer: 'initialize' }); + await prover.waitForDeployment(); + + const CoreFactory = await ethers.getContractFactory('SedaCoreV1'); + const core = await upgrades.deployProxy(CoreFactory, [await prover.getAddress()], { initializer: 'initialize' }); + await core.waitForDeployment(); + + return { core, data }; + } + + describe('deriveResultId', () => { + it('should generate consistent data result IDs', async () => { + const { core, data } = await loadFixture(deployResultHandlerFixture); + + const resultIdFromUtils = deriveDataResultId(data.results[0]); + const resultId = await core.deriveResultId.staticCall(data.results[0]); + + expect(resultId).to.equal(resultIdFromUtils); + }); + + it('should generate different IDs for different results', async () => { + const { core, data } = await loadFixture(deployResultHandlerFixture); + + const id1 = await core.deriveResultId.staticCall(data.results[0]); + const id2 = await core.deriveResultId.staticCall(data.results[1]); + + expect(id1).to.not.equal(id2); + }); + }); + + describe('postResult', () => { + it('should successfully post a result and read it back', async () => { + const { core, data } = await loadFixture(deployResultHandlerFixture); + + await core.postResult(data.results[0], 0, data.proofs[0]); + + const postedResult = await core.getResult(data.results[0].drId); + compareResults(postedResult, data.results[0]); + }); + + it('should fail to post a result that already exists', async () => { + const { core, data } = await loadFixture(deployResultHandlerFixture); + + await core.postResult(data.results[0], 0, data.proofs[0]); + + const resultId = deriveDataResultId(data.results[0]); + await expect(core.postResult(data.results[0], 0, data.proofs[0])) + .to.be.revertedWithCustomError(core, 'ResultAlreadyExists') + .withArgs(resultId); + }); + + it('should fail to post a result with invalid proof', async () => { + const { core, data } = await loadFixture(deployResultHandlerFixture); + + const resultId = deriveDataResultId(data.results[1]); + await expect(core.postResult(data.results[1], 0, data.proofs[0])) + .to.be.revertedWithCustomError(core, 'InvalidResultProof') + .withArgs(resultId); + }); + + it('should emit a ResultPosted event', async () => { + const { core, data } = await loadFixture(deployResultHandlerFixture); + + await expect(core.postResult(data.results[0], 0, data.proofs[0])) + .to.emit(core, 'ResultPosted') + .withArgs(deriveDataResultId(data.results[0])); + }); + + it('should fail to post a result with empty proof', async () => { + const { core, data } = await loadFixture(deployResultHandlerFixture); + + const resultId = deriveDataResultId(data.results[0]); + await expect(core.postResult(data.results[0], 0, [])) + .to.be.revertedWithCustomError(core, 'InvalidResultProof') + .withArgs(resultId); + }); + + it('should fail to post a result with invalid drId', async () => { + const { core, data } = await loadFixture(deployResultHandlerFixture); + + const invalidResult = { ...data.results[0], drId: ethers.ZeroHash }; + const resultId = deriveDataResultId(invalidResult); + await expect(core.postResult(invalidResult, 0, data.proofs[0])) + .to.be.revertedWithCustomError(core, 'InvalidResultProof') + .withArgs(resultId); + }); + }); + + describe('getResult', () => { + it('should revert with ResultNotFound for 
non-existent result id', async () => { + const { core } = await loadFixture(deployResultHandlerFixture); + + const nonExistentId = ethers.ZeroHash; + await expect(core.getResult(nonExistentId)) + .to.be.revertedWithCustomError(core, 'ResultNotFound') + .withArgs(nonExistentId); + }); + + it('should return the correct result for an existing result id', async () => { + const { core, data } = await loadFixture(deployResultHandlerFixture); + + await core.postResult(data.results[0], 0, data.proofs[0]); + const retrievedResult = await core.getResult(data.results[0].drId); + + compareResults(retrievedResult, data.results[0]); + }); + + it('should return the correct result for multiple posted results', async () => { + const { core, data } = await loadFixture(deployResultHandlerFixture); + + // Post two results + await core.postResult(data.results[0], 0, data.proofs[0]); + await core.postResult(data.results[1], 0, data.proofs[1]); + + // Retrieve and verify both results + const retrievedResult1 = await core.getResult(data.results[0].drId); + const retrievedResult2 = await core.getResult(data.results[1].drId); + + compareResults(retrievedResult1, data.results[0]); + compareResults(retrievedResult2, data.results[1]); + + // Try to get a non-existent result + const nonExistentId = ethers.randomBytes(32); + await expect(core.getResult(nonExistentId)) + .to.be.revertedWithCustomError(core, 'ResultNotFound') + .withArgs(nonExistentId); + }); + }); + + describe('verifyResult', () => { + it('should successfully verify a valid result', async () => { + const { core, data } = await loadFixture(deployResultHandlerFixture); + + const resultId = await core.verifyResult(data.results[0], 0, data.proofs[0]); + expect(resultId).to.equal(deriveDataResultId(data.results[0])); + }); + + it('should fail to verify a result with invalid proof', async () => { + const { core, data } = await loadFixture(deployResultHandlerFixture); + + const resultId = deriveDataResultId(data.results[1]); + await expect(core.verifyResult(data.results[1], 0, data.proofs[0])) + .to.be.revertedWithCustomError(core, 'InvalidResultProof') + .withArgs(resultId); + }); + }); +}); diff --git a/test/core/SedaCore.proxy.ts b/test/core/SedaCore.proxy.ts new file mode 100644 index 0000000..9fffce5 --- /dev/null +++ b/test/core/SedaCore.proxy.ts @@ -0,0 +1,82 @@ +import { loadFixture } from '@nomicfoundation/hardhat-toolbox/network-helpers'; +import { expect } from 'chai'; +import { ethers, upgrades } from 'hardhat'; + +describe('Proxy: SedaCore', () => { + async function deployProxyFixture() { + const [owner] = await ethers.getSigners(); + + const initialBatch = { + batchHeight: 0, + blockHeight: 0, + validatorsRoot: ethers.ZeroHash, + resultsRoot: ethers.ZeroHash, + provingMetadata: ethers.ZeroHash, + }; + + // Deploy prover + const ProverFactory = await ethers.getContractFactory('Secp256k1ProverV1'); + const prover = await upgrades.deployProxy(ProverFactory, [initialBatch], { initializer: 'initialize' }); + await prover.waitForDeployment(); + + // Deploy V1 through proxy + const CoreV1Factory = await ethers.getContractFactory('SedaCoreV1', owner); + const core = await upgrades.deployProxy(CoreV1Factory, [await prover.getAddress()], { initializer: 'initialize' }); + await core.waitForDeployment(); + + // Get V2 factory + const CoreV2Factory = await ethers.getContractFactory('MockSedaCoreV2', owner); + + return { prover, core, CoreV2Factory }; + } + + describe('upgrade', () => { + it('should maintain state after upgrade', async () => { + const { prover, 
core, CoreV2Factory } = await loadFixture(deployProxyFixture); + + // Check initial state (using a relevant state variable from your SedaCore) + const stateBeforeUpgrade = await core.getSedaProver(); + expect(stateBeforeUpgrade).to.equal(await prover.getAddress()); + + // Upgrade to V2 + const proxyV2 = await upgrades.upgradeProxy(await core.getAddress(), CoreV2Factory); + + // Check state is maintained + const stateAfterUpgrade = await proxyV2.getSedaProver(); + expect(stateAfterUpgrade).to.equal(stateBeforeUpgrade); + }); + + it('should maintain owner after upgrade', async () => { + const { core: proxy, CoreV2Factory } = await loadFixture(deployProxyFixture); + const [owner] = await ethers.getSigners(); + + // Check owner before upgrade + const ownerBeforeUpgrade = await proxy.owner(); + expect(ownerBeforeUpgrade).to.equal(owner.address); + + // Upgrade to V2 + const proxyV2 = await upgrades.upgradeProxy(await proxy.getAddress(), CoreV2Factory); + + // Check owner is maintained after upgrade + const ownerAfterUpgrade = await proxyV2.owner(); + expect(ownerAfterUpgrade).to.equal(owner.address); + }); + + it('should have new functionality after upgrade', async () => { + const { core: proxy, CoreV2Factory } = await loadFixture(deployProxyFixture); + + // Verify V1 doesn't have getVersion() + const V1Contract = proxy.connect(await ethers.provider.getSigner()); + // @ts-expect-error - getVersion shouldn't exist on V1 + expect(V1Contract.getVersion).to.be.undefined; + + // Upgrade to V2 + const proxyV2 = await upgrades.upgradeProxy(await proxy.getAddress(), CoreV2Factory); + await proxyV2.initialize(); + + // Check new V2 functionality + const version = await proxyV2.getVersion(); + expect(version).to.equal('2.0.0'); + }); + }); +}); diff --git a/test/SedaCoreV1.test.ts b/test/core/SedaCoreV1.test.ts similarity index 88% rename from test/SedaCoreV1.test.ts rename to test/core/SedaCoreV1.test.ts index 44d42e6..9a8626f 100644 --- a/test/SedaCoreV1.test.ts +++ b/test/core/SedaCoreV1.test.ts @@ -1,14 +1,18 @@ import { loadFixture } from '@nomicfoundation/hardhat-toolbox/network-helpers'; import { SimpleMerkleTree } from '@openzeppelin/merkle-tree'; import { expect } from 'chai'; -import { ethers } from 'hardhat'; +import { ethers, upgrades } from 'hardhat'; -import { compareRequests, compareResults, convertToRequestInputs } from './helpers'; -import { computeResultLeafHash, deriveDataResultId, deriveRequestId, generateDataFixtures } from './utils'; +import { compareRequests, compareResults, convertToRequestInputs } from '../helpers'; +import { computeResultLeafHash, deriveDataResultId, deriveRequestId, generateDataFixtures } from '../utils'; describe('SedaCoreV1', () => { async function deployCoreFixture() { + // Generate test fixtures and modify the last result's timestamp to be 1 second (1 unix timestamp) + // This simulates an invalid result with a timestamp from 1970-01-01T00:00:01Z const { requests, results } = generateDataFixtures(10); + results[results.length - 1].blockTimestamp = 1; + const leaves = results.map(deriveDataResultId).map(computeResultLeafHash); // Create merkle tree and proofs @@ -25,18 +29,13 @@ describe('SedaCoreV1', () => { provingMetadata: ethers.ZeroHash, }; - const DataTypesFactory = await ethers.getContractFactory('SedaDataTypes'); - const dataTypes = await DataTypesFactory.deploy(); + const ProverFactory = await ethers.getContractFactory('Secp256k1ProverV1'); + const prover = await upgrades.deployProxy(ProverFactory, [initialBatch], { initializer: 'initialize' }); + 
await prover.waitForDeployment(); - const ProverFactory = await ethers.getContractFactory('Secp256k1Prover', { - libraries: { SedaDataTypes: await dataTypes.getAddress() }, - }); - const prover = await ProverFactory.deploy(initialBatch); - - const CoreFactory = await ethers.getContractFactory('SedaCoreV1', { - libraries: { SedaDataTypes: await dataTypes.getAddress() }, - }); - const core = await CoreFactory.deploy(await prover.getAddress()); + const CoreFactory = await ethers.getContractFactory('SedaCoreV1'); + const core = await upgrades.deployProxy(CoreFactory, [await prover.getAddress()], { initializer: 'initialize' }); + await core.waitForDeployment(); return { prover, core, data }; } @@ -85,6 +84,7 @@ describe('SedaCoreV1', () => { const requests = await core.getPendingRequests(0, 1); expect(requests.length).to.equal(0); }); + it('should post a request and then post its result', async () => { const { core, data } = await loadFixture(deployCoreFixture); @@ -123,6 +123,18 @@ describe('SedaCoreV1', () => { compareResults(postedResult, data.results[i]); } }); + + it('should reject results with invalid timestamps', async () => { + const { core, data } = await loadFixture(deployCoreFixture); + + const requestIndex = data.results.length - 1; + await core.postRequest(data.requests[requestIndex]); + + // Try to post the last result which has an invalid timestamp of 1 + await expect( + core.postResult(data.results[requestIndex], 0, data.proofs[requestIndex]), + ).to.be.revertedWithCustomError(core, 'InvalidResultTimestamp'); + }); }); describe('request management', () => { @@ -183,7 +195,7 @@ describe('SedaCoreV1', () => { const gasUsed = await core.postResult.estimateGas(data.results[2], 0, data.proofs[2]); // This is rough esimate - expect(gasUsed).to.be.lessThan(250000); + expect(gasUsed).to.be.lessThan(300000); }); it('should maintain pending requests (with removals)', async () => { diff --git a/test/SedaPermissioned.test.ts b/test/core/SedaPermissioned.test.ts similarity index 97% rename from test/SedaPermissioned.test.ts rename to test/core/SedaPermissioned.test.ts index a44f9a7..c43c197 100644 --- a/test/SedaPermissioned.test.ts +++ b/test/core/SedaPermissioned.test.ts @@ -1,8 +1,8 @@ import { loadFixture } from '@nomicfoundation/hardhat-network-helpers'; import { expect } from 'chai'; import { ethers } from 'hardhat'; -import { compareRequests, compareResults, convertToRequestInputs } from './helpers'; -import { deriveDataResultId, deriveRequestId, generateDataFixtures } from './utils'; +import { compareRequests, compareResults, convertToRequestInputs } from '../helpers'; +import { deriveDataResultId, deriveRequestId, generateDataFixtures } from '../utils'; describe('SedaCorePermissioned', () => { const MAX_REPLICATION_FACTOR = 1; @@ -15,12 +15,7 @@ describe('SedaCorePermissioned', () => { anyone, }; - const SedaDataTypes = await ethers.getContractFactory('SedaDataTypes'); - const dataTypes = await SedaDataTypes.deploy(); - - const PermissionedFactory = await ethers.getContractFactory('SedaCorePermissioned', { - libraries: { SedaDataTypes: await dataTypes.getAddress() }, - }); + const PermissionedFactory = await ethers.getContractFactory('SedaCorePermissioned'); const core = await PermissionedFactory.deploy([relayer.address], MAX_REPLICATION_FACTOR); return { core, signers }; diff --git a/test/helpers.ts b/test/helpers.ts index 1870bda..db56584 100644 --- a/test/helpers.ts +++ b/test/helpers.ts @@ -1,12 +1,11 @@ import { expect } from 'chai'; - -import type { SedaDataTypes } from 
'../typechain-types/contracts/libraries/SedaDataTypes'; +import type { CoreRequestTypes, CoreResultTypes, ProverDataTypes } from '../ts-types'; // Function to convert an unformatted tuple result to a formatted struct export function convertToRequestInputs( // biome-ignore lint/suspicious/noExplicitAny: Explicit any type is necessary to handle the unformatted tuple result request: any, -): SedaDataTypes.RequestInputsStruct { +): CoreRequestTypes.RequestInputsStruct { return { //version: unformatted[0], execProgramId: request[1], @@ -24,8 +23,8 @@ export function convertToRequestInputs( // Helper function to compare two requests export const compareRequests = ( - actual: SedaDataTypes.RequestInputsStruct, - expected: SedaDataTypes.RequestInputsStruct, + actual: CoreRequestTypes.RequestInputsStruct, + expected: CoreRequestTypes.RequestInputsStruct, ) => { expect(actual.execProgramId).to.equal(expected.execProgramId); expect(actual.execInputs).to.equal(expected.execInputs); @@ -40,7 +39,7 @@ export const compareRequests = ( }; // Helper function to compare two results -export const compareResults = (actual: SedaDataTypes.ResultStruct, expected: SedaDataTypes.ResultStruct) => { +export const compareResults = (actual: CoreResultTypes.ResultStruct, expected: CoreResultTypes.ResultStruct) => { expect(actual.version).to.equal(expected.version); expect(actual.drId).to.equal(expected.drId); expect(actual.consensus).to.equal(expected.consensus); diff --git a/test/prover/Secp256k1Prover.proxy.ts b/test/prover/Secp256k1Prover.proxy.ts new file mode 100644 index 0000000..e3835cc --- /dev/null +++ b/test/prover/Secp256k1Prover.proxy.ts @@ -0,0 +1,78 @@ +import { loadFixture } from '@nomicfoundation/hardhat-toolbox/network-helpers'; +import { expect } from 'chai'; +import { ethers, upgrades } from 'hardhat'; + +describe('Proxy: Secp256k1Prover', () => { + async function deployProxyFixture() { + const [owner] = await ethers.getSigners(); + + // Generate initial batch data + const initialBatch = { + batchHeight: 0, + blockHeight: 0, + validatorsRoot: ethers.ZeroHash, + resultsRoot: ethers.ZeroHash, + provingMetadata: '0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef', + }; + + // Deploy V1 through proxy + const ProverV1Factory = await ethers.getContractFactory('Secp256k1ProverV1', owner); + const proxy = await upgrades.deployProxy(ProverV1Factory, [initialBatch], { initializer: 'initialize' }); + await proxy.waitForDeployment(); + + // Get V2 factory + const ProverV2Factory = await ethers.getContractFactory('MockSecp256k1ProverV2', owner); + + return { proxy, ProverV2Factory, initialBatch }; + } + + describe('upgrade', () => { + it('should maintain state after upgrade', async () => { + const { proxy, ProverV2Factory, initialBatch } = await loadFixture(deployProxyFixture); + + // Check initial state + const heightBeforeUpgrade = await proxy.getLastBatchHeight(); + expect(heightBeforeUpgrade).to.equal(initialBatch.batchHeight); + + // Upgrade to V2 + const proxyV2 = await upgrades.upgradeProxy(await proxy.getAddress(), ProverV2Factory); + + // Check state is maintained + const heightAfterUpgrade = await proxyV2.getLastBatchHeight(); + expect(heightAfterUpgrade).to.equal(heightBeforeUpgrade); + }); + + it('should maintain owner after upgrade', async () => { + const { proxy, ProverV2Factory } = await loadFixture(deployProxyFixture); + const [owner] = await ethers.getSigners(); + + // Check owner before upgrade + const ownerBeforeUpgrade = await proxy.owner(); + 
expect(ownerBeforeUpgrade).to.equal(owner.address); + + // Upgrade to V2 + const proxyV2 = await upgrades.upgradeProxy(await proxy.getAddress(), ProverV2Factory); + + // Check owner is maintained after upgrade + const ownerAfterUpgrade = await proxyV2.owner(); + expect(ownerAfterUpgrade).to.equal(owner.address); + }); + + it('should have new functionality after upgrade', async () => { + const { proxy, ProverV2Factory } = await loadFixture(deployProxyFixture); + + // Verify V1 doesn't have getVersion() + const V1Contract = proxy.connect(await ethers.provider.getSigner()); + // @ts-expect-error - getVersion shouldn't exist on V1 + expect(V1Contract.getVersion).to.be.undefined; + + // Upgrade to V2 + const proxyV2 = await upgrades.upgradeProxy(await proxy.getAddress(), ProverV2Factory); + await proxyV2.initialize(); + + // Check new V2 functionality + const version = await proxyV2.getVersion(); + expect(version).to.equal('2.0.0'); + }); + }); +}); diff --git a/test/Secp256k1Prover.test.ts b/test/prover/Secp256k1ProverV1.test.ts similarity index 89% rename from test/Secp256k1Prover.test.ts rename to test/prover/Secp256k1ProverV1.test.ts index 74986b3..ef6a60f 100644 --- a/test/Secp256k1Prover.test.ts +++ b/test/prover/Secp256k1ProverV1.test.ts @@ -2,8 +2,8 @@ import { loadFixture } from '@nomicfoundation/hardhat-toolbox/network-helpers'; import { SimpleMerkleTree } from '@openzeppelin/merkle-tree'; import { expect } from 'chai'; import type { Wallet } from 'ethers'; -import { ethers } from 'hardhat'; -import type { SedaDataTypes } from '../typechain-types/contracts/libraries'; +import { ethers, upgrades } from 'hardhat'; +import type { ProverDataTypes } from '../../ts-types'; import { computeResultLeafHash, computeValidatorLeafHash, @@ -11,9 +11,9 @@ import { deriveDataResultId, generateDataFixtures, generateNewBatchWithId, -} from './utils'; +} from '../utils'; -describe('Secp256k1Prover', () => { +describe('Secp256k1ProverV1', () => { async function deployProverFixture(length = 4) { const wallets = Array.from({ length }, (_, i) => { const seed = ethers.id(`validator${i}`); @@ -72,23 +72,20 @@ describe('Secp256k1Prover', () => { results, }; - // Deploy the SedaDataTypes library first - const DataTypesFactory = await ethers.getContractFactory('SedaDataTypes'); - const dataTypes = await DataTypesFactory.deploy(); - // Deploy the contract - const ProverFactory = await ethers.getContractFactory('Secp256k1Prover', { - libraries: { - SedaDataTypes: await dataTypes.getAddress(), - }, - }); - const prover = await ProverFactory.deploy(initialBatch); + const ProverFactory = await ethers.getContractFactory('Secp256k1ProverV1'); + const prover = await upgrades.deployProxy(ProverFactory, [initialBatch], { initializer: 'initialize' }); + await prover.waitForDeployment(); return { prover, wallets, data }; } // Add a helper function to generate and sign a new batch - async function generateAndSignBatch(wallets: Wallet[], initialBatch: SedaDataTypes.BatchStruct, signerIndices = [0]) { + async function generateAndSignBatch( + wallets: Wallet[], + initialBatch: ProverDataTypes.BatchStruct, + signerIndices = [0], + ) { const { newBatchId, newBatch } = generateNewBatchWithId(initialBatch); const signatures = await Promise.all(signerIndices.map((i) => wallets[i].signingKey.sign(newBatchId).serialized)); return { newBatchId, newBatch, signatures }; @@ -100,7 +97,7 @@ describe('Secp256k1Prover', () => { prover, data: { initialBatch }, } = await loadFixture(deployProverFixture); - const lastBatchHeight = await 
prover.lastBatchHeight(); + const lastBatchHeight = await prover.getLastBatchHeight(); expect(lastBatchHeight).to.equal(initialBatch.batchHeight); }); }); @@ -112,8 +109,8 @@ describe('Secp256k1Prover', () => { const { newBatch, signatures } = await generateAndSignBatch(wallets, data.initialBatch, [0]); await prover.postBatch(newBatch, signatures, [data.validatorProofs[0]]); - const lastBatchHeight = await prover.lastBatchHeight(); - const lastValidatorsRoot = await prover.lastValidatorsRoot(); + const lastBatchHeight = await prover.getLastBatchHeight(); + const lastValidatorsRoot = await prover.getLastValidatorsRoot(); expect(lastBatchHeight).to.equal(newBatch.batchHeight); expect(lastValidatorsRoot).to.equal(newBatch.validatorsRoot); }); @@ -124,8 +121,8 @@ describe('Secp256k1Prover', () => { const { newBatch, signatures } = await generateAndSignBatch(wallets, data.initialBatch, [1, 2, 3]); await prover.postBatch(newBatch, signatures, data.validatorProofs.slice(1)); - const lastBatchHeight = await prover.lastBatchHeight(); - const lastValidatorsRoot = await prover.lastValidatorsRoot(); + const lastBatchHeight = await prover.getLastBatchHeight(); + const lastValidatorsRoot = await prover.getLastValidatorsRoot(); expect(lastBatchHeight).to.equal(newBatch.batchHeight); expect(lastValidatorsRoot).to.equal(newBatch.validatorsRoot); }); @@ -151,7 +148,7 @@ describe('Secp256k1Prover', () => { 'ConsensusNotReached', ); - const lastBatchHeight = await prover.lastBatchHeight(); + const lastBatchHeight = await prover.getLastBatchHeight(); expect(lastBatchHeight).to.equal(data.initialBatch.batchHeight); }); @@ -218,7 +215,7 @@ describe('Secp256k1Prover', () => { await prover.postBatch(newBatch, signatures, data.validatorProofs); - const lastBatchHeight = await prover.lastBatchHeight(); + const lastBatchHeight = await prover.getLastBatchHeight(); expect(lastBatchHeight).to.equal(newBatch.batchHeight); }); }); @@ -338,7 +335,7 @@ describe('Secp256k1Prover', () => { wallets.slice(0, validatorCount).map((wallet) => wallet.signingKey.sign(newBatchId).serialized), ); await prover.postBatch(newBatch, signatures, data.validatorProofs.slice(0, validatorCount)); - const lastBatchHeight = await prover.lastBatchHeight(); + const lastBatchHeight = await prover.getLastBatchHeight(); expect(lastBatchHeight).to.equal(newBatch.batchHeight); } } @@ -354,7 +351,7 @@ describe('Secp256k1Prover', () => { describe('batch id', () => { it('should generate the correct batch id for test vectors', async () => { - const testBatch: SedaDataTypes.BatchStruct = { + const testBatch: ProverDataTypes.BatchStruct = { batchHeight: 4, blockHeight: 134, resultsRoot: '0x49918c4e986fff80aeb3532466132920d2ffd8db2a9615e8d02dd0f02e19503a', @@ -364,17 +361,11 @@ describe('Secp256k1Prover', () => { const expectedBatchId = deriveBatchId(testBatch); expect(expectedBatchId).to.equal('0x9b8a1c156da9096bc89288e9d64df3c897435e962ae7402f0c25c97f3de76e94'); - // Deploy the SedaDataTypes library first - const DataTypesFactory = await ethers.getContractFactory('SedaDataTypes'); - const dataTypes = await DataTypesFactory.deploy(); - // Deploy the contract - const ProverFactory = await ethers.getContractFactory('Secp256k1Prover', { - libraries: { - SedaDataTypes: await dataTypes.getAddress(), - }, - }); - const prover = await ProverFactory.deploy(testBatch); + const ProverFactory = await ethers.getContractFactory('Secp256k1ProverV1'); + const prover = await upgrades.deployProxy(ProverFactory, [testBatch], { initializer: 'initialize' }); + await 
prover.waitForDeployment(); + expect(prover).to.emit(prover, 'BatchPosted').withArgs(testBatch.batchHeight, expectedBatchId); }); }); diff --git a/test/utils.ts b/test/utils.ts index 98fd610..02e8ff7 100644 --- a/test/utils.ts +++ b/test/utils.ts @@ -1,6 +1,5 @@ import { ethers } from 'hardhat'; - -import type { SedaDataTypes } from '../typechain-types/contracts/libraries/SedaDataTypes'; +import type { CoreRequestTypes, CoreResultTypes, ProverDataTypes } from '../ts-types'; export const SEDA_DATA_TYPES_VERSION = '0.0.1'; @@ -11,8 +10,8 @@ function padBigIntToBytes(value: bigint, byteLength: number): string { return ethers.zeroPadValue(ethers.toBeArray(value), byteLength); } -export function generateNewBatchWithId(initialBatch: SedaDataTypes.BatchStruct) { - const newBatch: SedaDataTypes.BatchStruct = { +export function generateNewBatchWithId(initialBatch: ProverDataTypes.BatchStruct) { + const newBatch: ProverDataTypes.BatchStruct = { ...initialBatch, batchHeight: BigInt(initialBatch.batchHeight) + BigInt(1), blockHeight: BigInt(initialBatch.blockHeight) + BigInt(1), @@ -22,7 +21,7 @@ export function generateNewBatchWithId(initialBatch: SedaDataTypes.BatchStruct) return { newBatchId, newBatch }; } -export function deriveBatchId(batch: SedaDataTypes.BatchStruct): string { +export function deriveBatchId(batch: ProverDataTypes.BatchStruct): string { return ethers.keccak256( ethers.concat([ padBigIntToBytes(BigInt(batch.batchHeight), 8), @@ -34,7 +33,7 @@ export function deriveBatchId(batch: SedaDataTypes.BatchStruct): string { ); } -export function deriveRequestId(request: SedaDataTypes.RequestInputsStruct): string { +export function deriveRequestId(request: CoreRequestTypes.RequestInputsStruct): string { return ethers.keccak256( ethers.concat([ ethers.keccak256(ethers.toUtf8Bytes(SEDA_DATA_TYPES_VERSION)), @@ -52,7 +51,7 @@ export function deriveRequestId(request: SedaDataTypes.RequestInputsStruct): str ); } -export function deriveDataResultId(dataResult: SedaDataTypes.ResultStruct): string { +export function deriveDataResultId(dataResult: CoreResultTypes.ResultStruct): string { return ethers.keccak256( ethers.concat([ ethers.keccak256(ethers.toUtf8Bytes(SEDA_DATA_TYPES_VERSION)), @@ -69,9 +68,20 @@ export function deriveDataResultId(dataResult: SedaDataTypes.ResultStruct): stri ); } +export function computeResultLeafHash(resultId: string): string { + return ethers.solidityPackedKeccak256(['bytes1', 'bytes32'], [RESULT_DOMAIN_SEPARATOR, ethers.getBytes(resultId)]); +} + +export function computeValidatorLeafHash(validator: string, votingPower: number): string { + return ethers.solidityPackedKeccak256( + ['bytes1', 'bytes', 'uint32'], + [SECP256K1_DOMAIN_SEPARATOR, validator, votingPower], + ); +} + export function generateDataFixtures(length: number): { - requests: SedaDataTypes.RequestInputsStruct[]; - results: SedaDataTypes.ResultStruct[]; + requests: CoreRequestTypes.RequestInputsStruct[]; + results: CoreResultTypes.ResultStruct[]; } { const requests = Array.from({ length }, (_, i) => ({ execProgramId: ethers.ZeroHash, @@ -95,7 +105,7 @@ export function generateDataFixtures(length: number): { exitCode: 0, result: ethers.keccak256(ethers.toUtf8Bytes('SUCCESS')), blockHeight: 0, - blockTimestamp: 0, + blockTimestamp: Math.floor(Date.now() / 1000) + 3600, gasUsed: 0, paybackAddress: ethers.ZeroAddress, sedaPayload: ethers.ZeroHash, @@ -104,14 +114,3 @@ export function generateDataFixtures(length: number): { return { requests, results }; } - -export function computeResultLeafHash(resultId: 
string): string { - return ethers.solidityPackedKeccak256(['bytes1', 'bytes32'], [RESULT_DOMAIN_SEPARATOR, ethers.getBytes(resultId)]); -} - -export function computeValidatorLeafHash(validator: string, votingPower: number): string { - return ethers.solidityPackedKeccak256( - ['bytes1', 'bytes', 'uint32'], - [SECP256K1_DOMAIN_SEPARATOR, validator, votingPower], - ); -} diff --git a/ts-types/index.ts b/ts-types/index.ts new file mode 100644 index 0000000..3b5584f --- /dev/null +++ b/ts-types/index.ts @@ -0,0 +1,5 @@ +import type { SedaDataTypes as CoreRequestTypes } from '../typechain-types/contracts/core/abstract/RequestHandlerBase'; +import type { SedaDataTypes as CoreResultTypes } from '../typechain-types/contracts/core/abstract/ResultHandlerBase'; +import type { SedaDataTypes as ProverDataTypes } from '../typechain-types/contracts/provers/abstract/ProverBase'; + +export type { ProverDataTypes, CoreRequestTypes, CoreResultTypes };
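
The new ts-types/index.ts barrel above simply re-exports the generated SedaDataTypes namespaces (from the RequestHandlerBase, ResultHandlerBase, and ProverBase typechain artifacts) under the stable aliases CoreRequestTypes, CoreResultTypes, and ProverDataTypes, so tests no longer import from deep typechain-types paths. As a minimal sketch of how a test helper might consume it — assuming the file sits next to test/utils.ts; the relative paths and the example batch values are illustrative only, mirroring the initial batch used in the proxy fixture:

// Sketch: consuming the ts-types barrel instead of deep typechain-types imports.
// Assumes this file lives in test/, alongside utils.ts; adjust paths otherwise.
import { ethers } from 'hardhat';
import type { ProverDataTypes } from '../ts-types';
import { deriveBatchId } from './utils';

// Batch literal typed against the re-exported ProverDataTypes namespace.
const batch: ProverDataTypes.BatchStruct = {
  batchHeight: 0,
  blockHeight: 0,
  validatorsRoot: ethers.ZeroHash,
  resultsRoot: ethers.ZeroHash,
  provingMetadata: ethers.ZeroHash,
};

// deriveBatchId (test/utils.ts) keccak256-hashes the packed batch fields,
// matching the batch id the prover contracts expect validators to sign.
const batchId = deriveBatchId(batch);
console.log(batchId);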