diff --git a/.gitignore b/.gitignore
index 86d3bfa13c..5caa70b1b7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,6 +21,7 @@ tmp_venv/*
 .vscode/settings.json
 /data
 /logs
+/sequencer_data
 /target
 /.vscode
 # Git hooks
diff --git a/Cargo.lock b/Cargo.lock
index 050c36691c..9112e3d225 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -10482,6 +10482,7 @@ dependencies = [
  "starknet_monitoring_endpoint",
  "starknet_sequencer_infra",
  "starknet_sequencer_node",
+ "starknet_state_sync",
  "starknet_task_executor",
  "strum 0.25.0",
  "tempfile",
@@ -10681,6 +10682,7 @@ dependencies = [
 "starknet_sequencer_infra",
 "starknet_sequencer_node",
 "starknet_sierra_compile",
+ "starknet_state_sync",
 "starknet_state_sync_types",
 "thiserror",
 "tokio",
diff --git a/config/sequencer/default_config.json b/config/sequencer/default_config.json
index a18908b48a..d1bc13a527 100644
--- a/config/sequencer/default_config.json
+++ b/config/sequencer/default_config.json
@@ -949,6 +949,176 @@
     "privacy": "Public",
     "value": ""
   },
+  "state_sync_config.network_config.advertised_multiaddr": {
+    "description": "The external address other peers see this node. If this is set, the node will not try to find out which addresses it has and will write this address as external instead",
+    "privacy": "Public",
+    "value": ""
+  },
+  "state_sync_config.network_config.advertised_multiaddr.#is_none": {
+    "description": "Flag for an optional field.",
+    "privacy": "TemporaryValue",
+    "value": true
+  },
+  "state_sync_config.network_config.bootstrap_peer_multiaddr": {
+    "description": "The multiaddress of the peer node. It should include the peer's id. For more info: https://docs.libp2p.io/concepts/fundamentals/peers/",
+    "privacy": "Public",
+    "value": ""
+  },
+  "state_sync_config.network_config.bootstrap_peer_multiaddr.#is_none": {
+    "description": "Flag for an optional field.",
+    "privacy": "TemporaryValue",
+    "value": true
+  },
+  "state_sync_config.network_config.chain_id": {
+    "description": "The chain to follow. For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.",
+    "pointer_target": "chain_id",
+    "privacy": "Public"
+  },
+  "state_sync_config.network_config.discovery_config.bootstrap_dial_retry_config.base_delay_millis": {
+    "description": "The base delay in milliseconds for the exponential backoff strategy.",
+    "privacy": "Public",
+    "value": 2
+  },
+  "state_sync_config.network_config.discovery_config.bootstrap_dial_retry_config.factor": {
+    "description": "The factor for the exponential backoff strategy.",
+    "privacy": "Public",
+    "value": 5
+  },
+  "state_sync_config.network_config.discovery_config.bootstrap_dial_retry_config.max_delay_seconds": {
+    "description": "The maximum delay in seconds for the exponential backoff strategy.",
+    "privacy": "Public",
+    "value": 5
+  },
+  "state_sync_config.network_config.discovery_config.heartbeat_interval": {
+    "description": "The interval between each discovery (Kademlia) query in milliseconds.",
+    "privacy": "Public",
+    "value": 100
+  },
+  "state_sync_config.network_config.idle_connection_timeout": {
+    "description": "Amount of time in seconds that a connection with no active sessions will stay alive.",
+    "privacy": "Public",
+    "value": 120
+  },
+  "state_sync_config.network_config.peer_manager_config.malicious_timeout_seconds": {
+    "description": "The duration in seconds a peer is blacklisted after being marked as malicious.",
+    "privacy": "Public",
+    "value": 31536000
+  },
+  "state_sync_config.network_config.peer_manager_config.unstable_timeout_millis": {
+    "description": "The duration in milliseconds a peer blacklisted after being reported as unstable.",
+    "privacy": "Public",
+    "value": 1000
+  },
+  "state_sync_config.network_config.quic_port": {
+    "description": "The port that the node listens on for incoming quic connections.",
+    "privacy": "Public",
+    "value": 10001
+  },
+  "state_sync_config.network_config.secret_key": {
+    "description": "The secret key used for building the peer id. If it's an empty string a random one will be used.",
+    "privacy": "Private",
+    "value": ""
+  },
+  "state_sync_config.network_config.session_timeout": {
+    "description": "Maximal time in seconds that each session can take before failing on timeout.",
+    "privacy": "Public",
+    "value": 120
+  },
+  "state_sync_config.network_config.tcp_port": {
+    "description": "The port that the node listens on for incoming tcp connections.",
+    "privacy": "Public",
+    "value": 10000
+  },
+  "state_sync_config.p2p_sync_client_config.buffer_size": {
+    "description": "Size of the buffer for read from the storage and for incoming responses.",
+    "privacy": "Public",
+    "value": 100000
+  },
+  "state_sync_config.p2p_sync_client_config.num_block_classes_per_query": {
+    "description": "The maximum amount of block's classes to ask from peers in each iteration.",
+    "privacy": "Public",
+    "value": 100
+  },
+  "state_sync_config.p2p_sync_client_config.num_block_state_diffs_per_query": {
+    "description": "The maximum amount of block's state diffs to ask from peers in each iteration.",
+    "privacy": "Public",
+    "value": 100
+  },
+  "state_sync_config.p2p_sync_client_config.num_block_transactions_per_query": {
+    "description": "The maximum amount of blocks to ask their transactions from peers in each iteration.",
+    "privacy": "Public",
+    "value": 100
+  },
+  "state_sync_config.p2p_sync_client_config.num_headers_per_query": {
+    "description": "The maximum amount of headers to ask from peers in each iteration.",
+    "privacy": "Public",
+    "value": 10000
+  },
+  "state_sync_config.p2p_sync_client_config.stop_sync_at_block_number": {
+    "description": "Stops the sync at given block number and closes the node cleanly. Used to run profiling on the node.",
+    "privacy": "Public",
+    "value": 1000
+  },
+  "state_sync_config.p2p_sync_client_config.stop_sync_at_block_number.#is_none": {
+    "description": "Flag for an optional field.",
+    "privacy": "TemporaryValue",
+    "value": true
+  },
+  "state_sync_config.p2p_sync_client_config.wait_period_for_new_data": {
+    "description": "Time in seconds to wait when a query returned with partial data before sending a new query",
+    "privacy": "Public",
+    "value": 5
+  },
+  "state_sync_config.storage_config.db_config.chain_id": {
+    "description": "The chain to follow. For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.",
+    "pointer_target": "chain_id",
+    "privacy": "Public"
+  },
+  "state_sync_config.storage_config.db_config.enforce_file_exists": {
+    "description": "Whether to enforce that the path exists. If true, `open_env` fails when the mdbx.dat file does not exist.",
+    "privacy": "Public",
+    "value": false
+  },
+  "state_sync_config.storage_config.db_config.growth_step": {
+    "description": "The growth step in bytes, must be greater than zero to allow the database to grow.",
+    "privacy": "Public",
+    "value": 4294967296
+  },
+  "state_sync_config.storage_config.db_config.max_size": {
+    "description": "The maximum size of the node's storage in bytes.",
+    "privacy": "Public",
+    "value": 1099511627776
+  },
+  "state_sync_config.storage_config.db_config.min_size": {
+    "description": "The minimum size of the node's storage in bytes.",
+    "privacy": "Public",
+    "value": 1048576
+  },
+  "state_sync_config.storage_config.db_config.path_prefix": {
+    "description": "Prefix of the path of the node's storage directory, the storage file path will be <path_prefix>/<chain_id>. The path is not created automatically.",
+    "privacy": "Public",
+    "value": "./sequencer_data"
+  },
+  "state_sync_config.storage_config.mmap_file_config.growth_step": {
+    "description": "The growth step in bytes, must be greater than max_object_size.",
+    "privacy": "Public",
+    "value": 1073741824
+  },
+  "state_sync_config.storage_config.mmap_file_config.max_object_size": {
+    "description": "The maximum size of a single object in the file in bytes",
+    "privacy": "Public",
+    "value": 268435456
+  },
+  "state_sync_config.storage_config.mmap_file_config.max_size": {
+    "description": "The maximum size of a memory mapped file in bytes. Must be greater than growth_step.",
+    "privacy": "Public",
+    "value": 1099511627776
+  },
+  "state_sync_config.storage_config.scope": {
+    "description": "The categories of data saved in storage.",
+    "privacy": "Public",
+    "value": "FullArchive"
+  },
   "strk_fee_token_address": {
     "description": "A required param! Address of the STRK fee token.",
     "param_type": "String",
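For orientation, the flat keys above are dumped from the nested Rust config structs that the rest of this diff wires up, so each key mirrors a field path on the node config. A minimal sketch of reading one of the new defaults back in code:

    use starknet_sequencer_node::config::node_config::SequencerNodeConfig;

    fn main() {
        // "state_sync_config.network_config.tcp_port" in the JSON above corresponds to this
        // field path; 10000 is the default value recorded in the dump.
        let config = SequencerNodeConfig::default();
        assert_eq!(config.state_sync_config.network_config.tcp_port, 10000);
    }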
diff --git a/crates/papyrus_storage/src/db/mod.rs b/crates/papyrus_storage/src/db/mod.rs
index c1b5da78ac..e7785226c6 100644
--- a/crates/papyrus_storage/src/db/mod.rs
+++ b/crates/papyrus_storage/src/db/mod.rs
@@ -31,7 +31,7 @@ use std::sync::Arc;
 
 use libmdbx::{DatabaseFlags, Geometry, PageSize, WriteMap};
 use papyrus_config::dumping::{ser_param, SerializeConfig};
-use papyrus_config::validators::{validate_ascii, validate_path_exists};
+use papyrus_config::validators::validate_ascii;
 use papyrus_config::{ParamPath, ParamPrivacyInput, SerializedParam};
 use papyrus_proc_macros::latency_histogram;
 use serde::{Deserialize, Serialize};
@@ -57,7 +57,6 @@ type DbValueType<'env> = Cow<'env, [u8]>;
 pub struct DbConfig {
     /// The path prefix of the database files. The final path is the path prefix followed by the
     /// chain id.
-    #[validate(custom = "validate_path_exists")]
     pub path_prefix: PathBuf,
     /// The [chain id](https://docs.rs/starknet_api/latest/starknet_api/core/struct.ChainId.html) of the Starknet network.
     #[validate(custom = "validate_ascii")]
diff --git a/crates/papyrus_storage/src/lib.rs b/crates/papyrus_storage/src/lib.rs
index e932d0d0db..4ad5e357f6 100644
--- a/crates/papyrus_storage/src/lib.rs
+++ b/crates/papyrus_storage/src/lib.rs
@@ -102,6 +102,7 @@ pub mod test_utils;
 
 use std::collections::{BTreeMap, HashMap};
 use std::fmt::Debug;
+use std::fs;
 use std::sync::Arc;
 
 use body::events::EventIndex;
@@ -128,7 +129,7 @@ use starknet_api::deprecated_contract_class::ContractClass as DeprecatedContract
 use starknet_api::state::{SierraContractClass, StateNumber, StorageKey, ThinStateDiff};
 use starknet_api::transaction::{Transaction, TransactionHash, TransactionOutput};
 use starknet_types_core::felt::Felt;
-use tracing::{debug, warn};
+use tracing::{debug, info, warn};
 use validator::Validate;
 use version::{StorageVersionError, Version};
 
@@ -162,6 +163,13 @@ pub const STORAGE_VERSION_BLOCKS: Version = Version { major: 4, minor: 0 };
 pub fn open_storage(
     storage_config: StorageConfig,
 ) -> StorageResult<(StorageReader, StorageWriter)> {
+    if !storage_config.db_config.path_prefix.exists()
+        && !storage_config.db_config.enforce_file_exists
+    {
+        fs::create_dir_all(storage_config.db_config.path_prefix.clone())?;
+        info!("Created storage directory: {:?}", storage_config.db_config.path());
+    }
+
     let (db_reader, mut db_writer) = open_env(&storage_config.db_config)?;
     let tables = Arc::new(Tables {
         block_hash_to_number: db_writer.create_simple_table("block_hash_to_number")?,
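Taken together, the two storage changes above move directory handling from config validation into open_storage itself: when enforce_file_exists is off and the path_prefix directory is missing, it is now created (and logged) instead of failing validation up front. A minimal sketch of the new flow, using an illustrative path:

    use std::path::PathBuf;

    use papyrus_storage::db::DbConfig;
    use papyrus_storage::{open_storage, StorageConfig, StorageReader, StorageResult, StorageWriter};

    fn open_fresh_storage() -> StorageResult<(StorageReader, StorageWriter)> {
        let db_config = DbConfig {
            // Illustrative path; the directory no longer has to exist beforehand.
            path_prefix: PathBuf::from("./sequencer_data_example"),
            enforce_file_exists: false,
            ..Default::default()
        };
        open_storage(StorageConfig { db_config, ..Default::default() })
    }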
diff --git a/crates/starknet_integration_tests/Cargo.toml b/crates/starknet_integration_tests/Cargo.toml
index 64c4cdf8e3..f4f9b475e3 100644
--- a/crates/starknet_integration_tests/Cargo.toml
+++ b/crates/starknet_integration_tests/Cargo.toml
@@ -36,6 +36,7 @@ starknet_mempool_p2p.workspace = true
 starknet_monitoring_endpoint = { workspace = true, features = ["testing"] }
 starknet_sequencer_infra = { workspace = true, features = ["testing"] }
 starknet_sequencer_node = { workspace = true, features = ["testing"] }
+starknet_state_sync.workspace = true
 starknet_task_executor.workspace = true
 strum.workspace = true
 tempfile.workspace = true
diff --git a/crates/starknet_integration_tests/src/config_utils.rs b/crates/starknet_integration_tests/src/config_utils.rs
index 90b7471a67..27aa8526bf 100644
--- a/crates/starknet_integration_tests/src/config_utils.rs
+++ b/crates/starknet_integration_tests/src/config_utils.rs
@@ -64,6 +64,8 @@ pub(crate) fn dump_config_file_changes(
         config.http_server_config.ip,
         config.http_server_config.port,
         config.consensus_manager_config.consensus_config.start_height,
+        config.state_sync_config.storage_config.db_config.path_prefix,
+        config.state_sync_config.network_config.tcp_port,
     );
     let node_config_path = dump_json_data(json_data, NODE_CONFIG_CHANGES_FILE_PATH, dir);
     assert!(node_config_path.exists(), "File does not exist: {:?}", node_config_path);
diff --git a/crates/starknet_integration_tests/src/flow_test_setup.rs b/crates/starknet_integration_tests/src/flow_test_setup.rs
index c9102f0150..ceed7643bf 100644
--- a/crates/starknet_integration_tests/src/flow_test_setup.rs
+++ b/crates/starknet_integration_tests/src/flow_test_setup.rs
@@ -81,6 +81,7 @@ pub struct SequencerSetup {
     // Handlers for the storage files, maintained so the files are not deleted.
     pub batcher_storage_file_handle: TempDir,
     pub rpc_storage_file_handle: TempDir,
+    pub state_sync_storage_file_handle: TempDir,
     // Handle of the sequencer node.
     pub sequencer_node_handle: JoinHandle>,
@@ -112,6 +113,7 @@ impl SequencerSetup {
             chain_info,
             rpc_server_addr,
             storage_for_test.batcher_storage_config,
+            storage_for_test.state_sync_storage_config,
             consensus_manager_config,
         )
         .await;
@@ -137,6 +139,7 @@ impl SequencerSetup {
             add_tx_http_client,
             batcher_storage_file_handle: storage_for_test.batcher_storage_handle,
             rpc_storage_file_handle: storage_for_test.rpc_storage_handle,
+            state_sync_storage_file_handle: storage_for_test.state_sync_storage_handle,
             sequencer_node_handle,
         }
     }
diff --git a/crates/starknet_integration_tests/src/integration_test_setup.rs b/crates/starknet_integration_tests/src/integration_test_setup.rs
index 286d207fc0..b0a6f37db9 100644
--- a/crates/starknet_integration_tests/src/integration_test_setup.rs
+++ b/crates/starknet_integration_tests/src/integration_test_setup.rs
@@ -29,6 +29,8 @@ pub struct IntegrationTestSetup {
     pub node_config_path: PathBuf,
     // Storage reader for the batcher.
     pub batcher_storage_config: StorageConfig,
+    // Storage reader for the state sync.
+    pub state_sync_storage_config: StorageConfig,
     // Handlers for the storage and config files, maintained so the files are not deleted. Since
     // these are only maintained to avoid dropping the handlers, private visibility suffices, and
     // as such, the '#[allow(dead_code)]' attributes are used to suppress the warning.
@@ -38,6 +40,8 @@ pub struct IntegrationTestSetup {
     #[allow(dead_code)]
     rpc_storage_handle: TempDir,
     #[allow(dead_code)]
     node_config_dir_handle: TempDir,
+    #[allow(dead_code)]
+    state_sync_storage_handle: TempDir,
 }
 
 impl IntegrationTestSetup {
@@ -61,6 +65,7 @@ impl IntegrationTestSetup {
             chain_info,
             rpc_server_addr,
             storage_for_test.batcher_storage_config,
+            storage_for_test.state_sync_storage_config,
             consensus_manager_configs.pop().unwrap(),
         )
         .await;
@@ -87,6 +92,8 @@ impl IntegrationTestSetup {
             rpc_storage_handle: storage_for_test.rpc_storage_handle,
             node_config_dir_handle,
             node_config_path,
+            state_sync_storage_handle: storage_for_test.state_sync_storage_handle,
+            state_sync_storage_config: config.state_sync_config.storage_config,
         }
     }
 }
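As the comments in both setup structs note, the TempDir handles are kept alive only so the backing directories are not deleted mid-test; a small sketch of the tempfile behavior this relies on:

    use tempfile::TempDir;

    fn main() -> std::io::Result<()> {
        let storage_dir = TempDir::new()?;
        let path = storage_dir.path().to_path_buf();
        assert!(path.exists());
        // Dropping the handle removes the directory, which is why the handles above are stored.
        drop(storage_dir);
        assert!(!path.exists());
        Ok(())
    }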
diff --git a/crates/starknet_integration_tests/src/state_reader.rs b/crates/starknet_integration_tests/src/state_reader.rs
index a04340f2a4..7462991e16 100644
--- a/crates/starknet_integration_tests/src/state_reader.rs
+++ b/crates/starknet_integration_tests/src/state_reader.rs
@@ -52,10 +52,13 @@ type ContractClassesMap =
     (Vec<(ClassHash, DeprecatedContractClass)>, Vec<(ClassHash, CasmContractClass)>);
 
 pub struct StorageTestSetup {
+    // TODO(Shahak): Remove rpc storage reader and handle
     pub rpc_storage_reader: StorageReader,
     pub rpc_storage_handle: TempDir,
     pub batcher_storage_config: StorageConfig,
     pub batcher_storage_handle: TempDir,
+    pub state_sync_storage_config: StorageConfig,
+    pub state_sync_storage_handle: TempDir,
 }
 
 impl StorageTestSetup {
@@ -68,12 +71,23 @@ impl StorageTestSetup {
             .scope(StorageScope::StateOnly)
             .chain_id(chain_info.chain_id.clone())
             .build();
-        create_test_state(&mut batcher_storage_writer, chain_info, test_defined_accounts);
+        create_test_state(&mut batcher_storage_writer, chain_info, test_defined_accounts.clone());
+        let (
+            (_, mut state_sync_storage_writer),
+            state_sync_storage_config,
+            state_sync_storage_handle,
+        ) = TestStorageBuilder::default()
+            .scope(StorageScope::FullArchive)
+            .chain_id(chain_info.chain_id.clone())
+            .build();
+        create_test_state(&mut state_sync_storage_writer, chain_info, test_defined_accounts);
         Self {
             rpc_storage_reader,
             rpc_storage_handle: rpc_storage_file_handle,
             batcher_storage_config,
             batcher_storage_handle: batcher_storage_file_handle,
+            state_sync_storage_config,
+            state_sync_storage_handle,
         }
     }
 }
diff --git a/crates/starknet_integration_tests/src/utils.rs b/crates/starknet_integration_tests/src/utils.rs
index d1b7b4141f..7b13131791 100644
--- a/crates/starknet_integration_tests/src/utils.rs
+++ b/crates/starknet_integration_tests/src/utils.rs
@@ -9,6 +9,7 @@ use mempool_test_utils::starknet_api_test_utils::{AccountId, MultiAccountTransac
 use papyrus_consensus::config::ConsensusConfig;
 use papyrus_network::network_manager::test_utils::create_network_configs_connected_to_broadcast_channels;
 use papyrus_network::network_manager::BroadcastTopicChannels;
+use papyrus_network::NetworkConfig;
 use papyrus_protobuf::consensus::{ProposalPart, StreamMessage};
 use papyrus_storage::StorageConfig;
 use starknet_api::block::BlockNumber;
@@ -27,6 +28,7 @@ use starknet_http_server::config::HttpServerConfig;
 use starknet_sequencer_infra::test_utils::get_available_socket;
 use starknet_sequencer_node::config::node_config::SequencerNodeConfig;
 use starknet_sequencer_node::config::test_utils::RequiredParams;
+use starknet_state_sync::config::StateSyncConfig;
 
 pub fn create_chain_info() -> ChainInfo {
     let mut chain_info = ChainInfo::create_for_testing();
@@ -42,6 +44,7 @@ pub async fn create_config(
     chain_info: ChainInfo,
     rpc_server_addr: SocketAddr,
     batcher_storage_config: StorageConfig,
+    state_sync_storage_config: StorageConfig,
     consensus_manager_config: ConsensusManagerConfig,
 ) -> (SequencerNodeConfig, RequiredParams) {
     let fee_token_addresses = chain_info.fee_token_addresses.clone();
@@ -49,6 +52,7 @@ pub async fn create_config(
     let gateway_config = create_gateway_config(chain_info.clone()).await;
     let http_server_config = create_http_server_config().await;
     let rpc_state_reader_config = test_rpc_state_reader_config(rpc_server_addr);
+    let state_sync_config = create_state_sync_config(state_sync_storage_config);
 
     (
         SequencerNodeConfig {
@@ -57,6 +61,7 @@
             gateway_config,
             http_server_config,
             rpc_state_reader_config,
+            state_sync_config,
             ..SequencerNodeConfig::default()
         },
         RequiredParams {
@@ -231,3 +236,15 @@ pub fn create_batcher_config(
 pub fn run_integration_test() -> bool {
     std::env::var("SEQUENCER_INTEGRATION_TESTS").is_ok()
 }
+
+pub fn create_state_sync_config(state_sync_storage_config: StorageConfig) -> StateSyncConfig {
+    const STATE_SYNC_NETWORK_CONFIG_TCP_PORT_FOR_TESTING: u16 = 12345;
+    StateSyncConfig {
+        storage_config: state_sync_storage_config,
+        network_config: NetworkConfig {
+            tcp_port: STATE_SYNC_NETWORK_CONFIG_TCP_PORT_FOR_TESTING,
+            ..Default::default()
+        },
+        ..Default::default()
+    }
+}
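The new create_state_sync_config helper above is what the touched tests use to wire a state sync section into their node configs; a condensed sketch of that wiring (the storage config is assumed to come from the test's own StorageTestSetup):

    use papyrus_storage::StorageConfig;
    use starknet_integration_tests::utils::create_state_sync_config;
    use starknet_sequencer_node::config::node_config::SequencerNodeConfig;

    fn test_node_config(state_sync_storage_config: StorageConfig) -> SequencerNodeConfig {
        SequencerNodeConfig {
            state_sync_config: create_state_sync_config(state_sync_storage_config),
            ..SequencerNodeConfig::default()
        }
    }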
diff --git a/crates/starknet_integration_tests/tests/mempool_p2p_flow_test.rs b/crates/starknet_integration_tests/tests/mempool_p2p_flow_test.rs
index cd60ca05b8..dda26ab221 100644
--- a/crates/starknet_integration_tests/tests/mempool_p2p_flow_test.rs
+++ b/crates/starknet_integration_tests/tests/mempool_p2p_flow_test.rs
@@ -17,6 +17,7 @@ use starknet_integration_tests::utils::{
     create_gateway_config,
     create_http_server_config,
     create_integration_test_tx_generator,
+    create_state_sync_config,
     run_integration_test_scenario,
     test_rpc_state_reader_config,
 };
@@ -76,6 +77,7 @@ async fn test_mempool_sends_tx_to_other_peer(mut tx_generator: MultiAccountTrans
     let gateway_config = create_gateway_config(chain_info).await;
     let http_server_config = create_http_server_config().await;
     let rpc_state_reader_config = test_rpc_state_reader_config(rpc_server_addr);
+    let state_sync_config = create_state_sync_config(storage_for_test.state_sync_storage_config);
     let (mut network_configs, mut broadcast_channels) =
         create_network_configs_connected_to_broadcast_channels::(
             1,
@@ -90,6 +92,7 @@ async fn test_mempool_sends_tx_to_other_peer(mut tx_generator: MultiAccountTrans
         http_server_config,
         rpc_state_reader_config,
         mempool_p2p_config,
+        state_sync_config,
         ..SequencerNodeConfig::default()
     };
diff --git a/crates/starknet_sequencer_node/Cargo.toml b/crates/starknet_sequencer_node/Cargo.toml
index 29aa72a10a..b85f1df975 100644
--- a/crates/starknet_sequencer_node/Cargo.toml
+++ b/crates/starknet_sequencer_node/Cargo.toml
@@ -35,6 +35,7 @@ starknet_mempool_types.workspace = true
 starknet_monitoring_endpoint.workspace = true
 starknet_sequencer_infra.workspace = true
 starknet_sierra_compile.workspace = true
+starknet_state_sync.workspace = true
 starknet_state_sync_types.workspace = true
 thiserror = { workspace = true, optional = true }
 tokio.workspace = true
diff --git a/crates/starknet_sequencer_node/src/components.rs b/crates/starknet_sequencer_node/src/components.rs
index 1a3f76a870..8c7d1d8441 100644
--- a/crates/starknet_sequencer_node/src/components.rs
+++ b/crates/starknet_sequencer_node/src/components.rs
@@ -12,6 +12,8 @@ use starknet_monitoring_endpoint::monitoring_endpoint::{
     create_monitoring_endpoint,
     MonitoringEndpoint,
 };
+use starknet_state_sync::runner::StateSyncRunner;
+use starknet_state_sync::{create_state_sync_and_runner, StateSync};
 use starknet_state_sync_types::communication::EmptyStateSyncClient;
 
 use crate::clients::SequencerNodeClients;
@@ -28,6 +30,8 @@ pub struct SequencerNodeComponents {
     pub monitoring_endpoint: Option<MonitoringEndpoint>,
     pub mempool_p2p_propagator: Option<MempoolP2pPropagator>,
     pub mempool_p2p_runner: Option<MempoolP2pRunner>,
+    pub state_sync: Option<StateSync>,
+    pub state_sync_runner: Option<StateSyncRunner>,
 }
 
 pub fn create_node_components(
@@ -122,6 +126,18 @@ pub fn create_node_components(
         ReactiveComponentExecutionMode::Disabled | ReactiveComponentExecutionMode::Remote => None,
     };
 
+    let (state_sync, state_sync_runner) = match config.components.state_sync.execution_mode {
+        ReactiveComponentExecutionMode::LocalExecutionWithRemoteDisabled
+        | ReactiveComponentExecutionMode::LocalExecutionWithRemoteEnabled => {
+            let (state_sync, state_sync_runner) =
+                create_state_sync_and_runner(config.state_sync_config.clone());
+            (Some(state_sync), Some(state_sync_runner))
+        }
+        ReactiveComponentExecutionMode::Disabled | ReactiveComponentExecutionMode::Remote => {
+            (None, None)
+        }
+    };
+
     SequencerNodeComponents {
         batcher,
         consensus_manager,
@@ -131,5 +147,7 @@ pub fn create_node_components(
         monitoring_endpoint,
         mempool_p2p_propagator,
         mempool_p2p_runner,
+        state_sync,
+        state_sync_runner,
     }
 }
diff --git a/crates/starknet_sequencer_node/src/config/node_config.rs b/crates/starknet_sequencer_node/src/config/node_config.rs
index e0b5112b4f..a0541f2776 100644
--- a/crates/starknet_sequencer_node/src/config/node_config.rs
+++ b/crates/starknet_sequencer_node/src/config/node_config.rs
@@ -26,6 +26,7 @@ use starknet_http_server::config::HttpServerConfig;
 use starknet_mempool_p2p::config::MempoolP2pConfig;
 use starknet_monitoring_endpoint::config::MonitoringEndpointConfig;
 use starknet_sierra_compile::config::SierraToCasmCompilationConfig;
+use starknet_state_sync::config::StateSyncConfig;
 use validator::Validate;
 
 use crate::config::component_config::ComponentConfig;
@@ -50,6 +51,8 @@ pub static CONFIG_POINTERS: LazyLock = LazyLock::new(|| {
"consensus_manager_config.consensus_config.network_config.chain_id", "gateway_config.chain_info.chain_id", "mempool_p2p_config.network_config.chain_id", + "state_sync_config.storage_config.db_config.chain_id", + "state_sync_config.network_config.chain_id", ]), ), ( @@ -114,6 +117,8 @@ pub struct SequencerNodeConfig { pub mempool_p2p_config: MempoolP2pConfig, #[validate] pub monitoring_endpoint_config: MonitoringEndpointConfig, + #[validate] + pub state_sync_config: StateSyncConfig, } impl SerializeConfig for SequencerNodeConfig { @@ -134,6 +139,7 @@ impl SerializeConfig for SequencerNodeConfig { self.monitoring_endpoint_config.dump(), "monitoring_endpoint_config", ), + append_sub_config_name(self.state_sync_config.dump(), "state_sync_config"), ]; sub_configs.into_iter().flatten().collect() diff --git a/crates/starknet_state_sync/src/config.rs b/crates/starknet_state_sync/src/config.rs index 7a7061fb25..cb0a371acc 100644 --- a/crates/starknet_state_sync/src/config.rs +++ b/crates/starknet_state_sync/src/config.rs @@ -1,14 +1,16 @@ use std::collections::BTreeMap; +use std::path::PathBuf; use papyrus_config::dumping::{append_sub_config_name, SerializeConfig}; use papyrus_config::{ParamPath, SerializedParam}; use papyrus_network::NetworkConfig; use papyrus_p2p_sync::client::P2PSyncClientConfig; +use papyrus_storage::db::DbConfig; use papyrus_storage::StorageConfig; use serde::{Deserialize, Serialize}; use validator::Validate; -#[derive(Debug, Default, Deserialize, Serialize, Clone, PartialEq, Validate)] +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Validate)] pub struct StateSyncConfig { #[validate] pub storage_config: StorageConfig, @@ -30,3 +32,19 @@ impl SerializeConfig for StateSyncConfig { .collect() } } + +impl Default for StateSyncConfig { + fn default() -> Self { + Self { + storage_config: StorageConfig { + db_config: DbConfig { + path_prefix: PathBuf::from("./sequencer_data"), + ..Default::default() + }, + ..Default::default() + }, + p2p_sync_client_config: Default::default(), + network_config: Default::default(), + } + } +}