diff --git a/nativelink-config/src/cas_server.rs b/nativelink-config/src/cas_server.rs index 60576696d..40574379c 100644 --- a/nativelink-config/src/cas_server.rs +++ b/nativelink-config/src/cas_server.rs @@ -32,15 +32,15 @@ pub type SchedulerRefName = String; /// Used when the config references `instance_name` in the protocol. pub type InstanceName = String; -#[allow(non_camel_case_types)] #[derive(Deserialize, Debug, Default, Clone, Copy)] +#[serde(rename_all = "snake_case")] pub enum HttpCompressionAlgorithm { /// No compression. #[default] - none, + None, /// Zlib compression. - gzip, + Gzip, } /// Note: Compressing data in the cloud rarely has a benefit, since most @@ -360,11 +360,11 @@ pub struct HttpServerConfig { pub experimental_http2_max_header_list_size: Option, } -#[allow(non_camel_case_types)] #[derive(Deserialize, Debug)] +#[serde(rename_all = "snake_case")] pub enum ListenerConfig { /// Listener for HTTP/HTTPS/HTTP2 sockets. - http(HttpListener), + Http(HttpListener), } #[derive(Deserialize, Debug)] @@ -408,18 +408,18 @@ pub struct ServerConfig { pub services: Option, } -#[allow(non_camel_case_types)] #[derive(Deserialize, Debug)] +#[serde(rename_all = "snake_case")] pub enum WorkerProperty { /// List of static values. /// Note: Generally there should only ever be 1 value, but if the platform /// property key is PropertyType::Priority it may have more than one value. #[serde(deserialize_with = "convert_vec_string_with_shellexpand")] - values(Vec), + Values(Vec), /// A dynamic configuration. The string will be executed as a command /// (not shell) and will be split by "\n" (new line character). - query_cmd(String), + QueryCmd(String), } /// Generic config for an endpoint and associated configs. @@ -438,35 +438,35 @@ pub struct EndpointConfig { pub tls_config: Option, } -#[allow(non_camel_case_types)] #[derive(Copy, Clone, Deserialize, Debug, Default)] +#[serde(rename_all = "snake_case")] pub enum UploadCacheResultsStrategy { /// Only upload action results with an exit code of 0. #[default] - success_only, + SuccessOnly, /// Don't upload any action results. - never, + Never, /// Upload all action results that complete. - everything, + Everything, /// Only upload action results that fail. - failures_only, + FailuresOnly, } -#[allow(non_camel_case_types)] #[derive(Clone, Deserialize, Debug)] +#[serde(rename_all = "snake_case")] pub enum EnvironmentSource { /// The name of the platform property in the action to get the value from. - property(String), + Property(String), /// The raw value to set. - value(#[serde(deserialize_with = "convert_string_with_shellexpand")] String), + Value(#[serde(deserialize_with = "convert_string_with_shellexpand")] String), /// The max amount of time in milliseconds the command is allowed to run /// (requested by the client). - timeout_millis, + TimeoutMillis, /// A special file path will be provided that can be used to communicate /// with the parent process about out-of-band information. This file @@ -484,7 +484,7 @@ pub enum EnvironmentSource { /// /// All fields are optional, file does not need to be created and may be /// empty. - side_channel_file, + SideChannelFile, /// A "root" directory for the action. This directory can be used to /// store temporary files that are not needed after the action has @@ -499,7 +499,7 @@ pub enum EnvironmentSource { /// variable, `mkdir $ENV_VAR_NAME/tmp` and `export TMPDIR=$ENV_VAR_NAME/tmp`. /// Another example might be to bind-mount the `/tmp` path in a container to /// this path in `entrypoint`. 
- action_directory, + ActionDirectory, } #[derive(Deserialize, Debug, Default)] @@ -653,11 +653,11 @@ pub struct LocalWorkerConfig { pub additional_environment: Option>, } -#[allow(non_camel_case_types)] #[derive(Deserialize, Debug)] +#[serde(rename_all = "snake_case")] pub enum WorkerConfig { /// A worker type that executes jobs locally on this machine. - local(LocalWorkerConfig), + Local(LocalWorkerConfig), } #[derive(Deserialize, Debug, Clone, Copy)] diff --git a/nativelink-config/src/schedulers.rs b/nativelink-config/src/schedulers.rs index 062fb9c98..d33002190 100644 --- a/nativelink-config/src/schedulers.rs +++ b/nativelink-config/src/schedulers.rs @@ -19,50 +19,50 @@ use serde::Deserialize; use crate::serde_utils::{convert_duration_with_shellexpand, convert_numeric_with_shellexpand}; use crate::stores::{GrpcEndpoint, Retry, StoreRefName}; -#[allow(non_camel_case_types)] #[derive(Deserialize, Debug)] +#[serde(rename_all = "snake_case")] pub enum SchedulerConfig { - simple(SimpleScheduler), - grpc(GrpcScheduler), - cache_lookup(CacheLookupScheduler), - property_modifier(PropertyModifierScheduler), + Simple(SimpleScheduler), + Grpc(GrpcScheduler), + CacheLookup(CacheLookupScheduler), + PropertyModifier(PropertyModifierScheduler), } /// When the scheduler matches tasks to workers that are capable of running /// the task, this value will be used to determine how the property is treated. -#[allow(non_camel_case_types)] #[derive(Deserialize, Debug, Clone, Copy, Hash, Eq, PartialEq)] +#[serde(rename_all = "snake_case")] pub enum PropertyType { /// Requires the platform property to be a u64 and when the scheduler looks /// for appropriate worker nodes that are capable of executing the task, /// the task will not run on a node that has less than this value. - minimum, + Minimum, /// Requires the platform property to be a string and when the scheduler /// looks for appropriate worker nodes that are capable of executing the /// task, the task will not run on a node that does not have this property /// set to the value with exact string match. - exact, + Exact, /// Does not restrict on this value and instead will be passed to the worker /// as an informational piece. /// TODO(allada) In the future this will be used by the scheduler and worker /// to cause the scheduler to prefer certain workers over others, but not /// restrict them based on these values. - priority, + Priority, } /// When a worker is being searched for to run a job, this will be used /// on how to choose which worker should run the job when multiple /// workers are able to run the task. -#[allow(non_camel_case_types)] #[derive(Copy, Clone, Deserialize, Debug, Default)] +#[serde(rename_all = "snake_case")] pub enum WorkerAllocationStrategy { /// Prefer workers that have been least recently used to run a job. #[default] - least_recently_used, + LeastRecentlyUsed, /// Prefer workers that have been most recently used to run a job. - most_recently_used, + MostRecentlyUsed, } #[derive(Deserialize, Debug, Default)] @@ -168,13 +168,13 @@ pub struct PlatformPropertyAddition { pub value: String, } -#[allow(non_camel_case_types)] #[derive(Deserialize, Debug, Clone)] +#[serde(rename_all = "snake_case")] pub enum PropertyModification { /// Add a property to the action properties. - add(PlatformPropertyAddition), + Add(PlatformPropertyAddition), /// Remove a named property from the action. 
- remove(String), + Remove(String), } #[derive(Deserialize, Debug)] diff --git a/nativelink-config/src/stores.rs b/nativelink-config/src/stores.rs index fd4bcfb2f..d4a9e6af6 100644 --- a/nativelink-config/src/stores.rs +++ b/nativelink-config/src/stores.rs @@ -24,20 +24,20 @@ use crate::serde_utils::{ /// in the `CasConfig::stores`'s map key. pub type StoreRefName = String; -#[allow(non_camel_case_types)] #[derive(Serialize, Deserialize, Debug, Clone, Copy)] +#[serde(rename_all = "snake_case")] pub enum ConfigDigestHashFunction { /// Use the sha256 hash function. /// - sha256, + Sha256, /// Use the blake3 hash function. /// - blake3, + Blake3, } -#[allow(non_camel_case_types)] #[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "snake_case")] pub enum StoreConfig { /// Memory store will store all data in a hashmap in memory. /// @@ -52,7 +52,7 @@ pub enum StoreConfig { /// } /// ``` /// - memory(MemoryStore), + Memory(MemoryStore), /// S3 store will use Amazon's S3 service as a backend to store /// the files. This configuration can be used to share files @@ -76,7 +76,7 @@ pub enum StoreConfig { /// } /// ``` /// - experimental_s3_store(S3Store), + ExperimentalS3Store(S3Store), /// Verify store is used to apply verifications to an underlying /// store implementation. It is strongly encouraged to validate @@ -100,7 +100,7 @@ pub enum StoreConfig { /// } /// ``` /// - verify(Box), + Verify(Box), /// Completeness checking store verifies if the /// output files & folders exist in the CAS before forwarding @@ -128,7 +128,7 @@ pub enum StoreConfig { /// } /// ``` /// - completeness_checking(Box), + CompletenessChecking(Box), /// A compression store that will compress the data inbound and /// outbound. There will be a non-trivial cost to compress and @@ -156,7 +156,7 @@ pub enum StoreConfig { /// } /// ``` /// - compression(Box), + Compression(Box), /// A dedup store will take the inputs and run a rolling hash /// algorithm on them to slice the input into smaller parts then @@ -221,7 +221,7 @@ pub enum StoreConfig { /// } /// ``` /// - dedup(Box), + Dedup(Box), /// Existence store will wrap around another store and cache calls /// to has so that subsequent has_with_results calls will be @@ -248,7 +248,7 @@ pub enum StoreConfig { /// } /// ``` /// - existence_cache(Box), + ExistenceCache(Box), /// FastSlow store will first try to fetch the data from the `fast` /// store and then if it does not exist try the `slow` store. @@ -291,7 +291,7 @@ pub enum StoreConfig { /// } /// ``` /// - fast_slow(Box), + FastSlow(Box), /// Shards the data to multiple stores. This is useful for cases /// when you want to distribute the load across multiple stores. @@ -313,7 +313,7 @@ pub enum StoreConfig { /// } /// ``` /// - shard(ShardStore), + Shard(ShardStore), /// Stores the data on the filesystem. This store is designed for /// local persistent storage. Restarts of this program should restore @@ -334,7 +334,7 @@ pub enum StoreConfig { /// } /// ``` /// - filesystem(FilesystemStore), + Filesystem(FilesystemStore), /// Store used to reference a store in the root store manager. /// This is useful for cases when you want to share a store in different @@ -349,7 +349,8 @@ pub enum StoreConfig { /// } /// ``` /// - ref_store(RefStore), + #[serde(rename = "ref_store")] + Ref(RefStore), /// Uses the size field of the digest to separate which store to send the /// data. 
This is useful for cases when you'd like to put small objects @@ -377,7 +378,7 @@ pub enum StoreConfig { /// } /// ``` /// - size_partitioning(Box), + SizePartitioning(Box), /// This store will pass-through calls to another GRPC store. This store /// is not designed to be used as a sub-store of another store, but it @@ -400,7 +401,7 @@ pub enum StoreConfig { /// } /// ``` /// - grpc(GrpcStore), + Grpc(GrpcStore), /// Stores data in any stores compatible with Redis APIs. /// @@ -417,7 +418,8 @@ pub enum StoreConfig { /// } /// ``` /// - redis_store(RedisStore), + #[serde(rename = "redis_store")] + Redis(RedisStore), /// Noop store is a store that sends streams into the void and all data /// retrieval will return 404 (NotFound). This can be useful for cases @@ -429,7 +431,7 @@ pub enum StoreConfig { /// "noop": {} /// ``` /// - noop, + Noop, } /// Configuration for an individual shard of the store. @@ -666,8 +668,8 @@ pub struct Lz4Config { pub max_decode_block_size: u32, } -#[allow(non_camel_case_types)] #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +#[serde(rename_all = "snake_case")] pub enum CompressionAlgorithm { /// LZ4 compression algorithm is extremely fast for compression and /// decompression, however does not perform very well in compression @@ -676,7 +678,7 @@ pub enum CompressionAlgorithm { /// compressible. /// /// see: - lz4(Lz4Config), + Lz4(Lz4Config), } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -787,13 +789,13 @@ pub struct S3Store { pub disable_http2: bool, } -#[allow(non_camel_case_types)] #[derive(Serialize, Deserialize, Debug, Clone, Copy)] +#[serde(rename_all = "lowercase")] pub enum StoreType { /// The store is content addressable storage. - cas, + Cas, /// The store is an action cache. - ac, + Ac, } #[derive(Serialize, Deserialize, Debug, Clone)] diff --git a/nativelink-scheduler/src/api_worker_scheduler.rs b/nativelink-scheduler/src/api_worker_scheduler.rs index 1ecd6ac82..8bfec57a4 100644 --- a/nativelink-scheduler/src/api_worker_scheduler.rs +++ b/nativelink-scheduler/src/api_worker_scheduler.rs @@ -162,11 +162,11 @@ impl ApiWorkerSchedulerImpl { let mut workers_iter = self.workers.iter(); let workers_iter = match self.allocation_strategy { // Use rfind to get the least recently used that satisfies the properties. - WorkerAllocationStrategy::least_recently_used => workers_iter.rfind(|(_, w)| { + WorkerAllocationStrategy::LeastRecentlyUsed => workers_iter.rfind(|(_, w)| { w.can_accept_work() && platform_properties.is_satisfied_by(&w.platform_properties) }), // Use find to get the most recently used that satisfies the properties. 
- WorkerAllocationStrategy::most_recently_used => workers_iter.find(|(_, w)| { + WorkerAllocationStrategy::MostRecentlyUsed => workers_iter.find(|(_, w)| { w.can_accept_work() && platform_properties.is_satisfied_by(&w.platform_properties) }), }; diff --git a/nativelink-scheduler/src/default_scheduler_factory.rs b/nativelink-scheduler/src/default_scheduler_factory.rs index a01c4dccd..d6a8bf124 100644 --- a/nativelink-scheduler/src/default_scheduler_factory.rs +++ b/nativelink-scheduler/src/default_scheduler_factory.rs @@ -42,12 +42,12 @@ fn inner_scheduler_factory( store_manager: &StoreManager, ) -> Result { let scheduler: SchedulerFactoryResults = match scheduler_type_cfg { - SchedulerConfig::simple(config) => { + SchedulerConfig::Simple(config) => { let (action_scheduler, worker_scheduler) = SimpleScheduler::new(config); (Some(action_scheduler), Some(worker_scheduler)) } - SchedulerConfig::grpc(config) => (Some(Arc::new(GrpcScheduler::new(config)?)), None), - SchedulerConfig::cache_lookup(config) => { + SchedulerConfig::Grpc(config) => (Some(Arc::new(GrpcScheduler::new(config)?)), None), + SchedulerConfig::CacheLookup(config) => { let ac_store = store_manager .get_store(&config.ac_store) .err_tip(|| format!("'ac_store': '{}' does not exist", config.ac_store))?; @@ -60,7 +60,7 @@ fn inner_scheduler_factory( )?); (Some(cache_lookup_scheduler), worker_scheduler) } - SchedulerConfig::property_modifier(config) => { + SchedulerConfig::PropertyModifier(config) => { let (action_scheduler, worker_scheduler) = inner_scheduler_factory(&config.scheduler, store_manager) .err_tip(|| "In nested PropertyModifierScheduler construction")?; diff --git a/nativelink-scheduler/src/platform_property_manager.rs b/nativelink-scheduler/src/platform_property_manager.rs index 100889def..4dc9bc27e 100644 --- a/nativelink-scheduler/src/platform_property_manager.rs +++ b/nativelink-scheduler/src/platform_property_manager.rs @@ -77,7 +77,7 @@ impl PlatformPropertyManager { pub fn make_prop_value(&self, key: &str, value: &str) -> Result { if let Some(prop_type) = self.known_properties.get(key) { return match prop_type { - PropertyType::minimum => Ok(PlatformPropertyValue::Minimum( + PropertyType::Minimum => Ok(PlatformPropertyValue::Minimum( value.parse::().err_tip_with_code(|e| { ( Code::InvalidArgument, @@ -85,8 +85,8 @@ impl PlatformPropertyManager { ) })?, )), - PropertyType::exact => Ok(PlatformPropertyValue::Exact(value.to_string())), - PropertyType::priority => Ok(PlatformPropertyValue::Priority(value.to_string())), + PropertyType::Exact => Ok(PlatformPropertyValue::Exact(value.to_string())), + PropertyType::Priority => Ok(PlatformPropertyValue::Priority(value.to_string())), }; } Err(make_input_err!("Unknown platform property '{}'", key)) diff --git a/nativelink-scheduler/src/property_modifier_scheduler.rs b/nativelink-scheduler/src/property_modifier_scheduler.rs index 49044cb58..120779c15 100644 --- a/nativelink-scheduler/src/property_modifier_scheduler.rs +++ b/nativelink-scheduler/src/property_modifier_scheduler.rs @@ -65,10 +65,10 @@ impl PropertyModifierScheduler { ); for modification in &self.modifications { match modification { - PropertyModification::remove(name) => { + PropertyModification::Remove(name) => { known_properties.insert(name.clone()); } - PropertyModification::add(_) => (), + PropertyModification::Add(_) => (), } } let final_known_properties: Vec = known_properties.into_iter().collect(); @@ -87,10 +87,10 @@ impl PropertyModifierScheduler { let action_info_mut = Arc::make_mut(&mut action_info); 
for modification in &self.modifications { match modification { - PropertyModification::add(addition) => action_info_mut + PropertyModification::Add(addition) => action_info_mut .platform_properties .insert(addition.name.clone(), addition.value.clone()), - PropertyModification::remove(name) => { + PropertyModification::Remove(name) => { action_info_mut.platform_properties.remove(name) } }; diff --git a/nativelink-scheduler/tests/property_modifier_scheduler_test.rs b/nativelink-scheduler/tests/property_modifier_scheduler_test.rs index 5f8d6132f..b9e08f51a 100644 --- a/nativelink-scheduler/tests/property_modifier_scheduler_test.rs +++ b/nativelink-scheduler/tests/property_modifier_scheduler_test.rs @@ -44,7 +44,7 @@ fn make_modifier_scheduler(modifications: Vec) -> TestCont let mock_scheduler = Arc::new(MockActionScheduler::new()); let config = nativelink_config::schedulers::PropertyModifierScheduler { modifications, - scheduler: Box::new(nativelink_config::schedulers::SchedulerConfig::simple( + scheduler: Box::new(nativelink_config::schedulers::SchedulerConfig::Simple( nativelink_config::schedulers::SimpleScheduler::default(), )), }; @@ -60,7 +60,7 @@ async fn add_action_adds_property() -> Result<(), Error> { let name = "name".to_string(); let value = "value".to_string(); let context = - make_modifier_scheduler(vec![PropertyModification::add(PlatformPropertyAddition { + make_modifier_scheduler(vec![PropertyModification::Add(PlatformPropertyAddition { name: name.clone(), value: value.clone(), })]); @@ -98,7 +98,7 @@ async fn add_action_overwrites_property() -> Result<(), Error> { let original_value = "value".to_string(); let replaced_value = "replaced".to_string(); let context = - make_modifier_scheduler(vec![PropertyModification::add(PlatformPropertyAddition { + make_modifier_scheduler(vec![PropertyModification::Add(PlatformPropertyAddition { name: name.clone(), value: replaced_value.clone(), })]); @@ -141,8 +141,8 @@ async fn add_action_property_added_after_remove() -> Result<(), Error> { let name = "name".to_string(); let value = "value".to_string(); let context = make_modifier_scheduler(vec![ - PropertyModification::remove(name.clone()), - PropertyModification::add(PlatformPropertyAddition { + PropertyModification::Remove(name.clone()), + PropertyModification::Add(PlatformPropertyAddition { name: name.clone(), value: value.clone(), }), @@ -180,11 +180,11 @@ async fn add_action_property_remove_after_add() -> Result<(), Error> { let name = "name".to_string(); let value = "value".to_string(); let context = make_modifier_scheduler(vec![ - PropertyModification::add(PlatformPropertyAddition { + PropertyModification::Add(PlatformPropertyAddition { name: name.clone(), value: value.clone(), }), - PropertyModification::remove(name.clone()), + PropertyModification::Remove(name.clone()), ]); let action_info = make_base_action_info(UNIX_EPOCH, DigestInfo::zero_digest()); let (_forward_watch_channel_tx, forward_watch_channel_rx) = @@ -219,7 +219,7 @@ async fn add_action_property_remove_after_add() -> Result<(), Error> { async fn add_action_property_remove() -> Result<(), Error> { let name = "name".to_string(); let value = "value".to_string(); - let context = make_modifier_scheduler(vec![PropertyModification::remove(name.clone())]); + let context = make_modifier_scheduler(vec![PropertyModification::Remove(name.clone())]); let mut action_info = make_base_action_info(UNIX_EPOCH, DigestInfo::zero_digest()) .as_ref() .clone(); @@ -279,7 +279,7 @@ async fn find_by_client_operation_id_call_passed() -> 
Result<(), Error> { #[nativelink_test] async fn remove_adds_to_underlying_manager() -> Result<(), Error> { let name = "name".to_string(); - let context = make_modifier_scheduler(vec![PropertyModification::remove(name.clone())]); + let context = make_modifier_scheduler(vec![PropertyModification::Remove(name.clone())]); let known_properties = Vec::new(); let instance_name_fut = context .mock_scheduler @@ -296,7 +296,7 @@ async fn remove_adds_to_underlying_manager() -> Result<(), Error> { #[nativelink_test] async fn remove_retains_type_in_underlying_manager() -> Result<(), Error> { let name = "name".to_string(); - let context = make_modifier_scheduler(vec![PropertyModification::remove(name.clone())]); + let context = make_modifier_scheduler(vec![PropertyModification::Remove(name.clone())]); let known_properties = vec![name.clone()]; let instance_name_fut = context .mock_scheduler diff --git a/nativelink-scheduler/tests/simple_scheduler_test.rs b/nativelink-scheduler/tests/simple_scheduler_test.rs index 6ab49fb40..e143c91b6 100644 --- a/nativelink-scheduler/tests/simple_scheduler_test.rs +++ b/nativelink-scheduler/tests/simple_scheduler_test.rs @@ -516,7 +516,7 @@ async fn worker_should_not_queue_if_properties_dont_match_test() -> Result<(), E let worker_id2: WorkerId = WorkerId(Uuid::new_v4()); let mut prop_defs = HashMap::new(); - prop_defs.insert("prop".to_string(), PropertyType::exact); + prop_defs.insert("prop".to_string(), PropertyType::Exact); let (scheduler, _worker_scheduler) = SimpleScheduler::new_with_callback( &nativelink_config::schedulers::SimpleScheduler { supported_platform_properties: Some(prop_defs), @@ -1269,7 +1269,7 @@ async fn run_two_jobs_on_same_worker_with_platform_properties_restrictions() -> let worker_id: WorkerId = WorkerId(Uuid::new_v4()); let mut supported_props = HashMap::new(); - supported_props.insert("prop1".to_string(), PropertyType::minimum); + supported_props.insert("prop1".to_string(), PropertyType::Minimum); let (scheduler, _worker_scheduler) = SimpleScheduler::new_with_callback( &nativelink_config::schedulers::SimpleScheduler { supported_platform_properties: Some(supported_props), @@ -1420,7 +1420,7 @@ async fn run_jobs_in_the_order_they_were_queued() -> Result<(), Error> { let worker_id: WorkerId = WorkerId(Uuid::new_v4()); let mut supported_props = HashMap::new(); - supported_props.insert("prop1".to_string(), PropertyType::minimum); + supported_props.insert("prop1".to_string(), PropertyType::Minimum); let (scheduler, _worker_scheduler) = SimpleScheduler::new_with_callback( &nativelink_config::schedulers::SimpleScheduler { supported_platform_properties: Some(supported_props), diff --git a/nativelink-service/tests/ac_server_test.rs b/nativelink-service/tests/ac_server_test.rs index ba1362a88..f9284c8de 100644 --- a/nativelink-service/tests/ac_server_test.rs +++ b/nativelink-service/tests/ac_server_test.rs @@ -55,7 +55,7 @@ async fn make_store_manager() -> Result, Error> { store_manager.add_store( "main_cas", store_factory( - &nativelink_config::stores::StoreConfig::memory( + &nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), &store_manager, @@ -66,7 +66,7 @@ async fn make_store_manager() -> Result, Error> { store_manager.add_store( "main_ac", store_factory( - &nativelink_config::stores::StoreConfig::memory( + &nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), &store_manager, diff --git a/nativelink-service/tests/bep_server_test.rs 
b/nativelink-service/tests/bep_server_test.rs index 2770e7e86..5b2251ed4 100644 --- a/nativelink-service/tests/bep_server_test.rs +++ b/nativelink-service/tests/bep_server_test.rs @@ -53,7 +53,7 @@ async fn make_store_manager() -> Result, Error> { store_manager.add_store( BEP_STORE_NAME, store_factory( - &nativelink_config::stores::StoreConfig::memory( + &nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), &store_manager, diff --git a/nativelink-service/tests/bytestream_server_test.rs b/nativelink-service/tests/bytestream_server_test.rs index 089e1b02a..2edba950a 100644 --- a/nativelink-service/tests/bytestream_server_test.rs +++ b/nativelink-service/tests/bytestream_server_test.rs @@ -60,7 +60,7 @@ async fn make_store_manager() -> Result, Error> { store_manager.add_store( "main_cas", store_factory( - &nativelink_config::stores::StoreConfig::memory( + &nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), &store_manager, diff --git a/nativelink-service/tests/cas_server_test.rs b/nativelink-service/tests/cas_server_test.rs index 65e0219af..0ee3523dd 100644 --- a/nativelink-service/tests/cas_server_test.rs +++ b/nativelink-service/tests/cas_server_test.rs @@ -49,7 +49,7 @@ async fn make_store_manager() -> Result, Error> { store_manager.add_store( "main_cas", store_factory( - &nativelink_config::stores::StoreConfig::memory( + &nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), &store_manager, diff --git a/nativelink-store/src/compression_store.rs b/nativelink-store/src/compression_store.rs index fb3884015..306017af6 100644 --- a/nativelink-store/src/compression_store.rs +++ b/nativelink-store/src/compression_store.rs @@ -223,7 +223,7 @@ impl CompressionStore { inner_store: Store, ) -> Result, Error> { let lz4_config = match compression_config.compression_algorithm { - nativelink_config::stores::CompressionAlgorithm::lz4(mut lz4_config) => { + nativelink_config::stores::CompressionAlgorithm::Lz4(mut lz4_config) => { if lz4_config.block_size == 0 { lz4_config.block_size = DEFAULT_BLOCK_SIZE; } diff --git a/nativelink-store/src/default_store_factory.rs b/nativelink-store/src/default_store_factory.rs index 0781ecf66..ef7399022 100644 --- a/nativelink-store/src/default_store_factory.rs +++ b/nativelink-store/src/default_store_factory.rs @@ -49,47 +49,47 @@ pub fn store_factory<'a>( ) -> Pin> { Box::pin(async move { let store: Arc = match backend { - StoreConfig::memory(config) => MemoryStore::new(config), - StoreConfig::experimental_s3_store(config) => { + StoreConfig::Memory(config) => MemoryStore::new(config), + StoreConfig::ExperimentalS3Store(config) => { S3Store::new(config, SystemTime::now).await? 
} - StoreConfig::redis_store(config) => RedisStore::new(config)?, - StoreConfig::verify(config) => VerifyStore::new( + StoreConfig::Redis(config) => RedisStore::new(config)?, + StoreConfig::Verify(config) => VerifyStore::new( config, store_factory(&config.backend, store_manager, None).await?, ), - StoreConfig::compression(config) => CompressionStore::new( + StoreConfig::Compression(config) => CompressionStore::new( *config.clone(), store_factory(&config.backend, store_manager, None).await?, )?, - StoreConfig::dedup(config) => DedupStore::new( + StoreConfig::Dedup(config) => DedupStore::new( config, store_factory(&config.index_store, store_manager, None).await?, store_factory(&config.content_store, store_manager, None).await?, ), - StoreConfig::existence_cache(config) => ExistenceCacheStore::new( + StoreConfig::ExistenceCache(config) => ExistenceCacheStore::new( config, store_factory(&config.backend, store_manager, None).await?, ), - StoreConfig::completeness_checking(config) => CompletenessCheckingStore::new( + StoreConfig::CompletenessChecking(config) => CompletenessCheckingStore::new( store_factory(&config.backend, store_manager, None).await?, store_factory(&config.cas_store, store_manager, None).await?, ), - StoreConfig::fast_slow(config) => FastSlowStore::new( + StoreConfig::FastSlow(config) => FastSlowStore::new( config, store_factory(&config.fast, store_manager, None).await?, store_factory(&config.slow, store_manager, None).await?, ), - StoreConfig::filesystem(config) => ::new(config).await?, - StoreConfig::ref_store(config) => RefStore::new(config, Arc::downgrade(store_manager)), - StoreConfig::size_partitioning(config) => SizePartitioningStore::new( + StoreConfig::Filesystem(config) => ::new(config).await?, + StoreConfig::Ref(config) => RefStore::new(config, Arc::downgrade(store_manager)), + StoreConfig::SizePartitioning(config) => SizePartitioningStore::new( config, store_factory(&config.lower_store, store_manager, None).await?, store_factory(&config.upper_store, store_manager, None).await?, ), - StoreConfig::grpc(config) => GrpcStore::new(config).await?, - StoreConfig::noop => NoopStore::new(), - StoreConfig::shard(config) => { + StoreConfig::Grpc(config) => GrpcStore::new(config).await?, + StoreConfig::Noop => NoopStore::new(), + StoreConfig::Shard(config) => { let stores = config .stores .iter() diff --git a/nativelink-store/src/grpc_store.rs b/nativelink-store/src/grpc_store.rs index 6f0e38d7a..f4dcc5201 100644 --- a/nativelink-store/src/grpc_store.rs +++ b/nativelink-store/src/grpc_store.rs @@ -145,7 +145,7 @@ impl GrpcStore { grpc_request: Request, ) -> Result, Error> { error_if!( - matches!(self.store_type, nativelink_config::stores::StoreType::ac), + matches!(self.store_type, nativelink_config::stores::StoreType::Ac), "CAS operation on AC store" ); @@ -170,7 +170,7 @@ impl GrpcStore { grpc_request: Request, ) -> Result, Error> { error_if!( - matches!(self.store_type, nativelink_config::stores::StoreType::ac), + matches!(self.store_type, nativelink_config::stores::StoreType::Ac), "CAS operation on AC store" ); @@ -195,7 +195,7 @@ impl GrpcStore { grpc_request: Request, ) -> Result, Error> { error_if!( - matches!(self.store_type, nativelink_config::stores::StoreType::ac), + matches!(self.store_type, nativelink_config::stores::StoreType::Ac), "CAS operation on AC store" ); @@ -220,7 +220,7 @@ impl GrpcStore { grpc_request: Request, ) -> Result>, Error> { error_if!( - matches!(self.store_type, nativelink_config::stores::StoreType::ac), + matches!(self.store_type, 
nativelink_config::stores::StoreType::Ac), "CAS operation on AC store" ); @@ -276,7 +276,7 @@ impl GrpcStore { grpc_request: impl IntoRequest, ) -> Result>, Error> { error_if!( - matches!(self.store_type, nativelink_config::stores::StoreType::ac), + matches!(self.store_type, nativelink_config::stores::StoreType::Ac), "CAS operation on AC store" ); @@ -296,7 +296,7 @@ impl GrpcStore { E: Into + 'static, { error_if!( - matches!(self.store_type, nativelink_config::stores::StoreType::ac), + matches!(self.store_type, nativelink_config::stores::StoreType::Ac), "CAS operation on AC store" ); @@ -358,7 +358,7 @@ impl GrpcStore { grpc_request: Request, ) -> Result, Error> { error_if!( - matches!(self.store_type, nativelink_config::stores::StoreType::ac), + matches!(self.store_type, nativelink_config::stores::StoreType::Ac), "CAS operation on AC store" ); @@ -512,7 +512,7 @@ impl StoreDriver for GrpcStore { keys: &[StoreKey<'_>], results: &mut [Option], ) -> Result<(), Error> { - if matches!(self.store_type, nativelink_config::stores::StoreType::ac) { + if matches!(self.store_type, nativelink_config::stores::StoreType::Ac) { keys.iter() .zip(results.iter_mut()) .map(|(key, result)| async move { @@ -579,7 +579,7 @@ impl StoreDriver for GrpcStore { _size_info: UploadSizeInfo, ) -> Result<(), Error> { let digest = key.into_digest(); - if matches!(self.store_type, nativelink_config::stores::StoreType::ac) { + if matches!(self.store_type, nativelink_config::stores::StoreType::Ac) { return self.update_action_result_from_bytes(digest, reader).await; } @@ -659,7 +659,7 @@ impl StoreDriver for GrpcStore { length: Option, ) -> Result<(), Error> { let digest = key.into_digest(); - if matches!(self.store_type, nativelink_config::stores::StoreType::ac) { + if matches!(self.store_type, nativelink_config::stores::StoreType::Ac) { return self .get_action_result_as_part(digest, writer, offset, length) .await; diff --git a/nativelink-store/tests/compression_store_test.rs b/nativelink-store/tests/compression_store_test.rs index 65f75de4e..ae0f86f47 100644 --- a/nativelink-store/tests/compression_store_test.rs +++ b/nativelink-store/tests/compression_store_test.rs @@ -72,10 +72,10 @@ const MEGABYTE_SZ: usize = 1024 * 1024; async fn simple_smoke_test() -> Result<(), Error> { let store = CompressionStore::new( nativelink_config::stores::CompressionStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - compression_algorithm: nativelink_config::stores::CompressionAlgorithm::lz4( + compression_algorithm: nativelink_config::stores::CompressionAlgorithm::Lz4( nativelink_config::stores::Lz4Config { ..Default::default() }, @@ -108,10 +108,10 @@ async fn simple_smoke_test() -> Result<(), Error> { async fn partial_reads_test() -> Result<(), Error> { let store_owned = CompressionStore::new( nativelink_config::stores::CompressionStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - compression_algorithm: nativelink_config::stores::CompressionAlgorithm::lz4( + compression_algorithm: nativelink_config::stores::CompressionAlgorithm::Lz4( nativelink_config::stores::Lz4Config { block_size: 10, ..Default::default() @@ -167,10 +167,10 @@ async fn partial_reads_test() -> Result<(), Error> { async fn rand_5mb_smoke_test() -> Result<(), Error> { let store_owned = CompressionStore::new( 
nativelink_config::stores::CompressionStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - compression_algorithm: nativelink_config::stores::CompressionAlgorithm::lz4( + compression_algorithm: nativelink_config::stores::CompressionAlgorithm::Lz4( nativelink_config::stores::Lz4Config { ..Default::default() }, @@ -204,10 +204,10 @@ async fn sanity_check_zero_bytes_test() -> Result<(), Error> { let inner_store = MemoryStore::new(&nativelink_config::stores::MemoryStore::default()); let store_owned = CompressionStore::new( nativelink_config::stores::CompressionStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - compression_algorithm: nativelink_config::stores::CompressionAlgorithm::lz4( + compression_algorithm: nativelink_config::stores::CompressionAlgorithm::Lz4( nativelink_config::stores::Lz4Config { ..Default::default() }, @@ -259,10 +259,10 @@ async fn check_header_test() -> Result<(), Error> { let inner_store = MemoryStore::new(&nativelink_config::stores::MemoryStore::default()); let store_owned = CompressionStore::new( nativelink_config::stores::CompressionStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - compression_algorithm: nativelink_config::stores::CompressionAlgorithm::lz4( + compression_algorithm: nativelink_config::stores::CompressionAlgorithm::Lz4( nativelink_config::stores::Lz4Config { block_size: BLOCK_SIZE, ..Default::default() @@ -346,10 +346,10 @@ async fn check_footer_test() -> Result<(), Error> { let inner_store = MemoryStore::new(&nativelink_config::stores::MemoryStore::default()); let store_owned = CompressionStore::new( nativelink_config::stores::CompressionStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - compression_algorithm: nativelink_config::stores::CompressionAlgorithm::lz4( + compression_algorithm: nativelink_config::stores::CompressionAlgorithm::Lz4( nativelink_config::stores::Lz4Config { block_size: BLOCK_SIZE, ..Default::default() @@ -496,10 +496,10 @@ async fn get_part_is_zero_digest() -> Result<(), Error> { let inner_store = MemoryStore::new(&nativelink_config::stores::MemoryStore::default()); let store_owned = CompressionStore::new( nativelink_config::stores::CompressionStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - compression_algorithm: nativelink_config::stores::CompressionAlgorithm::lz4( + compression_algorithm: nativelink_config::stores::CompressionAlgorithm::Lz4( nativelink_config::stores::Lz4Config { block_size: BLOCK_SIZE, ..Default::default() diff --git a/nativelink-store/tests/dedup_store_test.rs b/nativelink-store/tests/dedup_store_test.rs index cc84c7985..ffbd223b1 100644 --- a/nativelink-store/tests/dedup_store_test.rs +++ b/nativelink-store/tests/dedup_store_test.rs @@ -24,10 +24,10 @@ use rand::{Rng, SeedableRng}; fn make_default_config() -> nativelink_config::stores::DedupStore { nativelink_config::stores::DedupStore { - index_store: nativelink_config::stores::StoreConfig::memory( + 
index_store: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - content_store: nativelink_config::stores::StoreConfig::memory( + content_store: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), min_size: 8 * 1024, @@ -169,10 +169,10 @@ async fn check_length_not_set_with_chunk_read_beyond_first_chunk_regression_test ) -> Result<(), Error> { let store = DedupStore::new( &nativelink_config::stores::DedupStore { - index_store: nativelink_config::stores::StoreConfig::memory( + index_store: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - content_store: nativelink_config::stores::StoreConfig::memory( + content_store: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), min_size: 5, @@ -221,10 +221,10 @@ async fn check_length_not_set_with_chunk_read_beyond_first_chunk_regression_test async fn check_chunk_boundary_reads_test() -> Result<(), Error> { let store = DedupStore::new( &nativelink_config::stores::DedupStore { - index_store: nativelink_config::stores::StoreConfig::memory( + index_store: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - content_store: nativelink_config::stores::StoreConfig::memory( + content_store: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), min_size: 5, diff --git a/nativelink-store/tests/existence_store_test.rs b/nativelink-store/tests/existence_store_test.rs index e48a0f6a1..13bc31acf 100644 --- a/nativelink-store/tests/existence_store_test.rs +++ b/nativelink-store/tests/existence_store_test.rs @@ -33,7 +33,7 @@ const VALID_HASH1: &str = "0123456789abcdef0000000000000000000100000000000001234 async fn simple_exist_cache_test() -> Result<(), Error> { const VALUE: &str = "123"; let config = ExistenceCacheStoreConfig { - backend: StoreConfig::noop, // Note: Not used. + backend: StoreConfig::Noop, // Note: Not used. 
eviction_policy: Default::default(), }; let inner_store = Store::new(MemoryStore::new( @@ -73,7 +73,7 @@ async fn simple_exist_cache_test() -> Result<(), Error> { async fn update_flags_existance_cache_test() -> Result<(), Error> { const VALUE: &str = "123"; let config = ExistenceCacheStoreConfig { - backend: StoreConfig::noop, + backend: StoreConfig::Noop, eviction_policy: Default::default(), }; let inner_store = Store::new(MemoryStore::new( @@ -98,7 +98,7 @@ async fn update_flags_existance_cache_test() -> Result<(), Error> { async fn get_part_caches_if_exact_size_set() -> Result<(), Error> { const VALUE: &str = "123"; let config = ExistenceCacheStoreConfig { - backend: StoreConfig::noop, + backend: StoreConfig::Noop, eviction_policy: Default::default(), }; let inner_store = Store::new(MemoryStore::new( @@ -135,7 +135,7 @@ async fn ensure_has_requests_eventually_do_let_evictions_happen() -> Result<(), .err_tip(|| "Failed to update store")?; let store = ExistenceCacheStore::new_with_time( &ExistenceCacheStoreConfig { - backend: StoreConfig::noop, + backend: StoreConfig::Noop, eviction_policy: Some(EvictionPolicy { max_seconds: 10, ..Default::default() diff --git a/nativelink-store/tests/fast_slow_store_test.rs b/nativelink-store/tests/fast_slow_store_test.rs index 47748d0c8..9994a81a0 100644 --- a/nativelink-store/tests/fast_slow_store_test.rs +++ b/nativelink-store/tests/fast_slow_store_test.rs @@ -43,10 +43,10 @@ fn make_stores() -> (Store, Store, Store) { )); let fast_slow_store = Store::new(FastSlowStore::new( &nativelink_config::stores::FastSlowStore { - fast: nativelink_config::stores::StoreConfig::memory( + fast: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - slow: nativelink_config::stores::StoreConfig::memory( + slow: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), }, @@ -333,10 +333,10 @@ async fn drop_on_eof_completes_store_futures() -> Result<(), Error> { let fast_slow_store = FastSlowStore::new( &nativelink_config::stores::FastSlowStore { - fast: nativelink_config::stores::StoreConfig::memory( + fast: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - slow: nativelink_config::stores::StoreConfig::memory( + slow: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), }, @@ -382,10 +382,10 @@ async fn ignore_value_in_fast_store() -> Result<(), Error> { )); let fast_slow_store = Arc::new(FastSlowStore::new( &nativelink_config::stores::FastSlowStore { - fast: nativelink_config::stores::StoreConfig::memory( + fast: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - slow: nativelink_config::stores::StoreConfig::memory( + slow: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), }, @@ -411,10 +411,10 @@ async fn has_checks_fast_store_when_noop() -> Result<(), Error> { )); let slow_store = Store::new(NoopStore::new()); let fast_slow_store_config = nativelink_config::stores::FastSlowStore { - fast: nativelink_config::stores::StoreConfig::memory( + fast: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - slow: nativelink_config::stores::StoreConfig::noop, + slow: nativelink_config::stores::StoreConfig::Noop, }; let fast_slow_store = Arc::new(FastSlowStore::new( &fast_slow_store_config, diff --git 
a/nativelink-store/tests/filesystem_store_test.rs b/nativelink-store/tests/filesystem_store_test.rs index 352ab4d0e..b4eeb1a1f 100644 --- a/nativelink-store/tests/filesystem_store_test.rs +++ b/nativelink-store/tests/filesystem_store_test.rs @@ -1420,10 +1420,10 @@ async fn update_with_whole_file_slow_path_when_low_file_descriptors() -> Result< let store = FastSlowStore::new( // Note: The config is not needed for this test, so use dummy data. &nativelink_config::stores::FastSlowStore { - fast: nativelink_config::stores::StoreConfig::memory( + fast: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - slow: nativelink_config::stores::StoreConfig::memory( + slow: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), }, diff --git a/nativelink-store/tests/shard_store_test.rs b/nativelink-store/tests/shard_store_test.rs index e75208fb5..af416e0b0 100644 --- a/nativelink-store/tests/shard_store_test.rs +++ b/nativelink-store/tests/shard_store_test.rs @@ -29,7 +29,7 @@ const MEGABYTE_SZ: usize = 1024 * 1024; fn make_stores(weights: &[u32]) -> (Arc, Vec>) { let memory_store_config = nativelink_config::stores::MemoryStore::default(); - let store_config = nativelink_config::stores::StoreConfig::memory(memory_store_config.clone()); + let store_config = nativelink_config::stores::StoreConfig::Memory(memory_store_config.clone()); let stores: Vec<_> = weights .iter() .map(|_| MemoryStore::new(&memory_store_config)) diff --git a/nativelink-store/tests/size_partitioning_store_test.rs b/nativelink-store/tests/size_partitioning_store_test.rs index 216e75ca2..d58b5af64 100644 --- a/nativelink-store/tests/size_partitioning_store_test.rs +++ b/nativelink-store/tests/size_partitioning_store_test.rs @@ -43,10 +43,10 @@ fn setup_stores( let size_part_store = SizePartitioningStore::new( &nativelink_config::stores::SizePartitioningStore { size, - lower_store: nativelink_config::stores::StoreConfig::memory( + lower_store: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), - upper_store: nativelink_config::stores::StoreConfig::memory( + upper_store: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), }, diff --git a/nativelink-store/tests/verify_store_test.rs b/nativelink-store/tests/verify_store_test.rs index d3c24b030..3942d370a 100644 --- a/nativelink-store/tests/verify_store_test.rs +++ b/nativelink-store/tests/verify_store_test.rs @@ -35,7 +35,7 @@ async fn verify_size_false_passes_on_update() -> Result<(), Error> { let inner_store = MemoryStore::new(&nativelink_config::stores::MemoryStore::default()); let store = VerifyStore::new( &nativelink_config::stores::VerifyStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), verify_size: false, @@ -66,7 +66,7 @@ async fn verify_size_true_fails_on_update() -> Result<(), Error> { let inner_store = MemoryStore::new(&nativelink_config::stores::MemoryStore::default()); let store = VerifyStore::new( &nativelink_config::stores::VerifyStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), verify_size: true, @@ -106,7 +106,7 @@ async fn verify_size_true_suceeds_on_update() -> Result<(), Error> { let inner_store = 
MemoryStore::new(&nativelink_config::stores::MemoryStore::default()); let store = VerifyStore::new( &nativelink_config::stores::VerifyStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), verify_size: true, @@ -132,7 +132,7 @@ async fn verify_size_true_suceeds_on_multi_chunk_stream_update() -> Result<(), E let inner_store = MemoryStore::new(&nativelink_config::stores::MemoryStore::default()); let store = VerifyStore::new( &nativelink_config::stores::VerifyStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), verify_size: true, @@ -170,7 +170,7 @@ async fn verify_sha256_hash_true_suceeds_on_update() -> Result<(), Error> { let inner_store = MemoryStore::new(&nativelink_config::stores::MemoryStore::default()); let store = VerifyStore::new( &nativelink_config::stores::VerifyStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), verify_size: false, @@ -198,7 +198,7 @@ async fn verify_sha256_hash_true_fails_on_update() -> Result<(), Error> { let inner_store = MemoryStore::new(&nativelink_config::stores::MemoryStore::default()); let store = VerifyStore::new( &nativelink_config::stores::VerifyStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), verify_size: false, @@ -233,7 +233,7 @@ async fn verify_blake3_hash_true_suceeds_on_update() -> Result<(), Error> { let inner_store = MemoryStore::new(&nativelink_config::stores::MemoryStore::default()); let store = VerifyStore::new( &nativelink_config::stores::VerifyStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), verify_size: false, @@ -267,7 +267,7 @@ async fn verify_blake3_hash_true_fails_on_update() -> Result<(), Error> { let inner_store = MemoryStore::new(&nativelink_config::stores::MemoryStore::default()); let store = VerifyStore::new( &nativelink_config::stores::VerifyStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), verify_size: false, @@ -313,7 +313,7 @@ async fn verify_fails_immediately_on_too_much_data_sent_update() -> Result<(), E let inner_store = MemoryStore::new(&nativelink_config::stores::MemoryStore::default()); let store = VerifyStore::new( &nativelink_config::stores::VerifyStore { - backend: nativelink_config::stores::StoreConfig::memory( + backend: nativelink_config::stores::StoreConfig::Memory( nativelink_config::stores::MemoryStore::default(), ), verify_size: true, diff --git a/nativelink-util/src/digest_hasher.rs b/nativelink-util/src/digest_hasher.rs index 72721bffe..74776d406 100644 --- a/nativelink-util/src/digest_hasher.rs +++ b/nativelink-util/src/digest_hasher.rs @@ -98,8 +98,8 @@ impl DigestHasherFunc { impl From for DigestHasherFunc { fn from(value: ConfigDigestHashFunction) -> Self { match value { - ConfigDigestHashFunction::sha256 => Self::Sha256, - ConfigDigestHashFunction::blake3 => Self::Blake3, + ConfigDigestHashFunction::Sha256 => Self::Sha256, + 
ConfigDigestHashFunction::Blake3 => Self::Blake3, } } } diff --git a/nativelink-worker/src/running_actions_manager.rs b/nativelink-worker/src/running_actions_manager.rs index 7ed7191f3..44bb23ffc 100644 --- a/nativelink-worker/src/running_actions_manager.rs +++ b/nativelink-worker/src/running_actions_manager.rs @@ -85,15 +85,15 @@ const EXIT_CODE_FOR_SIGNAL: i32 = 9; /// Note: If this value changes the config documentation /// should reflect it. const DEFAULT_HISTORICAL_RESULTS_STRATEGY: UploadCacheResultsStrategy = - UploadCacheResultsStrategy::failures_only; + UploadCacheResultsStrategy::FailuresOnly; /// Valid string reasons for a failure. /// Note: If these change, the documentation should be updated. -#[allow(non_camel_case_types)] #[derive(Debug, Deserialize)] +#[serde(rename_all = "snake_case")] enum SideChannelFailureReason { /// Task should be considered timedout. - timeout, + Timeout, } /// This represents the json data that can be passed from the running process @@ -515,7 +515,7 @@ async fn process_side_channel_file( ) })?; Ok(side_channel_info.failure.map(|failure| match failure { - SideChannelFailureReason::timeout => Error::new( + SideChannelFailureReason::Timeout => Error::new( Code::DeadlineExceeded, format!( "Command '{}' timed out after {} seconds", @@ -803,22 +803,22 @@ impl RunningActionImpl { { for (name, source) in additional_environment { let value = match source { - EnvironmentSource::property(property) => self + EnvironmentSource::Property(property) => self .action_info .platform_properties .get(property) .map_or_else(|| Cow::Borrowed(""), |v| Cow::Borrowed(v.as_str())), - EnvironmentSource::value(value) => Cow::Borrowed(value.as_str()), - EnvironmentSource::timeout_millis => { + EnvironmentSource::Value(value) => Cow::Borrowed(value.as_str()), + EnvironmentSource::TimeoutMillis => { Cow::Owned(requested_timeout.as_millis().to_string()) } - EnvironmentSource::side_channel_file => { + EnvironmentSource::SideChannelFile => { let file_cow = format!("{}/{}", self.action_directory, Uuid::new_v4().simple()); maybe_side_channel_file = Some(Cow::Owned(file_cow.clone().into())); Cow::Owned(file_cow) } - EnvironmentSource::action_directory => { + EnvironmentSource::ActionDirectory => { Cow::Borrowed(self.action_directory.as_str()) } }; @@ -1409,7 +1409,7 @@ impl UploadActionResults { .unwrap_or(DEFAULT_HISTORICAL_RESULTS_STRATEGY); if !matches!( config.upload_ac_results_strategy, - UploadCacheResultsStrategy::never + UploadCacheResultsStrategy::Never ) && ac_store.is_none() { return Err(make_input_err!( @@ -1450,13 +1450,13 @@ impl UploadActionResults { did_fail = true; } match strategy { - UploadCacheResultsStrategy::success_only => !did_fail, - UploadCacheResultsStrategy::never => false, + UploadCacheResultsStrategy::SuccessOnly => !did_fail, + UploadCacheResultsStrategy::Never => false, // Never cache internal errors or timeouts. 
- UploadCacheResultsStrategy::everything => { + UploadCacheResultsStrategy::Everything => { treat_infra_error_as_failure || action_result.error.is_none() } - UploadCacheResultsStrategy::failures_only => did_fail, + UploadCacheResultsStrategy::FailuresOnly => did_fail, } } diff --git a/nativelink-worker/src/worker_utils.rs b/nativelink-worker/src/worker_utils.rs index 06741037e..5aca735af 100644 --- a/nativelink-worker/src/worker_utils.rs +++ b/nativelink-worker/src/worker_utils.rs @@ -33,7 +33,7 @@ pub async fn make_supported_properties( for (property_name, worker_property) in worker_properties { futures.push(async move { match worker_property { - WorkerProperty::values(values) => { + WorkerProperty::Values(values) => { let mut props = Vec::with_capacity(values.len()); for value in values { props.push(Property { @@ -43,7 +43,7 @@ pub async fn make_supported_properties( } Ok(props) } - WorkerProperty::query_cmd(cmd) => { + WorkerProperty::QueryCmd(cmd) => { let maybe_split_cmd = shlex::split(cmd); let (command, args) = match &maybe_split_cmd { Some(split_cmd) => (&split_cmd[0], &split_cmd[1..]), diff --git a/nativelink-worker/tests/local_worker_test.rs b/nativelink-worker/tests/local_worker_test.rs index cd7c77f34..9d7b50be7 100644 --- a/nativelink-worker/tests/local_worker_test.rs +++ b/nativelink-worker/tests/local_worker_test.rs @@ -78,15 +78,15 @@ async fn platform_properties_smoke_test() -> Result<(), Error> { let mut platform_properties = HashMap::new(); platform_properties.insert( "foo".to_string(), - WorkerProperty::values(vec!["bar1".to_string(), "bar2".to_string()]), + WorkerProperty::Values(vec!["bar1".to_string(), "bar2".to_string()]), ); platform_properties.insert( "baz".to_string(), // Note: new lines will result in two entries for same key. 
#[cfg(target_family = "unix")] - WorkerProperty::query_cmd("printf 'hello\ngoodbye'".to_string()), + WorkerProperty::QueryCmd("printf 'hello\ngoodbye'".to_string()), #[cfg(target_family = "windows")] - WorkerProperty::query_cmd("cmd /C \"echo hello && echo goodbye\"".to_string()), + WorkerProperty::QueryCmd("cmd /C \"echo hello && echo goodbye\"".to_string()), ); let mut test_context = setup_local_worker(platform_properties).await; let streaming_response = test_context.maybe_streaming_response.take().unwrap(); @@ -407,10 +407,10 @@ async fn new_local_worker_creates_work_directory_test() -> Result<(), Box Result< let ac_store = MemoryStore::new(&slow_config); let cas_store = FastSlowStore::new( &nativelink_config::stores::FastSlowStore { - fast: nativelink_config::stores::StoreConfig::filesystem(fast_config), - slow: nativelink_config::stores::StoreConfig::memory(slow_config), + fast: nativelink_config::stores::StoreConfig::Filesystem(fast_config), + slow: nativelink_config::stores::StoreConfig::Memory(slow_config), }, Store::new(fast_store.clone()), Store::new(slow_store.clone()), @@ -432,7 +432,7 @@ async fn ensure_output_files_full_directories_are_created_no_working_directory_t historical_store: Store::new(cas_store.clone()), upload_action_result_config: &nativelink_config::cas_server::UploadActionResultConfig { upload_ac_results_strategy: - nativelink_config::cas_server::UploadCacheResultsStrategy::never, + nativelink_config::cas_server::UploadCacheResultsStrategy::Never, ..Default::default() }, max_action_timeout: Duration::MAX, @@ -547,7 +547,7 @@ async fn ensure_output_files_full_directories_are_created_test( historical_store: Store::new(cas_store.clone()), upload_action_result_config: &nativelink_config::cas_server::UploadActionResultConfig { upload_ac_results_strategy: - nativelink_config::cas_server::UploadCacheResultsStrategy::never, + nativelink_config::cas_server::UploadCacheResultsStrategy::Never, ..Default::default() }, max_action_timeout: Duration::MAX, @@ -664,7 +664,7 @@ async fn blake3_upload_files() -> Result<(), Box> { historical_store: Store::new(cas_store.clone()), upload_action_result_config: &nativelink_config::cas_server::UploadActionResultConfig { upload_ac_results_strategy: - nativelink_config::cas_server::UploadCacheResultsStrategy::never, + nativelink_config::cas_server::UploadCacheResultsStrategy::Never, ..Default::default() }, max_action_timeout: Duration::MAX, @@ -837,7 +837,7 @@ async fn upload_files_from_above_cwd_test() -> Result<(), Box Result<(), Box> historical_store: Store::new(cas_store.clone()), upload_action_result_config: &nativelink_config::cas_server::UploadActionResultConfig { upload_ac_results_strategy: - nativelink_config::cas_server::UploadCacheResultsStrategy::never, + nativelink_config::cas_server::UploadCacheResultsStrategy::Never, ..Default::default() }, max_action_timeout: Duration::MAX, @@ -1211,7 +1211,7 @@ async fn cleanup_happens_on_job_failure() -> Result<(), Box Result<(), Box> { historical_store: Store::new(cas_store.clone()), upload_action_result_config: &nativelink_config::cas_server::UploadActionResultConfig { upload_ac_results_strategy: - nativelink_config::cas_server::UploadCacheResultsStrategy::never, + nativelink_config::cas_server::UploadCacheResultsStrategy::Never, ..Default::default() }, max_action_timeout: Duration::MAX, @@ -1497,7 +1497,7 @@ exit 0 historical_store: Store::new(cas_store.clone()), upload_action_result_config: &nativelink_config::cas_server::UploadActionResultConfig { upload_ac_results_strategy: - 
nativelink_config::cas_server::UploadCacheResultsStrategy::never, + nativelink_config::cas_server::UploadCacheResultsStrategy::Never, ..Default::default() }, max_action_timeout: Duration::MAX, @@ -1636,15 +1636,15 @@ exit 0 additional_environment: Some(HashMap::from([ ( "PROPERTY".to_string(), - EnvironmentSource::property("property_name".to_string()), + EnvironmentSource::Property("property_name".to_string()), ), ( "VALUE".to_string(), - EnvironmentSource::value("raw_value".to_string()), + EnvironmentSource::Value("raw_value".to_string()), ), ( "INNER_TIMEOUT".to_string(), - EnvironmentSource::timeout_millis, + EnvironmentSource::TimeoutMillis, ), ])), }, @@ -1653,7 +1653,7 @@ exit 0 historical_store: Store::new(cas_store.clone()), upload_action_result_config: &nativelink_config::cas_server::UploadActionResultConfig { upload_ac_results_strategy: - nativelink_config::cas_server::UploadCacheResultsStrategy::never, + nativelink_config::cas_server::UploadCacheResultsStrategy::Never, ..Default::default() }, max_action_timeout: Duration::MAX, @@ -1799,7 +1799,7 @@ exit 1 entrypoint: Some(test_wrapper_script.into_string().unwrap()), additional_environment: Some(HashMap::from([( "SIDE_CHANNEL_FILE".to_string(), - EnvironmentSource::side_channel_file, + EnvironmentSource::SideChannelFile, )])), }, cas_store: cas_store.clone(), @@ -1807,7 +1807,7 @@ exit 1 historical_store: Store::new(cas_store.clone()), upload_action_result_config: &nativelink_config::cas_server::UploadActionResultConfig { upload_ac_results_strategy: - nativelink_config::cas_server::UploadCacheResultsStrategy::never, + nativelink_config::cas_server::UploadCacheResultsStrategy::Never, ..Default::default() }, max_action_timeout: Duration::MAX, @@ -1883,7 +1883,7 @@ async fn caches_results_in_action_cache_store() -> Result<(), Box Result<(), Box Result<(), Box Result<(), Box Result<(), Box Result<(), Box Result<(), Box Result<(), Box Result<(), Box Result<(), Box> { historical_store: Store::new(cas_store.clone()), upload_action_result_config: &nativelink_config::cas_server::UploadActionResultConfig { upload_ac_results_strategy: - nativelink_config::cas_server::UploadCacheResultsStrategy::never, + nativelink_config::cas_server::UploadCacheResultsStrategy::Never, ..Default::default() }, max_action_timeout: Duration::MAX, @@ -2703,7 +2703,7 @@ async fn kill_all_waits_for_all_tasks_to_finish() -> Result<(), Box Result<(), Box> { historical_store: Store::new(cas_store.clone()), upload_action_result_config: &nativelink_config::cas_server::UploadActionResultConfig { upload_ac_results_strategy: - nativelink_config::cas_server::UploadCacheResultsStrategy::never, + nativelink_config::cas_server::UploadCacheResultsStrategy::Never, ..Default::default() }, max_action_timeout: Duration::MAX, @@ -2958,7 +2958,7 @@ async fn action_directory_contents_are_cleaned() -> Result<(), Box Result<(), Box> { historical_store: Store::new(cas_store.clone()), upload_action_result_config: &nativelink_config::cas_server::UploadActionResultConfig { upload_ac_results_strategy: - nativelink_config::cas_server::UploadCacheResultsStrategy::never, + nativelink_config::cas_server::UploadCacheResultsStrategy::Never, ..Default::default() }, max_action_timeout: Duration::MAX, @@ -3244,7 +3244,7 @@ async fn running_actions_manager_respects_action_timeout() -> Result<(), Box Option { match from { - HttpCompressionAlgorithm::gzip => Some(CompressionEncoding::Gzip), - HttpCompressionAlgorithm::none => None, + HttpCompressionAlgorithm::Gzip => Some(CompressionEncoding::Gzip), + 
HttpCompressionAlgorithm::None => None, } } @@ -241,7 +241,7 @@ async fn inner_main( let services = server_cfg.services.ok_or("'services' must be configured")?; // Currently we only support http as our socket type. - let ListenerConfig::http(http_config) = server_cfg.listener; + let ListenerConfig::Http(http_config) = server_cfg.listener; let tonic_services = TonicServer::builder() .add_optional_service( @@ -252,7 +252,7 @@ async fn inner_main( let mut service = v.into_service(); let send_algo = &http_config.compression.send_compression_algorithm; if let Some(encoding) = - into_encoding(&send_algo.unwrap_or(HttpCompressionAlgorithm::none)) + into_encoding(&send_algo.unwrap_or(HttpCompressionAlgorithm::None)) { service = service.send_compressed(encoding); } @@ -278,7 +278,7 @@ async fn inner_main( let mut service = v.into_service(); let send_algo = &http_config.compression.send_compression_algorithm; if let Some(encoding) = - into_encoding(&send_algo.unwrap_or(HttpCompressionAlgorithm::none)) + into_encoding(&send_algo.unwrap_or(HttpCompressionAlgorithm::None)) { service = service.send_compressed(encoding); } @@ -304,7 +304,7 @@ async fn inner_main( let mut service = v.into_service(); let send_algo = &http_config.compression.send_compression_algorithm; if let Some(encoding) = - into_encoding(&send_algo.unwrap_or(HttpCompressionAlgorithm::none)) + into_encoding(&send_algo.unwrap_or(HttpCompressionAlgorithm::None)) { service = service.send_compressed(encoding); } @@ -330,7 +330,7 @@ async fn inner_main( let mut service = v.into_service(); let send_algo = &http_config.compression.send_compression_algorithm; if let Some(encoding) = - into_encoding(&send_algo.unwrap_or(HttpCompressionAlgorithm::none)) + into_encoding(&send_algo.unwrap_or(HttpCompressionAlgorithm::None)) { service = service.send_compressed(encoding); } @@ -370,7 +370,7 @@ async fn inner_main( let mut service = v.into_service(); let send_algo = &http_config.compression.send_compression_algorithm; if let Some(encoding) = - into_encoding(&send_algo.unwrap_or(HttpCompressionAlgorithm::none)) + into_encoding(&send_algo.unwrap_or(HttpCompressionAlgorithm::None)) { service = service.send_compressed(encoding); } @@ -394,7 +394,7 @@ async fn inner_main( let mut service = v.into_service(); let send_algo = &http_config.compression.send_compression_algorithm; if let Some(encoding) = - into_encoding(&send_algo.unwrap_or(HttpCompressionAlgorithm::none)) + into_encoding(&send_algo.unwrap_or(HttpCompressionAlgorithm::None)) { service = service.send_compressed(encoding); } @@ -420,7 +420,7 @@ async fn inner_main( let mut service = v.into_service(); let send_algo = &http_config.compression.send_compression_algorithm; if let Some(encoding) = - into_encoding(&send_algo.unwrap_or(HttpCompressionAlgorithm::none)) + into_encoding(&send_algo.unwrap_or(HttpCompressionAlgorithm::None)) { service = service.send_compressed(encoding); } @@ -875,7 +875,7 @@ async fn inner_main( let mut worker_metrics: HashMap> = HashMap::new(); for (i, worker_cfg) in worker_cfgs.into_iter().enumerate() { let spawn_fut = match worker_cfg { - WorkerConfig::local(local_worker_cfg) => { + WorkerConfig::Local(local_worker_cfg) => { let fast_slow_store = store_manager .get_store(&local_worker_cfg.cas_fast_slow_store) .err_tip(|| { @@ -1001,7 +1001,7 @@ fn main() -> Result<(), Box> { set_default_digest_hasher_func(DigestHasherFunc::from( global_cfg .default_digest_hash_function - .unwrap_or(ConfigDigestHashFunction::sha256), + .unwrap_or(ConfigDigestHashFunction::Sha256), ))?; 
set_default_digest_size_health_check(global_cfg.default_digest_size_health_check)?; // TODO (#513): prevent deadlocks by setting the max number of blocking threads to (number of open files * 10)
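
Reviewer note: the renames above only touch the Rust-side identifiers; because each enum opts into `#[serde(rename_all = "snake_case")]`, the strings users write in their configuration files keep the existing lowercase spelling. Below is a minimal, self-contained sketch of that behavior. It is not part of the patch: it uses `serde_json` purely as a stand-in for the project's actual config loader, and it trims `UploadActionResultConfig` down to the single field exercised in the tests above.

// Standalone sketch (assumptions: serde with the `derive` feature and
// serde_json as dev-style dependencies; the struct below is a simplified
// illustration, not the real config type).
use serde::Deserialize;

#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
enum UploadCacheResultsStrategy {
    SuccessOnly,
    Never,
    Everything,
    FailuresOnly,
}

#[derive(Deserialize, Debug)]
struct UploadActionResultConfig {
    upload_ac_results_strategy: UploadCacheResultsStrategy,
}

fn main() {
    // Existing configs keep the lowercase spelling and still deserialize
    // to the renamed PascalCase variant.
    let cfg: UploadActionResultConfig =
        serde_json::from_str(r#"{ "upload_ac_results_strategy": "failures_only" }"#).unwrap();
    assert_eq!(
        cfg.upload_ac_results_strategy,
        UploadCacheResultsStrategy::FailuresOnly
    );

    // The new Rust-side spelling is not accepted on the wire, so the
    // external config format is unchanged by the refactor.
    assert!(serde_json::from_str::<UploadActionResultConfig>(
        r#"{ "upload_ac_results_strategy": "FailuresOnly" }"#
    )
    .is_err());
}

One design note: if the PascalCase names were ever meant to be accepted in configs as well, each variant would need an explicit `#[serde(alias = "...")]`; as written, only the documented snake_case form remains valid, which is why this patch can avoid touching any example configuration files.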