diff --git a/Cargo.toml b/Cargo.toml index c60283b..b1d7447 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,3 +43,12 @@ assert_cmd = "2.0.15" assert_fs = "1.1.2" predicates = { version = "3.1.2", features = ["regex"] } serial_test = "3.2.0" + +[lints.clippy] +pedantic = { level = "warn", priority = -1 } +cast_precision_loss = "allow" +cast_possible_wrap = "allow" +cast_possible_truncation = "allow" +cast_sign_loss = "allow" +must_use_candidate = "allow" +format_push_string = "warn" diff --git a/src/builtin.rs b/src/builtin.rs index ca62a30..9c75c68 100644 --- a/src/builtin.rs +++ b/src/builtin.rs @@ -12,7 +12,6 @@ pub(crate) trait BuiltIn { impl BuiltIn for launcher::Configuration { /// Construct the built-in launchers - /// fn built_in() -> Self { let mut result = Self { launchers: HashMap::with_capacity(2), diff --git a/src/cli.rs b/src/cli.rs index d758f5d..d8b3f5b 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -50,9 +50,10 @@ pub struct GlobalOptions { #[arg(long, global = true, env = "ROW_CLEAR_PROGRESS", display_order = 2)] pub clear_progress: bool, - /// Check the job submission status on the given cluster. - /// - /// Autodetected by default. + /** Check the job submission status on the given cluster. + + Autodetected by default. + */ #[arg(long, global = true, env = "ROW_CLUSTER", display_order = 2, add=ArgValueCandidates::new(autocomplete::get_cluster_candidates))] cluster: Option, @@ -72,124 +73,125 @@ pub enum ColorMode { #[derive(Subcommand, Debug)] pub enum ShowCommands { - /// Show the current state of the workflow. - /// - /// `row show status` prints a summary of all actions in the workflow. - /// The summary includes the number of directories in each status and an - /// estimate of the remaining cost in either CPU-hours or GPU-hours based - /// on the number of submitted, eligible, and waiting jobs and the - /// resources used by the action. 
- /// - /// EXAMPLES - /// - /// * Show the status of the entire workspace: - /// - /// row show status - /// - /// * Show the status of all actions with eligible directories - /// - /// row show status --eligible - /// - /// * Show the status of a specific action: - /// - /// row show status --action=action - /// - /// * Show the status of all action names that match a wildcard pattern: - /// - /// row show status --action='project*' - /// - /// * Show the status of specific directories in the workspace: - /// - /// row show status directory1 directory2 - /// + /** Show the current state of the workflow. + + `row show status` prints a summary of all actions in the workflow. + The summary includes the number of directories in each status and an + estimate of the remaining cost in either CPU-hours or GPU-hours based + on the number of submitted, eligible, and waiting jobs and the + resources used by the action. + + EXAMPLES + + * Show the status of the entire workspace: + + row show status + + * Show the status of all actions with eligible directories + + row show status --eligible + + * Show the status of a specific action: + + row show status --action=action + + * Show the status of all action names that match a wildcard pattern: + + row show status --action='project*' + + * Show the status of specific directories in the workspace: + + row show status directory1 directory2 + */ Status(status::Arguments), - /// List directories in the workspace. - /// - /// `row show directories` lists each selected directory. - /// - /// When provided an action, `row show directories` also shows each - /// directory's status and scheduler job ID (when submitted) for the given - /// action. You can also show elements from the directory's value, accessed - /// by JSON pointer. Blank lines separate groups. - /// - /// By default, `row show status` displays directories with any status. 
Set one or more - /// of `--completed`, `--submitted`, `--eligible`, and `--waiting` to show specific - /// directories that have specific statuses. - /// - /// EXAMPLES - /// - /// * Show all the directories for action `one`: - /// - /// row show directories --action one - /// - /// * Show the directory value element `/value`: - /// - /// row show directories --action action --value=/value - /// - /// * Show specific directories: - /// - /// row show directories --action action directory1 directory2 - /// - /// * Show eligible directories - /// - /// row show directories --action action --eligible - /// - /// * Show the names of all directories - /// - /// row show directories - /// - /// * Show the names of eligible directories - /// - /// row show directories --action action --eligible --short - /// + /** List directories in the workspace. + + `row show directories` lists each selected directory. + + When provided an action, `row show directories` also shows each + directory's status and scheduler job ID (when submitted) for the given + action. You can also show elements from the directory's value, accessed + by JSON pointer. Blank lines separate groups. + + By default, `row show directories` displays directories with any status. Set one or more + of `--completed`, `--submitted`, `--eligible`, and `--waiting` to show specific + directories that have specific statuses. 
+ + EXAMPLES + + * Show all the directories for action `one`: + + row show directories --action one + + * Show the directory value element `/value`: + + row show directories --action action --value=/value + + * Show specific directories: + + row show directories --action action directory1 directory2 + + * Show eligible directories + + row show directories --action action --eligible + + * Show the names of all directories + + row show directories + + * Show the names of eligible directories + + row show directories --action action --eligible --short + */ Directories(directories::Arguments), - /// Show the cluster configuration. - /// - /// Print the current cluster configuration in TOML format. - /// - /// EXAMPLES - /// - /// * Show the autodetected cluster: - /// - /// row show cluster - /// - /// * Show the configuration of a specific cluster: - /// - /// row show cluster --cluster=anvil - /// - /// * Show all clusters: - /// - /// row show cluster --all - /// + /** Show the cluster configuration. + + Print the current cluster configuration in TOML format. + + EXAMPLES + + * Show the autodetected cluster: + + row show cluster + + * Show the configuration of a specific cluster: + + row show cluster --cluster=anvil + + * Show all clusters: + + row show cluster --all + */ Cluster(cluster::Arguments), - /// Show launcher configurations. - /// - /// Print the launchers defined for the current cluster (or the cluster - /// given in `--cluster`). The output is TOML formatted. - /// - /// This includes the user-provided launchers in `launchers.toml` and the - /// built-in launchers (or the user-provided overrides). 
- /// - /// EXAMPLES - /// - ///* Show the launchers for the autodetected cluster: - /// - /// row show launchers - /// - ///* Show the launchers for a specific cluster: - /// - /// row show launchers --cluster=anvil - /// - ///* Show all launchers: - /// - /// row show launchers --all - /// - ///* Show only names of all launchers: - /// - /// row show launchers --all --short + /** Show launcher configurations. + + Print the launchers defined for the current cluster (or the cluster + given in `--cluster`). The output is TOML formatted. + + This includes the user-provided launchers in `launchers.toml` and the + built-in launchers (or the user-provided overrides). + + EXAMPLES + + * Show the launchers for the autodetected cluster: + + row show launchers + + * Show the launchers for a specific cluster: + + row show launchers --cluster=anvil + + * Show all launchers: + + row show launchers --all + + * Show only names of all launchers: + + row show launchers --all --short + */ Launchers(launchers::Arguments), /** Show submitted jobs. @@ -224,119 +226,119 @@ pub enum ShowCommands { #[derive(Subcommand, Debug)] pub enum Commands { - /// Initialize a new project. - /// - /// `row init` creates `workflow.toml` and the workspace directory in the - /// given DIRECTORY. It creates the directory if needed. The default workspace - /// path name is `workspace`. Use the `--workspace` option to change this. - /// - /// Set the `--signac` option to create a project compatible with signac. - /// You must separately initialize the signac project. - /// - /// ERRORS - /// - /// `row init` returns an error when a row project already exists at the - /// given DIRECTORY. - /// - /// EXAMPLES - /// - /// * Create a project in the current directory: - /// - /// row init . 
- /// - /// * Create a signac compatible project in the directory `project`: - /// - /// row init --signac project - /// - /// * Create a project where the workspace is named `data`: - /// - /// row init --workspace data project - /// + /** Initialize a new project. + + `row init` creates `workflow.toml` and the workspace directory in the + given DIRECTORY. It creates the directory if needed. The default workspace + path name is `workspace`. Use the `--workspace` option to change this. + + Set the `--signac` option to create a project compatible with signac. + You must separately initialize the signac project. + + ERRORS + + `row init` returns an error when a row project already exists at the + given DIRECTORY. + + EXAMPLES + + * Create a project in the current directory: + + row init . + + * Create a signac compatible project in the directory `project`: + + row init --signac project + + * Create a project where the workspace is named `data`: + + row init --workspace data project + */ Init(init::Arguments), /// Show properties of the workspace. #[command(subcommand)] Show(ShowCommands), - /// Scan the workspace for completed actions. - /// - /// `row scan` scans the selected directories for action products and - /// updates the cache of completed directories accordingly. - /// - /// EXAMPLES - /// - /// * Scan all directories for all actions: - /// - /// row scan - /// - /// * Scan a specific action: - /// - /// row scan --action=action - /// - /// * Scan specific directories: - /// - /// row scan directory1 directory2 - /// + /** Scan the workspace for completed actions. + + `row scan` scans the selected directories for action products and + updates the cache of completed directories accordingly. + + EXAMPLES + + * Scan all directories for all actions: + + row scan + + * Scan a specific action: + + row scan --action=action + + * Scan specific directories: + + row scan directory1 directory2 + */ Scan(scan::Arguments), - /// Submit workflow actions to the scheduler. 
- /// - /// `row submit` submits jobs to the scheduler. First it determines the - /// status of all the given directories for the selected actions. Then it - /// forms groups and submits one job for each group. Pass `--dry-run` to see - /// the script(s) that will be submitted. - /// - /// EXAMPLES - /// - /// * Print the job script(s) that will be submitted: - /// - /// row submit --dry-run - /// - /// * Submit jobs for all eligible directories: - /// - /// row submit - /// - /// * Submit the first eligible job: - /// - /// row submit -n 1 - /// - /// * Submit jobs for a specific action: - /// - /// row submit --action=action - /// - /// * Submit jobs for all actions that match a wildcard pattern: - /// - /// row submit --action='project*' - /// - /// * Submit jobs on specific directories: - /// - /// row submit directory1 directory2 - /// + /** Submit workflow actions to the scheduler. + + `row submit` submits jobs to the scheduler. First it determines the + status of all the given directories for the selected actions. Then it + forms groups and submits one job for each group. Pass `--dry-run` to see + the script(s) that will be submitted. + + EXAMPLES + + * Print the job script(s) that will be submitted: + + row submit --dry-run + + * Submit jobs for all eligible directories: + + row submit + + * Submit the first eligible job: + + row submit -n 1 + + * Submit jobs for a specific action: + + row submit --action=action + + * Submit jobs for all actions that match a wildcard pattern: + + row submit --action='project*' + + * Submit jobs on specific directories: + + row submit directory1 directory2 + */ Submit(submit::Arguments), - /// Remove cache files. - /// - /// `row clean` safely removes cache files generated by row. - /// - /// EXAMPLES - /// - /// * Remove the completed cache: - /// - /// row clean --completed - /// + /** Remove cache files. + + `row clean` safely removes cache files generated by row. 
+ + EXAMPLES + + * Remove the completed cache: + + row clean --completed + */ Clean(clean::Arguments), } -/// Parse directories passed in on the command line. -/// -/// # Returns -/// `Ok(Vec)` listing all the selected directories. -/// - No input selects all project directories. -/// - One "-" input reads directories from stdin. -/// - Otherwise, pass through the given directories from the command line. -/// -/// `Err(row::Error)` when there is an error reading from stdin. -/// +/** Parse directories passed in on the command line. + +# Returns +`Ok(Vec)` listing all the selected directories. +- No input selects all project directories. +- One "-" input reads directories from stdin. +- Otherwise, pass through the given directories from the command line. + +`Err(row::Error)` when there is an error reading from stdin. +*/ pub fn parse_directories( mut query_directories: Vec, get_all_directories: F, diff --git a/src/cli/directories.rs b/src/cli/directories.rs index d9eeb16..9934be7 100644 --- a/src/cli/directories.rs +++ b/src/cli/directories.rs @@ -69,10 +69,10 @@ pub struct Arguments { short: bool, } -/// Show directories that match an action. -/// -/// Print a human-readable list of directories, their status, job ID, and value(s). -/// +/** Show directories that match an action. + +Print a human-readable list of directories, their status, job ID, and value(s). +*/ pub fn directories( options: &GlobalOptions, args: Arguments, diff --git a/src/cli/launchers.rs b/src/cli/launchers.rs index b3ace21..0aa4216 100644 --- a/src/cli/launchers.rs +++ b/src/cli/launchers.rs @@ -21,10 +21,10 @@ pub struct Arguments { short: bool, } -/// Show the launchers. -/// -/// Print the launchers to stdout in toml format. -/// +/** Show the launchers. + +Print the launchers to stdout in toml format. 
+*/ pub fn launchers( options: &GlobalOptions, args: &Arguments, diff --git a/src/cli/scan.rs b/src/cli/scan.rs index a0898fe..a6298da 100644 --- a/src/cli/scan.rs +++ b/src/cli/scan.rs @@ -28,10 +28,10 @@ pub struct Arguments { directories: Vec, } -/// Scan directories and determine whether a given action (or all actions) have completed. -/// -/// Write the resulting list of completed directories to a completion pack file. -/// +/** Scan directories and determine whether a given action (or all actions) have completed. + +Write the resulting list of completed directories to a completion pack file. +*/ pub fn scan( options: &GlobalOptions, args: Arguments, diff --git a/src/cli/status.rs b/src/cli/status.rs index c406c33..15c1de7 100644 --- a/src/cli/status.rs +++ b/src/cli/status.rs @@ -97,10 +97,10 @@ fn make_row(action_name: &str, status: &Status, cost: &ResourceCost) -> Vec( options: &GlobalOptions, args: Arguments, diff --git a/src/cli/submit.rs b/src/cli/submit.rs index 256395a..668991d 100644 --- a/src/cli/submit.rs +++ b/src/cli/submit.rs @@ -10,6 +10,7 @@ use signal_hook::consts::{SIGINT, SIGTERM}; use signal_hook::flag; use std::collections::HashSet; use std::error::Error; +use std::fmt::Write as _; use std::io::prelude::*; use std::io::{self, IsTerminal}; use std::path::PathBuf; @@ -49,7 +50,6 @@ pub struct Arguments { } /// Submit workflow actions to the scheduler. 
-/// #[allow(clippy::too_many_lines)] pub fn submit( options: &GlobalOptions, args: Arguments, @@ -265,7 +265,11 @@ pub fn submit( .italic() .to_string(); } - message += &format!(" ({:#}).", style(HumanDuration(instant.elapsed())).dim()); + let _ = write!( + message, + " ({:#}).", + style(HumanDuration(instant.elapsed())).dim() + ); println!("{message}"); let result = scheduler.submit( diff --git a/src/cluster.rs b/src/cluster.rs index 4c27be0..b77ce00 100644 --- a/src/cluster.rs +++ b/src/cluster.rs @@ -14,11 +14,11 @@ use crate::builtin::BuiltIn; use crate::workflow::Resources; use crate::Error; -/// Cluster configuration -/// -/// `Configuration` stores the cluster configuration for each defined -/// cluster. -/// +/** Cluster configuration + +`Configuration` stores the cluster configuration for each defined +cluster. +*/ #[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)] #[serde(deny_unknown_fields)] pub struct Configuration { @@ -27,17 +27,14 @@ pub struct Configuration { pub cluster: Vec, } -/// Cluster -/// -/// `Cluster` stores everything needed to define a single cluster. It is read -/// from the `clusters.toml` file. -/// +/** Cluster +`Cluster` stores everything needed to define a single cluster. It is read from the `clusters.toml` file. +*/ #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] #[serde(deny_unknown_fields)] pub struct Cluster { /// The cluster's name. pub name: String, - /// The method used to automatically identify this cluster. pub identify: IdentificationMethod, @@ -121,16 +118,16 @@ pub struct Partition { } impl Configuration { - /// Identify the cluster. - /// - /// Identifying the current cluster consumes the `Configuration`. - /// - /// # Errors - /// * `row::Error::ClusterNameNotFound` when a cluster by the given name - /// is not present in the configuration (when `name = Some(_)`). - /// * `row::Error::ClusterNotFound` when the automatic identification - /// fails to find a cluster in the configuration. 
- /// + /** Identify the cluster. + + Identifying the current cluster consumes the `Configuration`. + + # Errors + * `row::Error::ClusterNameNotFound` when a cluster by the given name + is not present in the configuration (when `name = Some(_)`). + * `row::Error::ClusterNotFound` when the automatic identification + fails to find a cluster in the configuration. + */ pub fn identify(self, name: Option<&str>) -> Result { let cluster = if let Some(name) = name { self.cluster @@ -148,15 +145,15 @@ impl Configuration { Ok(cluster) } - /// Open the cluster configuration - /// - /// Open `$HOME/.config/row/clusters.toml` if it exists and merge it with - /// the built-in configuration. - /// - /// # Errors - /// Returns `Err(row::Error)` when the file cannot be read or if there is - /// as parse error. - /// + /** Open the cluster configuration + + Open `$HOME/.config/row/clusters.toml` if it exists and merge it with + the built-in configuration. + + # Errors + Returns `Err(row::Error)` when the file cannot be read or if there is + a parse error. + */ pub fn open() -> Result { let home = match env::var("ROW_HOME") { Ok(row_home) => PathBuf::from(row_home), @@ -195,21 +192,21 @@ impl Configuration { Ok(clusters) } - /// Parse a `Configuration` from a TOML string - /// - /// Does *NOT* merge with the built-in configuration. - /// + /** Parse a `Configuration` from a TOML string + + Does *NOT* merge with the built-in configuration. + */ pub(crate) fn parse_str(path: &Path, toml: &str) -> Result { let cluster: Configuration = toml::from_str(toml).map_err(|e| Error::TOMLParse(path.join("clusters.toml"), e))?; Ok(cluster) } - /// Merge keys from another configuration into this one. - /// - /// Merging adds new keys from `b` into self. It also overrides any keys in - /// both with the value in `b`. - /// + /** Merge keys from another configuration into this one. + + Merging adds new keys from `b` into self. It also overrides any keys in + both with the value in `b`. 
+ */ fn merge(&mut self, b: &Self) { let mut new_cluster = b.cluster.clone(); new_cluster.extend(self.cluster.clone()); @@ -233,11 +230,11 @@ impl Cluster { } } - /// Find the partition to use for the given job. - /// - /// # Errors - /// Returns `Err` when the partition is not found. - /// + /** Find the partition to use for the given job. + + # Errors + Returns `Err` when the partition is not found. + */ pub fn find_partition( &self, partition_name: Option<&str>, diff --git a/src/expr.rs b/src/expr.rs index a6fbb70..7b4f705 100644 --- a/src/expr.rs +++ b/src/expr.rs @@ -7,11 +7,11 @@ use std::iter; use crate::workflow::Comparison; -/// Compares two Values lexicographically. -/// -/// # Returns -/// `Some(Ordering)` when an ordering can be determined, otherwise `None`. -/// +/** Compares two Values lexicographically. + +# Returns +`Some(Ordering)` when an ordering can be determined, otherwise `None`. +*/ pub(crate) fn partial_cmp_json_values(a: &Value, b: &Value) -> Option { match (a, b) { (Value::String(a_str), Value::String(b_str)) => Some(a_str.cmp(b_str)), @@ -49,11 +49,11 @@ pub(crate) fn partial_cmp_json_values(a: &Value, b: &Value) -> Option } } -/// Compares two Values lexicographically with the given comparison operator. -/// -/// # Returns -/// `Some(Ordering)` when an ordering can be determined, otherwise `None`. -/// +/** Compares two Values lexicographically with the given comparison operator. + +# Returns +`Some(Ordering)` when an ordering can be determined, otherwise `None`. +*/ pub(crate) fn evaluate_json_comparison( comparison: &Comparison, a: &Value, diff --git a/src/launcher.rs b/src/launcher.rs index ae37aa3..a212e5a 100644 --- a/src/launcher.rs +++ b/src/launcher.rs @@ -15,21 +15,21 @@ use crate::builtin::BuiltIn; use crate::workflow::Resources; use crate::Error; -/// Launcher configuration -/// -/// `Configuration` stores the launcher configuration for each defined -/// launcher/cluster. 
-/// +/** Launcher configuration + +`Configuration` stores the launcher configuration for each defined +launcher/cluster. +*/ #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct Configuration { /// The launcher configurations. pub(crate) launchers: HashMap>, } -/// Launcher -/// -/// `Launcher` is one element of the launcher configuration. -/// +/** Launcher + +`Launcher` is one element of the launcher configuration. +*/ #[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq, Serialize)] #[serde(deny_unknown_fields)] pub struct Launcher { @@ -90,15 +90,15 @@ impl Launcher { } impl Configuration { - /// Open the launcher configuration - /// - /// Open `$HOME/.config/row/launchers.toml` if it exists and merge it with - /// the built-in configuration. - /// - /// # Errors - /// Returns `Err(row::Error)` when the file cannot be read or if there is - /// as parse error. - /// + /** Open the launcher configuration + + Open `$HOME/.config/row/launchers.toml` if it exists and merge it with + the built-in configuration. + + # Errors + Returns `Err(row::Error)` when the file cannot be read or if there is + a parse error. + */ pub fn open() -> Result { let home = match env::var("ROW_HOME") { Ok(row_home) => PathBuf::from(row_home), @@ -138,10 +138,10 @@ impl Configuration { Ok(launchers) } - /// Parse a `Configuration` from a TOML string - /// - /// Does *NOT* merge with the built-in configuration. - /// + /** Parse a `Configuration` from a TOML string + + Does *NOT* merge with the built-in configuration. + */ pub(crate) fn parse_str(path: &Path, toml: &str) -> Result { Ok(Configuration { launchers: toml::from_str(toml) @@ -149,11 +149,11 @@ impl Configuration { }) } - /// Merge keys from another configuration into this one. - /// - /// Merging adds new keys from `b` into self. 
It also overrides any keys in + both with the value in `b`. + */ fn merge(&mut self, b: Self) { for (launcher_name, launcher_clusters) in b.launchers { self.launchers @@ -163,10 +163,11 @@ impl Configuration { } } - /// Validate that the configuration is correct. - /// - /// Valid launcher configurations have a `default` cluster for all - /// launchers. + /** Validate that the configuration is correct. + + Valid launcher configurations have a `default` cluster for all + launchers. + */ fn validate(&self) -> Result<(), Error> { for (launcher_name, launcher_clusters) in &self.launchers { if !launcher_clusters.contains_key("default") { @@ -177,11 +178,11 @@ impl Configuration { Ok(()) } - /// Get all launchers for a specific cluster. - /// - /// # Panics - /// When a given launcher has no default. - /// + /** Get all launchers for a specific cluster. + + # Panics + When a given launcher has no default. + */ pub fn by_cluster(&self, cluster_name: &str) -> HashMap { let mut result = HashMap::with_capacity(self.launchers.len()); diff --git a/src/lib.rs b/src/lib.rs index eabb658..778cd11 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,14 +1,6 @@ // Copyright (c) 2024 The Regents of the University of Michigan. // Part of row, released under the BSD 3-Clause License. -#![warn(clippy::pedantic)] -#![allow(clippy::cast_precision_loss)] -#![allow(clippy::cast_possible_wrap)] -#![allow(clippy::cast_possible_truncation)] -#![allow(clippy::cast_sign_loss)] -#![allow(clippy::must_use_candidate)] -#![warn(clippy::format_push_string)] - pub(crate) mod builtin; pub mod cluster; mod expr; @@ -35,11 +27,11 @@ pub const DIRECTORY_CACHE_FILE_NAME: &str = "directories.json"; pub const COMPLETED_CACHE_FILE_NAME: &str = "completed.postcard"; pub const SUBMITTED_CACHE_FILE_NAME: &str = "submitted.postcard"; -/// Hold a `MultiProgress` and all of its progress bars. 
-/// -/// This is necessary because a dropped `ProgressBar` will be automatically -/// removed from [MultiProgress](https://github.com/console-rs/indicatif/issues/614) -/// +/** Hold a `MultiProgress` and all of its progress bars. + +This is necessary because a dropped `ProgressBar` will be automatically +removed from [MultiProgress](https://github.com/console-rs/indicatif/issues/614) +*/ pub struct MultiProgressContainer { progress_bars: Vec, multi_progress: MultiProgress, @@ -260,10 +252,11 @@ impl MultiProgressContainer { self.multi_progress.add(progress_bar) } - /// Clear all progress bars - /// - /// # Errors - /// Forwards the error from `indicatif::MultiProgress::clear`. + /** Clear all progress bars + + # Errors + Forwards the error from `indicatif::MultiProgress::clear`. + */ pub fn clear(&mut self) -> Result<(), std::io::Error> { self.progress_bars.clear(); self.multi_progress.clear() diff --git a/src/progress_styles.rs b/src/progress_styles.rs index b490534..8edacca 100644 --- a/src/progress_styles.rs +++ b/src/progress_styles.rs @@ -13,11 +13,11 @@ fn elapsed(state: &ProgressState, w: &mut dyn Write) { let _ = write!(w, "{:#}", HumanDuration(state.elapsed())); } -/// Create a named spinner. -/// -/// # Panics -/// When the progress style is invalid. -/// +/** Create a named spinner. + +# Panics +When the progress style is invalid. +*/ pub fn uncounted_spinner() -> ProgressStyle { ProgressStyle::with_template("{spinner:.green.bold} {msg:.bold}... ({elapsed:.dim})") .expect("Valid template") @@ -25,11 +25,11 @@ pub fn uncounted_spinner() -> ProgressStyle { .tick_strings(&["◐", "◓", "◑", "◒", "⊙"]) } -/// Create a spinner that displays the current counted position. -/// -/// # Panics -/// When the progress style is invalid. -/// +/** Create a spinner that displays the current counted position. + +# Panics +When the progress style is invalid. 
+*/ pub fn counted_spinner() -> ProgressStyle { ProgressStyle::with_template("{spinner:.green.bold} {msg:.bold}: {human_pos} ({elapsed:.dim})") .expect("Valid template") @@ -37,11 +37,11 @@ pub fn counted_spinner() -> ProgressStyle { .tick_strings(&["◐", "◓", "◑", "◒", "⊙"]) } -/// Create a progress bar that displays the current counted position. -/// -/// # Panics -/// When the progress style is invalid. -/// +/** Create a progress bar that displays the current counted position. + +# Panics +When the progress style is invalid. +*/ pub fn counted_bar() -> ProgressStyle { ProgressStyle::with_template( "|{bar:32.green}| {msg:.bold}: {human_pos}/{human_len} ({elapsed:.dim})", diff --git a/src/scheduler.rs b/src/scheduler.rs index 6695c2b..ca516aa 100644 --- a/src/scheduler.rs +++ b/src/scheduler.rs @@ -73,28 +73,28 @@ pub trait Scheduler { should_terminate: Arc, ) -> Result, Error>; - /// Query the scheduler and determine which jobs remain active. - /// - /// # Arguments - /// * `jobs`: Identifiers to query - /// - /// `active_jobs` returns a `ActiveJobs` object, which provides the final - /// result via a method. This allows implementations to be asynchronous so - /// that long-running subprocesses can complete in the background while the - /// collar performs other work. - /// - /// # Errors - /// Returns `Err` when the job queue query cannot be executed. - /// + /** Query the scheduler and determine which jobs remain active. + + # Arguments + * `jobs`: Identifiers to query + + `active_jobs` returns an `ActiveJobs` object, which provides the final + result via a method. This allows implementations to be asynchronous so + that long-running subprocesses can complete in the background while the + caller performs other work. + + # Errors + Returns `Err` when the job queue query cannot be executed. + */ fn active_jobs(&self, jobs: &[u32]) -> Result, Error>; } /// Deferred result containing jobs that are still active on the cluster. 
pub trait ActiveJobs { - /// Complete the operation and return the currently active jobs. - /// - /// # Errors - /// Returns `Err` when the job queue query cannot be executed. - /// + /** Complete the operation and return the currently active jobs. + + # Errors + Returns `Err` when the job queue query cannot be executed. + */ fn get(self: Box) -> Result, Error>; } diff --git a/src/scheduler/bash.rs b/src/scheduler/bash.rs index 7bae31b..de09d95 100644 --- a/src/scheduler/bash.rs +++ b/src/scheduler/bash.rs @@ -410,10 +410,10 @@ impl Scheduler for Bash { Ok(None) } - /// Bash reports no active jobs. - /// - /// All jobs are executed immediately on submission. - /// + /** Bash reports no active jobs. + + All jobs are executed immediately on submission. + */ fn active_jobs(&self, _: &[u32]) -> Result, Error> { Ok(Box::new(ActiveBashJobs {})) } diff --git a/src/scheduler/slurm.rs b/src/scheduler/slurm.rs index 446c64a..b7166a6 100644 --- a/src/scheduler/slurm.rs +++ b/src/scheduler/slurm.rs @@ -33,9 +33,10 @@ impl Slurm { } } -/// Track the running squeue process -/// -/// Or `None` when no process was launched. +/** Track the running squeue process + +Or `None` when no process was launched. +*/ pub struct ActiveSlurmJobs { squeue: Option, max_jobs: usize, @@ -219,11 +220,11 @@ impl Scheduler for Slurm { } } - /// Use `squeue` to determine the jobs that are still present in the queue. - /// - /// Launch `squeue --jobs job0,job1,job2 -o "%A" --noheader` to determine which of - /// these jobs are still in the queue. - /// + /** Use `squeue` to determine the jobs that are still present in the queue. + + Launch `squeue --jobs job0,job1,job2 -o "%A" --noheader` to determine which of + these jobs are still in the queue. 
+ */ fn active_jobs(&self, jobs: &[u32]) -> Result, Error> { if jobs.is_empty() { return Ok(Box::new(ActiveSlurmJobs { diff --git a/src/state.rs b/src/state.rs index ef33cae..67f10f3 100644 --- a/src/state.rs +++ b/src/state.rs @@ -21,10 +21,10 @@ use crate::{ type SubmittedJobs = HashMap>; -/// Directory cache -/// -/// Cache the directory values and store the last modified time. -/// +/** Directory cache + +Cache the directory values and store the last modified time. +*/ #[derive(Debug, Default, Deserialize, PartialEq, Serialize)] pub struct DirectoryCache { /// File system modification time of the workspace. @@ -34,17 +34,17 @@ pub struct DirectoryCache { values: HashMap, } -/// The state of the project. -/// -/// `State` collects the following information on the workspace and manages cache files -/// on the filesystem for these (separately): -/// * JSON values for each directory -/// * Completed directories for each action. -/// * Scheduled jobs by action, directory, (and cluster?). -/// -/// `State` implements methods that synchronize a state with the workspace on disk and -/// to interface with the scheduler's queue. -/// +/** The state of the project. + +`State` collects the following information on the workspace and manages cache files +on the filesystem for these (separately): +* JSON values for each directory +* Completed directories for each action. +* Scheduled jobs by action, directory, (and cluster?). + +`State` implements methods that synchronize a state with the workspace on disk and +to interface with the scheduler's queue. +*/ #[derive(Debug, Default, Deserialize, PartialEq, Serialize)] pub struct State { /// The directory cache. @@ -126,10 +126,10 @@ impl State { self.submitted_modified = true; } - /// Remove inactive jobs on the given cluster. - /// - /// Note: The argument lists the *active* jobs to keep! - /// + /** Remove inactive jobs on the given cluster. + + Note: The argument lists the *active* jobs to keep! 
+ */ pub fn remove_inactive_submitted(&mut self, cluster_name: &str, active_job_ids: &HashSet) { trace!("Removing inactive jobs from the submitted cache."); self.submitted_modified = true; @@ -162,11 +162,11 @@ impl State { result } - /// Read the state cache from disk. - /// - /// # Errors - /// Returns `Err` when the cache files cannot be read or parsed. - /// + /** Read the state cache from disk. + + # Errors + Returns `Err` when the cache files cannot be read or parsed. + */ pub fn from_cache(workflow: &Workflow) -> Result { let mut state = State { directory_cache: Self::read_directory_cache(workflow)?, @@ -275,11 +275,11 @@ impl State { } } - /// Save the state cache to the filesystem. - /// - /// # Errors - /// Returns `Err` when a cache file cannot be saved. - /// + /** Save the state cache to the filesystem. + + # Errors + Returns `Err` when a cache file cannot be saved. + */ pub fn save_cache( &mut self, workflow: &Workflow, @@ -395,20 +395,20 @@ impl State { Ok(()) } - /// Synchronize a workspace on disk with a `State`. - /// - /// * Remove directories from the state that are no longer present on the filesystem. - /// * Make no changes to directories in the state that remain. - /// * When new directories are present on the filesystem, add them to the state - - /// which includes reading the value file and checking which actions are completed. - /// * Remove actions that are no longer present from the completed and submitted caches. - /// * Remove directories that are no longer present from the completed and submitted caches. - /// - /// # Errors - /// - /// * Returns `Error` when there is an I/O error reading the - /// workspace directory - /// + /** Synchronize a workspace on disk with a `State`. + + * Remove directories from the state that are no longer present on the filesystem. + * Make no changes to directories in the state that remain. 
+ * When new directories are present on the filesystem, add them to the state - + which includes reading the value file and checking which actions are completed. + * Remove actions that are no longer present from the completed and submitted caches. + * Remove directories that are no longer present from the completed and submitted caches. + + # Errors + + * Returns `Error` when there is an I/O error reading the + workspace directory + */ pub(crate) fn synchronize_workspace( &mut self, workflow: &Workflow, diff --git a/src/ui.rs b/src/ui.rs index a849ded..5941982 100644 --- a/src/ui.rs +++ b/src/ui.rs @@ -10,11 +10,11 @@ use std::io::{self, Write}; /// The default writer buffer size. const DEFAULT_BUFFER_SIZE: usize = 1024; -/// Buffered writer that interoperates with a `MultiProgress`. -/// -/// Use this writer to buffer writes to stdout/stderr. When flushed, the -/// writer will suspend the `MultiProgress` and write the output. -/// +/** Buffered writer that interoperates with a `MultiProgress`. + +Use this writer to buffer writes to stdout/stderr. When flushed, the +writer will suspend the `MultiProgress` and write the output. +*/ pub struct MultiProgressWriter { inner: T, multi_progress: MultiProgress, @@ -23,12 +23,12 @@ pub struct MultiProgressWriter { } impl MultiProgressWriter { - /// Create a new writer. - /// - /// # Arguments - /// * `inner`: Writer to forward output to. - /// * `multi_progress`: The `MultiProgress` to suspend when writing. - /// + /** Create a new writer. + + # Arguments + * `inner`: Writer to forward output to. + * `multi_progress`: The `MultiProgress` to suspend when writing. + */ pub fn new(inner: T, multi_progress: MultiProgress) -> Self { Self { inner, diff --git a/src/workflow.rs b/src/workflow.rs index 23bcc86..229c972 100644 --- a/src/workflow.rs +++ b/src/workflow.rs @@ -18,10 +18,10 @@ use std::str::FromStr; use crate::Error; -/// The workflow definition. 
-/// -/// `Workflow` is the in-memory realization of the user provided `workflow.toml`. -/// +/** The workflow definition. + +`Workflow` is the in-memory realization of the user provided `workflow.toml`. +*/ #[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq)] #[serde(deny_unknown_fields)] pub struct Workflow { @@ -42,10 +42,10 @@ pub struct Workflow { pub action: Vec, } -/// The workspace definition. -/// -/// `Workspace` stores the user-provided options defining the workspace. -/// +/** The workspace definition. + +`Workspace` stores the user-provided options defining the workspace. +*/ #[derive(Clone, Debug, Deserialize, PartialEq, Eq)] #[serde(deny_unknown_fields)] pub struct Workspace { @@ -57,11 +57,11 @@ pub struct Workspace { pub value_file: Option, } -/// The submission options -/// -/// `SubmitOPtions` stores the user-provided cluster specific submission options for a workflow or -/// action. -/// +/** The submission options + +`SubmitOptions` stores the user-provided cluster specific submission options for a workflow or +action. +*/ #[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq)] #[serde(deny_unknown_fields)] pub struct SubmitOptions { @@ -79,10 +79,10 @@ pub struct SubmitOptions { pub partition: Option, } -/// The action definition. -/// -/// `Action` stores the user-provided options for a given action. -/// +/** The action definition. + +`Action` stores the user-provided options for a given action. +*/ #[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq)] #[serde(deny_unknown_fields)] pub struct Action { @@ -120,10 +120,10 @@ pub struct Action { pub from: Option, } -/// Default tables -/// -/// Store default options for other tables in the file. -/// +/** Default tables + +Store default options for other tables in the file. 
+*/ #[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq)] #[serde(deny_unknown_fields)] pub struct DefaultTables { @@ -299,11 +299,11 @@ impl Add for ResourceCost { } impl Resources { - /// Determine the total number of processes this action will use. - /// - /// # Arguments - /// `n_directories`: Number of directories in the submission. - /// + /** Determine the total number of processes this action will use. + + # Arguments + `n_directories`: Number of directories in the submission. + */ pub fn total_processes(&self, n_directories: usize) -> usize { match self.processes() { Processes::PerDirectory(p) => p * n_directories, @@ -311,32 +311,32 @@ impl Resources { } } - /// Determine the total number of CPUs this action will use. - /// - /// # Arguments - /// `n_directories`: Number of directories in the submission. - /// + /** Determine the total number of CPUs this action will use. + + # Arguments + `n_directories`: Number of directories in the submission. + */ pub fn total_cpus(&self, n_directories: usize) -> usize { self.total_processes(n_directories) * self.threads_per_process.unwrap_or(1) } - /// Determine the total number of GPUs this action will use. - /// - /// # Arguments - /// `n_directories`: Number of directories in the submission. - /// + /** Determine the total number of GPUs this action will use. + + # Arguments + `n_directories`: Number of directories in the submission. + */ pub fn total_gpus(&self, n_directories: usize) -> usize { self.total_processes(n_directories) * self.gpus_per_process.unwrap_or(0) } - /// Determine the total walltime this action will use. - /// - /// # Arguments - /// `n_directories`: Number of directories in the submission. - /// - /// # Panics - /// When the resulting walltime cannot be represented. - /// + /** Determine the total walltime this action will use. + + # Arguments + `n_directories`: Number of directories in the submission. + + # Panics + When the resulting walltime cannot be represented. 
+ */ pub fn total_walltime(&self, n_directories: usize) -> Duration { match self.walltime() { Walltime::PerDirectory(ref w) => Duration::new( @@ -350,11 +350,11 @@ impl Resources { } } - /// Compute the total resource usage of an action execution. - /// - /// The cost is computed assuming that every job is executed to the full - /// requested walltime. - /// + /** Compute the total resource usage of an action execution. + + The cost is computed assuming that every job is executed to the full + requested walltime. + */ pub fn cost(&self, n_directories: usize) -> ResourceCost { let process_hours = ((self.total_processes(n_directories) as i64) * self.total_walltime(n_directories).signed_total_seconds()) @@ -539,15 +539,15 @@ impl Group { } impl Workflow { - /// Open the workflow - /// - /// Find `workflow.toml` in the current working directory or any parent directory. Open the - /// file, parse it, and return a `Workflow`. - /// - /// # Errors - /// Returns `Err(row::Error)` when the file is not found, cannot be read, or there is a parse - /// error. - /// + /** Open the workflow + + Find `workflow.toml` in the current working directory or any parent directory. Open the + file, parse it, and return a `Workflow`. + + # Errors + Returns `Err(row::Error)` when the file is not found, cannot be read, or there is a parse + error. + */ pub fn open() -> Result { let (path, file) = find_and_open_workflow()?; let mut buffer = BufReader::new(file); @@ -560,14 +560,14 @@ impl Workflow { Self::open_str(&path, &workflow_string) } - /// Build a workflow from a given path and toml string. - /// - /// Parse the contents of the given string as if it were `workflow.toml` at the given `path`. - /// - /// # Errors - /// Returns `Err(row::Error)` when the file is not found, cannot be read, or there is a parse - /// error. - /// + /** Build a workflow from a given path and toml string. + + Parse the contents of the given string as if it were `workflow.toml` at the given `path`. 
+ + # Errors + Returns `Err(row::Error)` when the file is not found, cannot be read, or there is a parse + error. + */ pub(crate) fn open_str(path: &Path, toml: &str) -> Result { let mut workflow: Workflow = toml::from_str(toml).map_err(|e| Error::TOMLParse(path.join("workflow.toml"), e))?; @@ -584,12 +584,12 @@ impl Workflow { } } - /// Validate a `Workflow` and populate defaults. - /// - /// Resolve each action to a fully defined struct with defaults populated - /// from: The current action, the action named by "from", and the default - /// action (in that order). - /// + /** Validate a `Workflow` and populate defaults. + + Resolve each action to a fully defined struct with defaults populated + from: The current action, the action named by "from", and the default + action (in that order). + */ fn validate_and_set_defaults(mut self) -> Result { let mut action_names = HashSet::with_capacity(self.action.len()); @@ -684,16 +684,16 @@ where Ok(duration) } -/// Finds and opens the file `workflow.toml`. -/// -/// Looks in the current working directory and all parent directories. -/// -/// # Errors -/// Returns `Err(row::Error)` when the file is not found or cannot be opened. -/// -/// # Returns -/// `Ok(PathBuf, File)` including the path where the file was found and the open file handle. -/// +/** Finds and opens the file `workflow.toml`. + +Looks in the current working directory and all parent directories. + +# Errors +Returns `Err(row::Error)` when the file is not found or cannot be opened. + +# Returns +`Ok(PathBuf, File)` including the path where the file was found and the open file handle. 
+*/ fn find_and_open_workflow() -> Result<(PathBuf, File), Error> { let mut path = env::current_dir()?; diff --git a/src/workspace.rs b/src/workspace.rs index c1068e9..ef5ca24 100644 --- a/src/workspace.rs +++ b/src/workspace.rs @@ -16,11 +16,11 @@ use std::time::Duration; use crate::workflow::Workflow; use crate::{progress_styles, Error, MultiProgressContainer, MIN_PROGRESS_BAR_SIZE}; -/// List all directories in the workspace as found on the filesystem. -/// -/// # Errors -/// Returns `Err` when the workspace directory cannot be accessed. -/// +/** List all directories in the workspace as found on the filesystem. + +# Errors +Returns `Err` when the workspace directory cannot be accessed. +*/ pub fn list_directories( workflow: &Workflow, multi_progress: &mut MultiProgressContainer, @@ -59,10 +59,10 @@ pub fn list_directories( Ok(directories) } -/// Directories that have completed actions. -/// -/// Call `get()` to wait for all pending threads to complete and return the result. -/// +/** Directories that have completed actions. + +Call `get()` to wait for all pending threads to complete and return the result. +*/ pub struct CompletedDirectories { /// Threads scanning the directories. threads: Vec>>, @@ -74,20 +74,20 @@ pub struct CompletedDirectories { progress: ProgressBar, } -/// Find directories that have completed actions. -/// -/// `find_completed_directories` spawns threads to scan the workspace and then -/// returns immediately. Calling `get` on the result will wait for the threads -/// to complete and then provides the list of completions. -/// -/// # Arguments -/// * `workflow` - The `Workflow` to scan for completed directories. -/// * `directories` - The directories to scan. Must be present in the workspace. -/// * `io_threads` - Number of threads to use while scanning directories. -/// -/// # Panics -/// When unable to spawn threads. -/// +/** Find directories that have completed actions. 
+ +`find_completed_directories` spawns threads to scan the workspace and then +returns immediately. Calling `get` on the result will wait for the threads +to complete and then provides the list of completions. + +# Arguments +* `workflow` - The `Workflow` to scan for completed directories. +* `directories` - The directories to scan. Must be present in the workspace. +* `io_threads` - Number of threads to use while scanning directories. + +# Panics +When unable to spawn threads. +*/ pub fn find_completed_directories( workflow: &Workflow, directories: Vec, @@ -185,14 +185,14 @@ pub fn find_completed_directories( } impl CompletedDirectories { - /// Get the directories that have been completed for each action. - /// - /// # Errors - /// Returns `Err` when the workspace directories cannot be accessed. - /// - /// # Panics - /// This method should not panic. - /// + /** Get the directories that have been completed for each action. + + # Errors + Returns `Err` when the workspace directories cannot be accessed. + + # Panics + This method should not panic. + */ pub fn get(self) -> Result>, Error> { let mut result = HashMap::new(); for (directory, action) in &self.receiver { @@ -212,10 +212,10 @@ impl CompletedDirectories { } } -/// JSON values of directories. -/// -/// Call `get()` to wait for all pending threads to complete and return the result. -/// +/** JSON values of directories. + +Call `get()` to wait for all pending threads to complete and return the result. +*/ pub(crate) struct DirectoryValues { /// Threads reading the values. threads: Vec>>, @@ -227,17 +227,17 @@ pub(crate) struct DirectoryValues { progress: ProgressBar, } -/// Read value files from directories. -/// -/// `read_values` spawns threads that read the JSON value files and -/// returns immediately. Calling `get` on the result will wait for the threads -/// to complete and then provides the map of directory names to values. -/// -/// # Arguments -/// * `workflow` - The `Workflow` to read from. 
-/// * `directories` - The directories to read. Must be present in the workspace. -/// * `io_threads` - Number of threads to use while scanning directories. -/// +/** Read value files from directories. + +`read_values` spawns threads that read the JSON value files and +returns immediately. Calling `get` on the result will wait for the threads +to complete and then provides the map of directory names to values. + +# Arguments +* `workflow` - The `Workflow` to read from. +* `directories` - The directories to read. Must be present in the workspace. +* `io_threads` - Number of threads to use while scanning directories. +*/ pub(crate) fn read_values( workflow: &Workflow, directories: Vec,