From 40c95384ad2c6142f31c0226fbeeb8b2772ee2e9 Mon Sep 17 00:00:00 2001
From: "Andrew J. Stone"
Date: Thu, 11 Jan 2024 18:40:25 +0000
Subject: [PATCH] WIP: Add oxlog tool and library

This still needs a bit of work before it is ready to go.
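For illustration, a couple of example invocations of the new tool (the exact
flags and output are likely to change while this is still WIP; both
subcommands below are defined in dev-tools/oxlog/src/bin/oxlog.rs):

    # List every zone oxlog knows how to find logs for
    oxlog zones

    # List the current and archived log files for all services in the
    # switch zone, along with size and modification-time metadata
    oxlog logs oxz_switch --current --archived --metadata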
---
 Cargo.lock                       |  11 ++
 Cargo.toml                       |   2 +
 dev-tools/oxlog/Cargo.toml       |  18 ++
 dev-tools/oxlog/src/bin/oxlog.rs | 116 ++++++++++++
 dev-tools/oxlog/src/lib.rs       | 308 +++++++++++++++++++++++++++++++
 5 files changed, 455 insertions(+)
 create mode 100644 dev-tools/oxlog/Cargo.toml
 create mode 100644 dev-tools/oxlog/src/bin/oxlog.rs
 create mode 100644 dev-tools/oxlog/src/lib.rs

diff --git a/Cargo.lock b/Cargo.lock
index 7491f30dde..6cc8f7619d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5584,6 +5584,17 @@ dependencies = [
  "uuid",
 ]
 
+[[package]]
+name = "oxlog"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "camino",
+ "chrono",
+ "clap 4.4.3",
+ "uuid",
+]
+
 [[package]]
 name = "p256"
 version = "0.13.2"
diff --git a/Cargo.toml b/Cargo.toml
index fbef04d3c0..aad1be4265 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -20,6 +20,7 @@ members = [
     "dev-tools/crdb-seed",
     "dev-tools/omdb",
     "dev-tools/omicron-dev",
+    "dev-tools/oxlog",
     "dev-tools/thing-flinger",
     "dev-tools/xtask",
     "dns-server",
@@ -91,6 +92,7 @@ default-members = [
     "dev-tools/crdb-seed",
     "dev-tools/omdb",
     "dev-tools/omicron-dev",
+    "dev-tools/oxlog",
     "dev-tools/thing-flinger",
     # Do not include xtask in the list of default members, because this causes
     # hakari to not work as well and build times to be longer.
diff --git a/dev-tools/oxlog/Cargo.toml b/dev-tools/oxlog/Cargo.toml
new file mode 100644
index 0000000000..bf86ea7587
--- /dev/null
+++ b/dev-tools/oxlog/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "oxlog"
+version = "0.1.0"
+edition = "2021"
+license = "MPL-2.0"
+
+[dependencies]
+anyhow.workspace = true
+camino.workspace = true
+chrono.workspace = true
+clap.workspace = true
+uuid.workspace = true
+
+# Disable doc builds by default for our binaries to work around issue
+# rust-lang/cargo#8373. These docs would not be very useful anyway.
+[[bin]]
+name = "oxlog"
+doc = false
diff --git a/dev-tools/oxlog/src/bin/oxlog.rs b/dev-tools/oxlog/src/bin/oxlog.rs
new file mode 100644
index 0000000000..da76b24272
--- /dev/null
+++ b/dev-tools/oxlog/src/bin/oxlog.rs
@@ -0,0 +1,116 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Tool for discovering Oxide-related log files on sleds
+
+use clap::{Parser, Subcommand};
+use oxlog::{LogFile, Zones};
+
+#[derive(Debug, Parser)]
+#[command(version)]
+struct Cli {
+    #[command(subcommand)]
+    command: Commands,
+}
+
+#[derive(Debug, Subcommand)]
+enum Commands {
+    /// List all zones found on the filesystem
+    Zones,
+
+    /// List logs for a given service
+    Logs {
+        /// The name of the zone
+        zone: String,
+
+        /// The name of the service to list logs for
+        service: Option<String>,
+
+        /// Print available metadata
+        #[arg(short, long)]
+        metadata: bool,
+
+        /// Print only the current log file
+        #[arg(short, long)]
+        current: bool,
+
+        /// Print only the archived log files
+        #[arg(short, long)]
+        archived: bool,
+
+        /// Print only the extra log files
+        #[arg(short, long)]
+        extra: bool,
+    },
+}
+
+fn main() -> Result<(), anyhow::Error> {
+    let cli = Cli::parse();
+
+    match cli.command {
+        Commands::Zones => {
+            for zone in Zones::load()?.zones.keys() {
+                println!("{zone}");
+            }
+            Ok(())
+        }
+        Commands::Logs {
+            zone,
+            service,
+            metadata,
+            current,
+            archived,
+            extra,
+        } => {
+            let zones = Zones::load()?;
+            let print_metadata = |f: &LogFile| {
+                println!(
+                    "{}\t{}\t{}",
+                    f.path,
+                    f.size.map_or_else(|| "-".to_string(), |s| s.to_string()),
+                    f.modified
+                        .map_or_else(|| "-".to_string(), |s| s.to_rfc3339())
+                );
+            };
+
+            let logs = zones.zone_logs(&zone);
+            for (svc_name, mut svc_logs) in logs {
+                if let Some(service) = &service {
+                    if svc_name != service.as_str() {
+                        continue;
+                    }
+                }
+                svc_logs.archived.sort();
+                if current {
+                    if let Some(current) = &svc_logs.current {
+                        if metadata {
+                            print_metadata(current);
+                        } else {
+                            println!("{}", current.path);
+                        }
+                    }
+                }
+                if archived {
+                    for f in &svc_logs.archived {
+                        if metadata {
+                            print_metadata(f);
+                        } else {
+                            println!("{}", f.path);
+                        }
+                    }
+                }
+                if extra {
+                    for f in &svc_logs.extra {
+                        if metadata {
+                            print_metadata(f);
+                        } else {
+                            println!("{}", f.path);
+                        }
+                    }
+                }
+            }
+            Ok(())
+        }
+    }
+}
diff --git a/dev-tools/oxlog/src/lib.rs b/dev-tools/oxlog/src/lib.rs
new file mode 100644
index 0000000000..43967c8358
--- /dev/null
+++ b/dev-tools/oxlog/src/lib.rs
@@ -0,0 +1,308 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! A tool to show Oxide-related log file paths
+//!
+//! All data comes from reading the filesystem.
+
+use anyhow::Context;
+use camino::Utf8PathBuf;
+use chrono::{DateTime, Utc};
+use std::collections::BTreeMap;
+use std::fs::{read_dir, DirEntry};
+use std::io;
+use uuid::Uuid;
+
+/// Return a UUID if the `DirEntry` is a directory whose name parses as a UUID.
+fn get_uuid_dir(result: io::Result<DirEntry>) -> Option<Uuid> {
+    let Ok(entry) = result else {
+        return None;
+    };
+    let Ok(file_type) = entry.file_type() else {
+        return None;
+    };
+    if !file_type.is_dir() {
+        return None;
+    }
+    let file_name = entry.file_name();
+    let Some(s) = file_name.to_str() else {
+        return None;
+    };
+    if let Ok(uuid) = s.parse() {
+        Some(uuid)
+    } else {
+        None
+    }
+}
+
+#[derive(Debug)]
+pub struct Pools {
+    pub internal: Vec<Uuid>,
+    pub external: Vec<Uuid>,
+}
+
+impl Pools {
+    pub fn read() -> anyhow::Result<Pools> {
+        let internal = read_dir("/pool/int/")
+            .context("Failed to read /pool/int")?
+            .filter_map(get_uuid_dir)
+            .collect();
+        let external = read_dir("/pool/ext/")
+            .context("Failed to read /pool/ext")?
+            .filter_map(get_uuid_dir)
+            .collect();
+        Ok(Pools { internal, external })
+    }
+}
+
+/// Path and metadata about a logfile
+/// We use options for metadata, as retrieval is fallible
+#[derive(Debug, Clone, Eq)]
+pub struct LogFile {
+    pub path: Utf8PathBuf,
+    pub size: Option<u64>,
+    pub modified: Option<DateTime<Utc>>,
+}
+
+impl PartialEq for LogFile {
+    fn eq(&self, other: &Self) -> bool {
+        self.path == other.path
+    }
+}
+
+impl PartialOrd for LogFile {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for LogFile {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.path.cmp(&other.path)
+    }
+}
+
+impl LogFile {
+    fn new(path: Utf8PathBuf) -> LogFile {
+        LogFile { path, size: None, modified: None }
+    }
+}
+
+/// All Oxide logs for a given service in a given zone
+#[derive(Debug, Clone, Default)]
+pub struct SvcLogs {
+    pub current: Option<LogFile>,
+    pub archived: Vec<LogFile>,
+
+    // Logs in non-standard places and logs that aren't necessarily Oxide logs
+    pub extra: Vec<LogFile>,
+}
+
+// These probably don't warrant newtypes. They are just to make the nested
+// maps below a bit easier to read.
+type ZoneName = String;
+type ServiceName = String;
+
+pub struct Paths {
+    pub primary: Utf8PathBuf,
+    pub debug: Vec<Utf8PathBuf>,
+    pub extra: Vec<(&'static str, Utf8PathBuf)>,
+}
+
+pub struct Zones {
+    pub zones: BTreeMap<ZoneName, Paths>,
+}
+
+impl Zones {
+    pub fn load() -> Result<Zones, anyhow::Error> {
+        let mut zones = BTreeMap::new();
+        zones.insert(
+            "global".to_string(),
+            Paths {
+                primary: Utf8PathBuf::from("/var/svc/log"),
+                debug: vec![],
+                extra: vec![],
+            },
+        );
+        zones.insert(
+            "oxz_switch".to_string(),
+            Paths {
+                primary: Utf8PathBuf::from("/zone/oxz_switch/root/var/svc/log"),
+                debug: vec![],
+                extra: vec![],
+            },
+        );
+        let pools = Pools::read()?;
+
+        // Load the primary and extra logs
+        for uuid in &pools.external {
+            let zones_path: Utf8PathBuf =
+                ["/pool/ext", &uuid.to_string(), "crypt/zone"].iter().collect();
+            // Find the zones on the given pool
+            let Ok(entries) = read_dir(zones_path.as_path()) else {
+                continue;
+            };
+            for entry in entries {
+                let Ok(zone_entry) = entry else {
+                    continue;
+                };
+                let zone = zone_entry.file_name();
+                let Some(zone) = zone.to_str() else {
+                    // not utf8
+                    continue;
+                };
+                // Load the current logs
+                let mut dir = zones_path.clone();
+                dir.push(zone);
+                dir.push("root/var/svc/log");
+                let mut paths =
+                    Paths { primary: dir, debug: vec![], extra: vec![] };
+
+                // Load the extra logs
+                if zone.starts_with("oxz_cockroachdb") {
+                    let mut dir = zones_path.clone();
+                    dir.push(zone);
+                    dir.push("root/data/logs");
+                    paths.extra.push(("cockroachdb", dir));
+                }
+
+                zones.insert(zone.to_string(), paths);
+            }
+        }
+
+        // Load the debug logs
+        for uuid in &pools.external {
+            let zones_path: Utf8PathBuf =
+                ["/pool/ext", &uuid.to_string(), "crypt/debug"]
+                    .iter()
+                    .collect();
+            // Find the zones on the given pool
+            let Ok(entries) = read_dir(zones_path.as_path()) else {
+                continue;
+            };
+            for entry in entries {
+                let Ok(zone_entry) = entry else {
+                    continue;
+                };
+                let zone = zone_entry.file_name();
+                let Some(zone) = zone.to_str() else {
+                    // not utf8
+                    continue;
+                };
+                let mut dir = zones_path.clone();
+                dir.push(zone);
+
+                // We only add debug paths if the zones have primary paths
+                zones.get_mut(zone).map(|paths| paths.debug.push(dir));
+            }
+        }
+
+        Ok(Zones { zones })
+    }
+
+    pub fn zone_logs(&self, zone: &str) -> BTreeMap<ServiceName, SvcLogs> {
+        let mut output = BTreeMap::new();
+        let Some(paths) = self.zones.get(zone) else {
+            return BTreeMap::new();
+        };
+        load_svc_logs(paths.primary.clone(), &mut output);
+        for dir in paths.debug.clone() {
+            load_svc_logs(dir, &mut output);
+        }
+        for (svc_name, dir) in paths.extra.clone() {
+            load_extra_logs(dir, svc_name, &mut output);
+        }
+        output
+    }
+}
+
+fn load_svc_logs(dir: Utf8PathBuf, logs: &mut BTreeMap<ServiceName, SvcLogs>) {
+    let Ok(entries) = read_dir(dir.as_path()) else {
+        return;
+    };
+    for entry in entries {
+        let Ok(entry) = entry else {
+            continue;
+        };
+        let filename = entry.file_name();
+        let Some(filename) = filename.to_str() else {
+            continue;
+        };
+        if filename.starts_with("oxide-") {
+            let mut path = dir.clone();
+            path.push(filename);
+            let mut logfile = LogFile::new(path);
+            // If we can't find the service name, then skip the log
+            let Some((prefix, _suffix)) = filename.split_once(':') else {
+                continue;
+            };
+            // We already checked for this prefix above, so the unwrap is safe
+            let svc_name = prefix.strip_prefix("oxide-").unwrap().to_string();
+            if let Ok(metadata) = entry.metadata() {
+                if metadata.len() == 0 {
+                    // skip 0 size files
+                    continue;
+                }
+                logfile.size = Some(metadata.len());
+                if let Ok(modified) = metadata.modified() {
+                    logfile.modified = Some(modified.into());
+                }
+            }
+
+            let is_current = filename.ends_with(".log");
+
+            let svc_logs =
+                logs.entry(svc_name.clone()).or_insert(SvcLogs::default());
+
+            if is_current {
+                svc_logs.current = Some(logfile.clone());
+            } else {
+                svc_logs.archived.push(logfile.clone());
+            }
+        }
+    }
+}
+
+fn load_extra_logs(
+    dir: Utf8PathBuf,
+    svc_name: &str,
+    logs: &mut BTreeMap<ServiceName, SvcLogs>,
+) {
+    let Ok(entries) = read_dir(dir.as_path()) else {
+        return;
+    };
+
+    // We only insert extra files if we have already collected
+    // related current and archived files.
+    // This should always be the case unless the files are
+    // for zones that no longer exist.
+    let Some(svc_logs) = logs.get_mut(svc_name) else {
+        return;
+    };
+
+    for entry in entries {
+        let Ok(entry) = entry else {
+            continue;
+        };
+        let filename = entry.file_name();
+        let Some(filename) = filename.to_str() else {
+            continue;
+        };
+        let mut path = dir.clone();
+        path.push(filename);
+        let mut logfile = LogFile::new(path);
+        if let Ok(metadata) = entry.metadata() {
+            if metadata.len() == 0 {
+                // skip 0 size files
+                continue;
+            }
+            logfile.size = Some(metadata.len());
+            if let Ok(modified) = metadata.modified() {
+                logfile.modified = Some(modified.into());
+            }
+        }
+
+        svc_logs.extra.push(logfile);
+    }
+}
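For anyone who wants to build on the library half rather than the CLI, here is
a minimal sketch of how the API added above can be consumed. It mirrors what
the binary does; the only assumption is that the caller depends on `oxlog` and
`anyhow` (both already workspace dependencies):

    use oxlog::Zones;

    fn main() -> Result<(), anyhow::Error> {
        // Zone discovery reads /pool/ext/<uuid>/crypt/{zone,debug} and also
        // includes the hard-coded "global" and "oxz_switch" entries.
        let zones = Zones::load()?;

        // zone_logs() returns a BTreeMap<ServiceName, SvcLogs> for one zone.
        for (svc, svc_logs) in zones.zone_logs("oxz_switch") {
            println!("{svc}: {} archived log file(s)", svc_logs.archived.len());
            if let Some(current) = svc_logs.current {
                println!("  current: {}", current.path);
            }
        }
        Ok(())
    }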