From c8eab31215d49f626997c7c434e10e494370d768 Mon Sep 17 00:00:00 2001
From: Daniel Szoke
Date: Thu, 21 Nov 2024 15:06:57 -0500
Subject: [PATCH] feat(proguard): Introduce experimental chunk uploading feature

Introduce an experimental chunk uploading feature for the
`sentry-cli upload-proguard` command. The feature is only active when
users opt in by setting the `SENTRY_EXPERIMENTAL_PROGUARD_CHUNK_UPLOAD`
environment variable to `1`; a usage sketch follows the patch below.

The experimental chunk uploading feature is not backwards compatible:
we attempt the upload regardless of whether the server supports
receiving chunk-uploaded Proguard files. Server-side support will only
be available once https://github.com/getsentry/sentry/pull/81131 is
released.

The goal here was to create something that works, so some optimizations
we use for other chunk-uploaded file types (e.g. first checking which
chunks are already present on the server and only uploading the missing
ones) are not implemented for Proguard.

Ref #2196
---
 src/commands/upload_proguard.rs | 140 +++++++++++++++++++++-----------
 src/utils/mod.rs                |   1 +
 src/utils/proguard_upload.rs    |  95 ++++++++++++++++++++++
 3 files changed, 187 insertions(+), 49 deletions(-)
 create mode 100644 src/utils/proguard_upload.rs

diff --git a/src/commands/upload_proguard.rs b/src/commands/upload_proguard.rs
index 276861237b..1f20c0916b 100644
--- a/src/commands/upload_proguard.rs
+++ b/src/commands/upload_proguard.rs
@@ -1,5 +1,7 @@
+use std::env;
 use std::fs;
 use std::io;
+use std::path::Path;
 use std::path::PathBuf;
 
 use anyhow::{bail, Error, Result};
@@ -17,16 +19,23 @@ use crate::config::Config;
 use crate::utils::android::dump_proguard_uuids_as_properties;
 use crate::utils::args::ArgExt;
 use crate::utils::fs::TempFile;
+use crate::utils::proguard_upload;
 use crate::utils::system::QuietExit;
 use crate::utils::ui::{copy_with_progress, make_byte_progress_bar};
 
 #[derive(Debug)]
-struct MappingRef {
+pub struct MappingRef {
     pub path: PathBuf,
     pub size: u64,
     pub uuid: Uuid,
 }
 
+impl AsRef<Path> for MappingRef {
+    fn as_ref(&self) -> &Path {
+        &self.path
+    }
+}
+
 pub fn make_command(command: Command) -> Command {
     command
         .about("Upload ProGuard mapping files to a project.")
@@ -188,62 +197,95 @@ pub fn execute(matches: &ArgMatches) -> Result<()> {
         }
     }
 
-    if mappings.is_empty() && matches.get_flag("require_one") {
-        println!();
-        eprintln!("{}", style("error: found no mapping files to upload").red());
-        return Err(QuietExit(1).into());
-    }
+    let api = Api::current();
+    let config = Config::current();
 
-    println!("{} compressing mappings", style(">").dim());
-    let tf = TempFile::create()?;
-    {
-        let mut zip = zip::ZipWriter::new(tf.open()?);
-        for mapping in &mappings {
-            let pb = make_byte_progress_bar(mapping.size);
-            zip.start_file(
-                format!("proguard/{}.txt", mapping.uuid),
-                zip::write::FileOptions::default(),
-            )?;
-            copy_with_progress(&pb, &mut fs::File::open(&mapping.path)?, &mut zip)?;
-            pb.finish_and_clear();
+    // Don't initialize these until we confirm that the user did not pass the --no-upload
+    // flag, or that we are using chunked uploading. This is because the auth token, org,
+    // and project are not needed for the no-upload case.
+    let authenticated_api;
+    let (org, project);
+
+    if env::var("SENTRY_EXPERIMENTAL_PROGUARD_CHUNK_UPLOAD") == Ok("1".into()) {
+        log::warn!(
+            "EXPERIMENTAL FEATURE: Uploading proguard mappings using chunked uploading. \
+            Some functionality may be unavailable when using chunked uploading. Please unset \
+            the SENTRY_EXPERIMENTAL_PROGUARD_CHUNK_UPLOAD variable if you encounter any \
+            problems."
+        );
+
+        authenticated_api = api.authenticated()?;
+        (org, project) = config.get_org_and_project(matches)?;
+
+        let chunk_upload_options = authenticated_api
+            .get_chunk_upload_options(&org)
+            .map_err(|e| anyhow::anyhow!(e))
+            .and_then(|options| {
+                options.ok_or_else(|| {
+                    anyhow::anyhow!(
+                        "server does not support chunked uploading. unset \
+                        SENTRY_EXPERIMENTAL_PROGUARD_CHUNK_UPLOAD to continue."
+                    )
+                })
+            })?;
+
+        proguard_upload::chunk_upload(&mappings, &chunk_upload_options, &org, &project)?;
+    } else {
+        if mappings.is_empty() && matches.get_flag("require_one") {
+            println!();
+            eprintln!("{}", style("error: found no mapping files to upload").red());
+            return Err(QuietExit(1).into());
         }
-    }
 
-    // write UUIDs into the mapping file.
-    if let Some(p) = matches.get_one::<PathBuf>("write_properties") {
-        let uuids: Vec<_> = mappings.iter().map(|x| x.uuid).collect();
-        dump_proguard_uuids_as_properties(p, &uuids)?;
-    }
+        println!("{} compressing mappings", style(">").dim());
+        let tf = TempFile::create()?;
+        {
+            let mut zip = zip::ZipWriter::new(tf.open()?);
+            for mapping in &mappings {
+                let pb = make_byte_progress_bar(mapping.size);
+                zip.start_file(
+                    format!("proguard/{}.txt", mapping.uuid),
+                    zip::write::FileOptions::default(),
+                )?;
+                copy_with_progress(&pb, &mut fs::File::open(&mapping.path)?, &mut zip)?;
+                pb.finish_and_clear();
+            }
+        }
 
-    if matches.get_flag("no_upload") {
-        println!("{} skipping upload.", style(">").dim());
-        return Ok(());
-    }
+        // write UUIDs into the mapping file.
+        if let Some(p) = matches.get_one::<PathBuf>("write_properties") {
+            let uuids: Vec<_> = mappings.iter().map(|x| x.uuid).collect();
+            dump_proguard_uuids_as_properties(p, &uuids)?;
+        }
 
-    println!("{} uploading mappings", style(">").dim());
-    let config = Config::current();
-    let (org, project) = config.get_org_and_project(matches)?;
+        if matches.get_flag("no_upload") {
+            println!("{} skipping upload.", style(">").dim());
+            return Ok(());
+        }
 
-    info!(
-        "Issuing a command for Organization: {} Project: {}",
-        org, project
-    );
+        println!("{} uploading mappings", style(">").dim());
+        (org, project) = config.get_org_and_project(matches)?;
 
-    let api = Api::current();
-    let authenticated_api = api.authenticated()?;
+        info!(
+            "Issuing a command for Organization: {} Project: {}",
+            org, project
+        );
 
-    let rv = authenticated_api
-        .region_specific(&org)
-        .upload_dif_archive(&project, tf.path())?;
-    println!(
-        "{} Uploaded a total of {} new mapping files",
-        style(">").dim(),
-        style(rv.len()).yellow()
-    );
-    if !rv.is_empty() {
-        println!("Newly uploaded debug symbols:");
-        for df in rv {
-            println!("  {}", style(&df.id()).dim());
+        authenticated_api = api.authenticated()?;
+
+        let rv = authenticated_api
+            .region_specific(&org)
+            .upload_dif_archive(&project, tf.path())?;
+        println!(
+            "{} Uploaded a total of {} new mapping files",
+            style(">").dim(),
+            style(rv.len()).yellow()
+        );
+        if !rv.is_empty() {
+            println!("Newly uploaded debug symbols:");
+            for df in rv {
+                println!("  {}", style(&df.id()).dim());
+            }
         }
     }
 
diff --git a/src/utils/mod.rs b/src/utils/mod.rs
index 3a7930f8dc..03da7b772c 100644
--- a/src/utils/mod.rs
+++ b/src/utils/mod.rs
@@ -16,6 +16,7 @@ pub mod http;
 pub mod logging;
 pub mod metrics;
 pub mod progress;
+pub mod proguard_upload;
 pub mod releases;
 pub mod retry;
 pub mod sourcemaps;
diff --git a/src/utils/proguard_upload.rs b/src/utils/proguard_upload.rs
new file mode 100644
index 0000000000..cb689bcde5
--- /dev/null
+++ b/src/utils/proguard_upload.rs
@@ -0,0 +1,95 @@
+//! This file contains code for enabling chunk uploads for Proguard mappings.
+//!
+//! This code is intended as a temporary solution to enable chunk uploads for
+//! Proguard mappings, while we work on a more permanent solution, which will
+//! work for all different types of debug files.
+
+use std::fs;
+
+use anyhow::Result;
+use indicatif::ProgressStyle;
+use sha1_smol::Digest;
+
+use super::chunks;
+use super::chunks::Chunk;
+use super::fs::get_sha1_checksums;
+use crate::api::{Api, ChunkUploadOptions, ChunkedDifRequest};
+use crate::commands::upload_proguard::MappingRef;
+
+struct ChunkedMapping {
+    raw_data: Vec<u8>,
+    hash: Digest,
+    chunk_hashes: Vec<Digest>,
+    file_name: String,
+    chunk_size: usize,
+}
+
+impl ChunkedMapping {
+    fn try_from_mapping(mapping: &MappingRef, chunk_size: u64) -> Result<Self> {
+        let raw_data = fs::read(mapping)?;
+        let file_name = format!("/proguard/{}.txt", mapping.uuid);
+
+        let (hash, chunk_hashes) = get_sha1_checksums(&raw_data, chunk_size)?;
+        Ok(Self {
+            raw_data,
+            hash,
+            chunk_hashes,
+            file_name,
+            chunk_size: chunk_size.try_into()?,
+        })
+    }
+
+    fn chunks(&self) -> impl Iterator<Item = Chunk<'_>> {
+        self.raw_data
+            .chunks(self.chunk_size)
+            .zip(self.chunk_hashes.iter())
+            .map(|(chunk, hash)| Chunk((*hash, chunk)))
+    }
+}
+
+impl<'a> From<&'a ChunkedMapping> for ChunkedDifRequest<'a> {
+    fn from(value: &'a ChunkedMapping) -> Self {
+        ChunkedDifRequest {
+            name: &value.file_name,
+            debug_id: None,
+            chunks: &value.chunk_hashes,
+        }
+    }
+}
+
+fn to_assemble(chunked: &ChunkedMapping) -> (Digest, ChunkedDifRequest<'_>) {
+    (chunked.hash, chunked.into())
+}
+
+pub fn chunk_upload(
+    paths: &[MappingRef],
+    chunk_upload_options: &ChunkUploadOptions,
+    org: &str,
+    project: &str,
+) -> Result<()> {
+    let chunked_mappings: Vec<ChunkedMapping> = paths
+        .iter()
+        .map(|path| ChunkedMapping::try_from_mapping(path, chunk_upload_options.chunk_size))
+        .collect::<Result<_>>()?;
+
+    let progress_style = ProgressStyle::default_bar().template(
+        "Uploading Proguard mappings...\
+        \n{wide_bar} {bytes}/{total_bytes} ({eta})",
+    );
+
+    let chunks = chunked_mappings.iter().flat_map(|mapping| mapping.chunks());
+
+    chunks::upload_chunks(
+        &chunks.collect::<Vec<_>>(),
+        chunk_upload_options,
+        progress_style,
+    )?;
+
+    let assemble_request = chunked_mappings.iter().map(to_assemble).collect();
+
+    Api::current()
+        .authenticated()?
+        .assemble_difs(org, project, &assemble_request)?;
+
+    Ok(())
+}
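
A minimal usage sketch, assuming the patch above is applied and that an
auth token, org, and project are already configured for sentry-cli; the
mapping file path is a placeholder:

    # Opt in to the experimental chunked upload path for this invocation.
    # Unset the variable to fall back to the existing single-archive upload.
    SENTRY_EXPERIMENTAL_PROGUARD_CHUNK_UPLOAD=1 \
    sentry-cli upload-proguard path/to/mapping.txt

If the server does not advertise chunk upload support, the command fails
with an error asking you to unset SENTRY_EXPERIMENTAL_PROGUARD_CHUNK_UPLOAD,
since server-side assembly of chunk-uploaded Proguard files only lands with
https://github.com/getsentry/sentry/pull/81131.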