diff --git a/src/api/data_types/chunking/mod.rs b/src/api/data_types/chunking/mod.rs
index e9e6d2fdd7..41ba3a165a 100644
--- a/src/api/data_types/chunking/mod.rs
+++ b/src/api/data_types/chunking/mod.rs
@@ -13,4 +13,4 @@ pub use self::compression::ChunkCompression;
 pub use self::dif::{AssembleDifsRequest, AssembleDifsResponse, ChunkedDifRequest};
 pub use self::file_state::ChunkedFileState;
 pub use self::hash_algorithm::ChunkHashAlgorithm;
-pub use self::upload::{ChunkUploadCapability, ChunkUploadOptions};
+pub use self::upload::{ChunkUploadCapability, ChunkServerOptions};
diff --git a/src/api/data_types/chunking/upload/mod.rs b/src/api/data_types/chunking/upload/mod.rs
index 65b897ec9d..3f70dbdfbf 100644
--- a/src/api/data_types/chunking/upload/mod.rs
+++ b/src/api/data_types/chunking/upload/mod.rs
@@ -4,4 +4,4 @@ mod capability;
 mod options;
 
 pub use self::capability::ChunkUploadCapability;
-pub use self::options::ChunkUploadOptions;
+pub use self::options::ChunkServerOptions;
diff --git a/src/api/data_types/chunking/upload/options.rs b/src/api/data_types/chunking/upload/options.rs
index 47f509d849..f1221c6058 100644
--- a/src/api/data_types/chunking/upload/options.rs
+++ b/src/api/data_types/chunking/upload/options.rs
@@ -2,9 +2,10 @@ use serde::Deserialize;
 
 use super::{ChunkCompression, ChunkHashAlgorithm, ChunkUploadCapability};
 
+/// Chunk upload options which are set by the Sentry server.
 #[derive(Debug, Deserialize)]
 #[serde(rename_all = "camelCase")]
-pub struct ChunkUploadOptions {
+pub struct ChunkServerOptions {
     pub url: String,
     #[serde(rename = "chunksPerRequest")]
     pub max_chunks: u64,
@@ -24,7 +25,7 @@ pub struct ChunkUploadOptions {
     pub accept: Vec<ChunkUploadCapability>,
 }
 
-impl ChunkUploadOptions {
+impl ChunkServerOptions {
     /// Returns whether the given capability is accepted by the chunk upload endpoint.
     pub fn supports(&self, capability: ChunkUploadCapability) -> bool {
         self.accept.contains(&capability)
diff --git a/src/api/mod.rs b/src/api/mod.rs
index 856d40ac72..931fa848cf 100644
--- a/src/api/mod.rs
+++ b/src/api/mod.rs
@@ -936,11 +936,11 @@ impl<'a> AuthenticatedApi<'a> {
     }
 
     /// Get the server configuration for chunked file uploads.
-    pub fn get_chunk_upload_options(&self, org: &str) -> ApiResult<Option<ChunkUploadOptions>> {
+    pub fn get_chunk_upload_options(&self, org: &str) -> ApiResult<Option<ChunkServerOptions>> {
         let url = format!("/organizations/{}/chunk-upload/", PathArg(org));
         match self
             .get(&url)?
-            .convert_rnf::<ChunkUploadOptions>(ApiErrorKind::ChunkUploadNotSupported)
+            .convert_rnf::<ChunkServerOptions>(ApiErrorKind::ChunkUploadNotSupported)
         {
             Ok(options) => Ok(Some(options)),
             Err(error) => {
diff --git a/src/utils/chunks/mod.rs b/src/utils/chunks/mod.rs
index 2eabcccb24..7d01762864 100644
--- a/src/utils/chunks/mod.rs
+++ b/src/utils/chunks/mod.rs
@@ -19,7 +19,7 @@ use rayon::prelude::*;
 use rayon::ThreadPoolBuilder;
 use sha1_smol::Digest;
 
-use crate::api::{Api, ChunkUploadOptions};
+use crate::api::{Api, ChunkServerOptions};
 use crate::utils::progress::{ProgressBar, ProgressBarMode, ProgressStyle};
 
 /// Timeout for polling all assemble endpoints.
@@ -164,7 +164,7 @@ impl ItemSize for Chunk<'_> {
 /// This function blocks until all chunks have been uploaded.
 pub fn upload_chunks(
     chunks: &[Chunk<'_>],
-    chunk_options: &ChunkUploadOptions,
+    chunk_options: &ChunkServerOptions,
     progress_style: ProgressStyle,
 ) -> Result<()> {
     let total_bytes = chunks.iter().map(|&Chunk((_, data))| data.len()).sum();
@@ -190,7 +190,7 @@ pub fn upload_chunks(
     info!("using '{}' compression for chunk upload", compression);
 
     // The upload is executed in parallel batches. Each batch aggregates objects
-    // until it exceeds the maximum size configured in ChunkUploadOptions. We
+    // until it exceeds the maximum size configured in ChunkServerOptions. We
     // keep track of the overall progress and potential errors. If an error
     // occurs, all subsequent requests will be cancelled and the error returned.
     // Otherwise, the after every successful update, the overall progress is
diff --git a/src/utils/dif_upload.rs b/src/utils/dif_upload.rs
index 097d231638..dd109e1394 100644
--- a/src/utils/dif_upload.rs
+++ b/src/utils/dif_upload.rs
@@ -34,7 +34,7 @@ use zip::result::ZipError;
 use zip::{write::FileOptions, ZipArchive, ZipWriter};
 
 use crate::api::{
-    Api, AssembleDifsRequest, ChunkUploadCapability, ChunkUploadOptions, ChunkedFileState,
+    Api, AssembleDifsRequest, ChunkServerOptions, ChunkUploadCapability, ChunkedFileState,
 };
 use crate::config::Config;
 use crate::constants::{DEFAULT_MAX_DIF_SIZE, DEFAULT_MAX_WAIT};
@@ -1313,7 +1313,7 @@ where
 /// This function blocks until all chunks have been uploaded.
 fn upload_missing_chunks<T>(
     missing_info: &MissingObjectsInfo<'_, T>,
-    chunk_options: &ChunkUploadOptions,
+    chunk_options: &ChunkServerOptions,
 ) -> Result<()> {
     let (objects, chunks) = missing_info;
 
@@ -1519,7 +1519,7 @@ where
 /// Uploads debug info files using the chunk-upload endpoint.
 fn upload_difs_chunked(
     options: &DifUpload,
-    chunk_options: &ChunkUploadOptions,
+    chunk_options: &ChunkServerOptions,
 ) -> Result<(Vec<DebugInfoFile>, bool)> {
     // Search for debug files in the file system and ZIPs
     let found = search_difs(options)?;
diff --git a/src/utils/file_upload.rs b/src/utils/file_upload.rs
index fb5f2ae74d..a49ea52e48 100644
--- a/src/utils/file_upload.rs
+++ b/src/utils/file_upload.rs
@@ -22,7 +22,7 @@ use symbolic::debuginfo::sourcebundle::{
 use url::Url;
 
 use crate::api::NewRelease;
-use crate::api::{Api, ChunkUploadCapability, ChunkUploadOptions};
+use crate::api::{Api, ChunkUploadCapability, ChunkServerOptions};
 use crate::constants::DEFAULT_MAX_WAIT;
 use crate::utils::chunks::{upload_chunks, Chunk, ASSEMBLE_POLL_INTERVAL};
 use crate::utils::fs::{get_sha1_checksum, get_sha1_checksums, TempFile};
@@ -90,7 +90,7 @@ pub struct UploadContext<'a> {
     pub wait: bool,
     pub max_wait: Duration,
     pub dedupe: bool,
-    pub chunk_upload_options: Option<&'a ChunkUploadOptions>,
+    pub chunk_upload_options: Option<&'a ChunkServerOptions>,
 }
 
 impl UploadContext<'_> {
@@ -317,7 +317,7 @@ fn poll_assemble(
     checksum: Digest,
     chunks: &[Digest],
     context: &UploadContext,
-    options: &ChunkUploadOptions,
+    options: &ChunkServerOptions,
 ) -> Result<()> {
     let progress_style = ProgressStyle::default_spinner().template("{spinner} Processing files...");
 
@@ -400,7 +400,7 @@ fn poll_assemble(
 fn upload_files_chunked(
     context: &UploadContext,
     files: &SourceFiles,
-    options: &ChunkUploadOptions,
+    options: &ChunkServerOptions,
 ) -> Result<()> {
     let archive = build_artifact_bundle(context, files, None)?;
 
diff --git a/src/utils/proguard/upload.rs b/src/utils/proguard/upload.rs
index 5a1feda6e5..bf7909a263 100644
--- a/src/utils/proguard/upload.rs
+++ b/src/utils/proguard/upload.rs
@@ -10,7 +10,7 @@ use std::time::{Duration, Instant};
 use anyhow::Result;
 use indicatif::ProgressStyle;
 
-use crate::api::{Api, ChunkUploadOptions, ChunkedFileState};
+use crate::api::{Api, ChunkServerOptions, ChunkedFileState};
 use crate::utils::chunks;
 use crate::utils::chunks::Chunked;
 use crate::utils::proguard::ProguardMapping;
@@ -28,7 +28,7 @@ const ASSEMBLE_POLL_TIMEOUT: Duration = Duration::from_secs(120);
 /// Returns an error if the mappings fail to assemble, or if the timeout is reached.
 pub fn chunk_upload(
     mappings: &[ProguardMapping<'_>],
-    chunk_upload_options: &ChunkUploadOptions,
+    chunk_upload_options: &ChunkServerOptions,
     org: &str,
     project: &str,
 ) -> Result<()> {