Skip to content

Commit

Permalink
ref(api): Rename ChunkUploadOptions to indicate they are set by server
Browse files Browse the repository at this point in the history

This rename makes it clearer that the server sets the `ChunkServerOptions`. It will also allow the name `ChunkUploadOptions` to be reused for a struct storing options that are user-configured.
  • Loading branch information
szokeasaurusrex committed Dec 4, 2024
1 parent 87444d5 commit d97768d
Show file tree
Hide file tree
Showing 8 changed files with 19 additions and 18 deletions.
2 changes: 1 addition & 1 deletion src/api/data_types/chunking/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,4 @@ pub use self::compression::ChunkCompression;
pub use self::dif::{AssembleDifsRequest, AssembleDifsResponse, ChunkedDifRequest};
pub use self::file_state::ChunkedFileState;
pub use self::hash_algorithm::ChunkHashAlgorithm;
pub use self::upload::{ChunkUploadCapability, ChunkUploadOptions};
pub use self::upload::{ChunkUploadCapability, ChunkServerOptions};
2 changes: 1 addition & 1 deletion src/api/data_types/chunking/upload/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,4 @@ mod capability;
mod options;

pub use self::capability::ChunkUploadCapability;
pub use self::options::ChunkUploadOptions;
pub use self::options::ChunkServerOptions;
5 changes: 3 additions & 2 deletions src/api/data_types/chunking/upload/options.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,10 @@ use serde::Deserialize;

use super::{ChunkCompression, ChunkHashAlgorithm, ChunkUploadCapability};

/// Chunk upload options which are set by the Sentry server.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ChunkUploadOptions {
pub struct ChunkServerOptions {
pub url: String,
#[serde(rename = "chunksPerRequest")]
pub max_chunks: u64,
Expand All @@ -24,7 +25,7 @@ pub struct ChunkUploadOptions {
pub accept: Vec<ChunkUploadCapability>,
}

impl ChunkUploadOptions {
impl ChunkServerOptions {
/// Returns whether the given capability is accepted by the chunk upload endpoint.
pub fn supports(&self, capability: ChunkUploadCapability) -> bool {
self.accept.contains(&capability)
Expand Down
4 changes: 2 additions & 2 deletions src/api/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -936,11 +936,11 @@ impl<'a> AuthenticatedApi<'a> {
}

/// Get the server configuration for chunked file uploads.
pub fn get_chunk_upload_options(&self, org: &str) -> ApiResult<Option<ChunkUploadOptions>> {
pub fn get_chunk_upload_options(&self, org: &str) -> ApiResult<Option<ChunkServerOptions>> {
let url = format!("/organizations/{}/chunk-upload/", PathArg(org));
match self
.get(&url)?
.convert_rnf::<ChunkUploadOptions>(ApiErrorKind::ChunkUploadNotSupported)
.convert_rnf::<ChunkServerOptions>(ApiErrorKind::ChunkUploadNotSupported)
{
Ok(options) => Ok(Some(options)),
Err(error) => {
Expand Down
6 changes: 3 additions & 3 deletions src/utils/chunks/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ use rayon::prelude::*;
use rayon::ThreadPoolBuilder;
use sha1_smol::Digest;

use crate::api::{Api, ChunkUploadOptions};
use crate::api::{Api, ChunkServerOptions};
use crate::utils::progress::{ProgressBar, ProgressBarMode, ProgressStyle};

/// Timeout for polling all assemble endpoints.
Expand Down Expand Up @@ -164,7 +164,7 @@ impl ItemSize for Chunk<'_> {
/// This function blocks until all chunks have been uploaded.
pub fn upload_chunks(
chunks: &[Chunk<'_>],
chunk_options: &ChunkUploadOptions,
chunk_options: &ChunkServerOptions,
progress_style: ProgressStyle,
) -> Result<()> {
let total_bytes = chunks.iter().map(|&Chunk((_, data))| data.len()).sum();
Expand All @@ -190,7 +190,7 @@ pub fn upload_chunks(
info!("using '{}' compression for chunk upload", compression);

// The upload is executed in parallel batches. Each batch aggregates objects
// until it exceeds the maximum size configured in ChunkUploadOptions. We
// until it exceeds the maximum size configured in ChunkServerOptions. We
// keep track of the overall progress and potential errors. If an error
// occurs, all subsequent requests will be cancelled and the error returned.
// Otherwise, the after every successful update, the overall progress is
Expand Down
6 changes: 3 additions & 3 deletions src/utils/dif_upload.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ use zip::result::ZipError;
use zip::{write::FileOptions, ZipArchive, ZipWriter};

use crate::api::{
Api, AssembleDifsRequest, ChunkUploadCapability, ChunkUploadOptions, ChunkedFileState,
Api, AssembleDifsRequest, ChunkServerOptions, ChunkUploadCapability, ChunkedFileState,
};
use crate::config::Config;
use crate::constants::{DEFAULT_MAX_DIF_SIZE, DEFAULT_MAX_WAIT};
Expand Down Expand Up @@ -1313,7 +1313,7 @@ where
/// This function blocks until all chunks have been uploaded.
fn upload_missing_chunks<T>(
missing_info: &MissingObjectsInfo<'_, T>,
chunk_options: &ChunkUploadOptions,
chunk_options: &ChunkServerOptions,
) -> Result<()> {
let (objects, chunks) = missing_info;

Expand Down Expand Up @@ -1519,7 +1519,7 @@ where
/// Uploads debug info files using the chunk-upload endpoint.
fn upload_difs_chunked(
options: &DifUpload,
chunk_options: &ChunkUploadOptions,
chunk_options: &ChunkServerOptions,
) -> Result<(Vec<DebugInfoFile>, bool)> {
// Search for debug files in the file system and ZIPs
let found = search_difs(options)?;
Expand Down
8 changes: 4 additions & 4 deletions src/utils/file_upload.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ use symbolic::debuginfo::sourcebundle::{
use url::Url;

use crate::api::NewRelease;
use crate::api::{Api, ChunkUploadCapability, ChunkUploadOptions};
use crate::api::{Api, ChunkUploadCapability, ChunkServerOptions};
use crate::constants::DEFAULT_MAX_WAIT;
use crate::utils::chunks::{upload_chunks, Chunk, ASSEMBLE_POLL_INTERVAL};
use crate::utils::fs::{get_sha1_checksum, get_sha1_checksums, TempFile};
Expand Down Expand Up @@ -90,7 +90,7 @@ pub struct UploadContext<'a> {
pub wait: bool,
pub max_wait: Duration,
pub dedupe: bool,
pub chunk_upload_options: Option<&'a ChunkUploadOptions>,
pub chunk_upload_options: Option<&'a ChunkServerOptions>,
}

impl UploadContext<'_> {
Expand Down Expand Up @@ -317,7 +317,7 @@ fn poll_assemble(
checksum: Digest,
chunks: &[Digest],
context: &UploadContext,
options: &ChunkUploadOptions,
options: &ChunkServerOptions,
) -> Result<()> {
let progress_style = ProgressStyle::default_spinner().template("{spinner} Processing files...");

Expand Down Expand Up @@ -400,7 +400,7 @@ fn poll_assemble(
fn upload_files_chunked(
context: &UploadContext,
files: &SourceFiles,
options: &ChunkUploadOptions,
options: &ChunkServerOptions,
) -> Result<()> {
let archive = build_artifact_bundle(context, files, None)?;

Expand Down
4 changes: 2 additions & 2 deletions src/utils/proguard/upload.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ use std::time::{Duration, Instant};
use anyhow::Result;
use indicatif::ProgressStyle;

use crate::api::{Api, ChunkUploadOptions, ChunkedFileState};
use crate::api::{Api, ChunkServerOptions, ChunkedFileState};
use crate::utils::chunks;
use crate::utils::chunks::Chunked;
use crate::utils::proguard::ProguardMapping;
Expand All @@ -28,7 +28,7 @@ const ASSEMBLE_POLL_TIMEOUT: Duration = Duration::from_secs(120);
/// Returns an error if the mappings fail to assemble, or if the timeout is reached.
pub fn chunk_upload(
mappings: &[ProguardMapping<'_>],
chunk_upload_options: &ChunkUploadOptions,
chunk_upload_options: &ChunkServerOptions,
org: &str,
project: &str,
) -> Result<()> {
Expand Down

0 comments on commit d97768d

Please sign in to comment.