Skip to content

Commit

Permalink
fix: (#33) Rare combination of settings could lead to writing a corrupt archive with overlength extra data, and data_start locations when reading the archive back were also wrong (#221)

Browse files Browse the repository at this point in the history

* fix: Rare combination of settings could lead to writing a corrupt archive with overlength extra data

* fix: Previous fix was breaking alignment

* style: cargo fmt --all

* fix: ZIP64 header was being written twice

* style: cargo fmt --all

* ci(fuzz): Add check that file-creation options are individually valid

* fix: Need to update extra_data_start in deep_copy_file

* style: cargo fmt --all

* test(fuzz): fix bug in Arbitrary impl

* fix: Cursor-position bugs when merging archives or opening for append

* fix: unintended feature dependency

* style: cargo fmt --all

* fix: merge_contents was miscalculating new start positions for absorbed archive's files

* fix: shallow_copy_file needs to reset CDE location since the CDE is copied

* fix: ZIP64 header was being written after AES header location was already calculated

* fix: ZIP64 header was being counted twice when writing extra-field length

* fix: deep_copy_file was positioning cursor incorrectly

* test(fuzz): Reimplement Debug so that it prints the method calls actually made

* test(fuzz): Fix issues with `Option<&mut Formatter>`

* chore: Partial debug

* chore: Revert: `merge_contents` already adjusts header_start and data_start

* chore: Revert unused `mut`

* style: cargo fmt --all

* refactor: eliminate a magic number for CDE block size

* chore: WIP: fix bugs

* refactor: Minor refactors

* refactor: eliminate a magic number for CDE block size

* refactor: Minor refactors

* refactor: Can use cde_start_pos to locate ZIP64 end locator

* chore: Fix import that can no longer be feature-gated

* chore: Fix import that can no longer be feature-gated

* refactor: Confusing variable name

* style: cargo fmt --all and fix Clippy warnings

* style: fix another Clippy warning

---------

Signed-off-by: Chris Hennick <[email protected]>
  • Loading branch information
Pr0methean authored Jul 29, 2024
1 parent fd5f804 commit 6d8ab62
Show file tree
Hide file tree
Showing 3 changed files with 436 additions and 309 deletions.
25 changes: 15 additions & 10 deletions src/read.rs
Original file line number Diff line number Diff line change
Expand Up @@ -359,7 +359,7 @@ fn find_data_start(
// easily overflow a u16.
block.file_name_length as u64 + block.extra_field_length as u64;
let data_start =
data.header_start + mem::size_of::<ZipLocalEntryBlock>() as u64 + variable_fields_len;
data.header_start + size_of::<ZipLocalEntryBlock>() as u64 + variable_fields_len;
// Set the value so we don't have to read it again.
match data.data_start.set(data_start) {
Ok(()) => (),
Expand Down Expand Up @@ -497,22 +497,28 @@ impl<R: Read + Seek> ZipArchive<R> {
* assert_eq!(0, new_files[0].header_start); // Avoid this.
*/

let new_initial_header_start = w.stream_position()?;
let first_new_file_header_start = w.stream_position()?;

/* Push back file header starts for all entries in the covered files. */
new_files.values_mut().try_for_each(|f| {
/* This is probably the only really important thing to change. */
f.header_start = f.header_start.checked_add(new_initial_header_start).ok_or(
ZipError::InvalidArchive("new header start from merge would have been too large"),
)?;
f.header_start = f
.header_start
.checked_add(first_new_file_header_start)
.ok_or(InvalidArchive(
"new header start from merge would have been too large",
))?;
/* This is only ever used internally to cache metadata lookups (it's not part of the
* zip spec), and 0 is the sentinel value. */
// f.central_header_start = 0;
f.central_header_start = 0;
/* This is an atomic variable so it can be updated from another thread in the
* implementation (which is good!). */
if let Some(old_data_start) = f.data_start.take() {
let new_data_start = old_data_start.checked_add(new_initial_header_start).ok_or(
ZipError::InvalidArchive("new data start from merge would have been too large"),
)?;
let new_data_start = old_data_start
.checked_add(first_new_file_header_start)
.ok_or(InvalidArchive(
"new data start from merge would have been too large",
))?;
f.data_start.get_or_init(|| new_data_start);
}
Ok::<_, ZipError>(())
Expand Down Expand Up @@ -1239,7 +1245,6 @@ pub(crate) fn central_header_to_zip_file<R: Read + Seek>(
"A file can't start after its central-directory header",
));
}
file.data_start.get_or_init(|| data_start);
reader.seek(SeekFrom::Start(central_header_end))?;
Ok(file)
}
Expand Down
109 changes: 55 additions & 54 deletions src/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ use crate::extra_fields::ExtraField;
use crate::result::DateTimeRangeError;
use crate::spec::is_dir;
use crate::types::ffi::S_IFDIR;
use crate::CompressionMethod;
use crate::{CompressionMethod, ZIP64_BYTES_THR};
#[cfg(feature = "time")]
use time::{error::ComponentRange, Date, Month, OffsetDateTime, PrimitiveDateTime, Time};

Expand Down Expand Up @@ -625,10 +625,10 @@ impl ZipFileData {
extra_field: &[u8],
) -> Self
where
S: Into<Box<str>>,
S: ToString,
{
let permissions = options.permissions.unwrap_or(0o100644);
let file_name: Box<str> = name.into();
let file_name: Box<str> = name.to_string().into_boxed_str();
let file_name_raw: Box<[u8]> = file_name.bytes().collect();
let mut local_block = ZipFileData {
system: System::Unix,
Expand Down Expand Up @@ -778,12 +778,8 @@ impl ZipFileData {
pub(crate) fn local_block(&self) -> ZipResult<ZipLocalEntryBlock> {
let compressed_size: u32 = self.clamp_size_field(self.compressed_size);
let uncompressed_size: u32 = self.clamp_size_field(self.uncompressed_size);

let extra_block_len: usize = self
.zip64_extra_field_block()
.map(|block| block.full_size())
.unwrap_or(0);
let extra_field_length: u16 = (self.extra_field_len() + extra_block_len)
let extra_field_length: u16 = self
.extra_field_len()
.try_into()
.map_err(|_| ZipError::InvalidArchive("Extra data field is too large"))?;

Expand All @@ -805,7 +801,7 @@ impl ZipFileData {
})
}

pub(crate) fn block(&self, zip64_extra_field_length: u16) -> ZipResult<ZipCentralEntryBlock> {
pub(crate) fn block(&self) -> ZipResult<ZipCentralEntryBlock> {
let extra_field_len: u16 = self.extra_field_len().try_into().unwrap();
let central_extra_field_len: u16 = self.central_extra_field_len().try_into().unwrap();
let last_modified_time = self
Expand All @@ -832,11 +828,9 @@ impl ZipFileData {
.try_into()
.unwrap(),
file_name_length: self.file_name_raw.len().try_into().unwrap(),
extra_field_length: zip64_extra_field_length
.checked_add(extra_field_len + central_extra_field_len)
.ok_or(ZipError::InvalidArchive(
"Extra field length in central directory exceeds 64KiB",
))?,
extra_field_length: extra_field_len.checked_add(central_extra_field_len).ok_or(
ZipError::InvalidArchive("Extra field length in central directory exceeds 64KiB"),
)?,
file_comment_length: self.file_comment.as_bytes().len().try_into().unwrap(),
disk_number: 0,
internal_file_attributes: 0,
Expand All @@ -850,45 +844,12 @@ impl ZipFileData {
}

pub(crate) fn zip64_extra_field_block(&self) -> Option<Zip64ExtraFieldBlock> {
let uncompressed_size: Option<u64> =
if self.uncompressed_size >= spec::ZIP64_BYTES_THR || self.large_file {
Some(spec::ZIP64_BYTES_THR)
} else {
None
};
let compressed_size: Option<u64> =
if self.compressed_size >= spec::ZIP64_BYTES_THR || self.large_file {
Some(spec::ZIP64_BYTES_THR)
} else {
None
};
let header_start: Option<u64> = if self.header_start >= spec::ZIP64_BYTES_THR {
Some(spec::ZIP64_BYTES_THR)
} else {
None
};

let mut size: u16 = 0;
if uncompressed_size.is_some() {
size += mem::size_of::<u64>() as u16;
}
if compressed_size.is_some() {
size += mem::size_of::<u64>() as u16;
}
if header_start.is_some() {
size += mem::size_of::<u64>() as u16;
}
if size == 0 {
return None;
}

Some(Zip64ExtraFieldBlock {
magic: spec::ExtraFieldMagic::ZIP64_EXTRA_FIELD_TAG,
size,
uncompressed_size,
compressed_size,
header_start,
})
Zip64ExtraFieldBlock::maybe_new(
self.large_file,
self.uncompressed_size,
self.compressed_size,
self.header_start,
)
}
}

Expand Down Expand Up @@ -1002,6 +963,46 @@ pub(crate) struct Zip64ExtraFieldBlock {
// u32: disk start number
}

impl Zip64ExtraFieldBlock {
    /// Builds a ZIP64 extra-data block containing only the fields that
    /// actually need 64-bit storage, or returns `None` when no field does.
    ///
    /// A size field is included when its value reaches the ZIP64 threshold,
    /// or unconditionally when `large_file` is set (so space is reserved
    /// even before the final sizes are known); `header_start` is included
    /// only when the offset itself exceeds the threshold.
    pub(crate) fn maybe_new(
        large_file: bool,
        uncompressed_size: u64,
        compressed_size: u64,
        header_start: u64,
    ) -> Option<Zip64ExtraFieldBlock> {
        // Decide up front which of the three optional u64 fields to emit.
        let needs_uncompressed = large_file || uncompressed_size >= ZIP64_BYTES_THR;
        let needs_compressed = large_file || compressed_size >= ZIP64_BYTES_THR;
        let needs_header_start = header_start >= ZIP64_BYTES_THR;

        let field_count = usize::from(needs_uncompressed)
            + usize::from(needs_compressed)
            + usize::from(needs_header_start);
        if field_count == 0 {
            // Nothing exceeds 32 bits, so no ZIP64 record is required.
            return None;
        }

        Some(Zip64ExtraFieldBlock {
            magic: spec::ExtraFieldMagic::ZIP64_EXTRA_FIELD_TAG,
            // `size` counts one u64 of payload per included field.
            size: (field_count * mem::size_of::<u64>()) as u16,
            uncompressed_size: needs_uncompressed.then_some(uncompressed_size),
            compressed_size: needs_compressed.then_some(compressed_size),
            header_start: needs_header_start.then_some(header_start),
        })
    }
}

impl Zip64ExtraFieldBlock {
pub fn full_size(&self) -> usize {
assert!(self.size > 0);
Expand Down
Loading

0 comments on commit 6d8ab62

Please sign in to comment.