Clippy
Hocuri committed Dec 27, 2024
1 parent 63a00e0 commit 70ac6a7
Showing 2 changed files with 12 additions and 12 deletions.
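
The changes are mechanical clippy fixes. The patterns correspond to the lints `redundant_pattern_matching` (`if let Err(_) = …` becomes `.is_err()`), `needless_borrow` (dropping `&` on values that are already references), `redundant_field_names` (`blobdir: blobdir` becomes `blobdir`), and `useless_format` (`format!` with no placeholders becomes `.to_string()`). Below is a minimal standalone sketch of all four patterns, using hypothetical names rather than the deltachat-core-rust API:

```rust
use std::fs;
use std::path::Path;

struct Blob<'a> {
    blobdir: &'a Path,
    name: String,
}

// Hypothetical helper illustrating the four lint fixes; not the real API.
fn demo<'a>(blobdir: &'a Path, data: &[u8]) -> std::io::Result<Blob<'a>> {
    let path = blobdir.join("example-blob");
    // redundant_pattern_matching: `.is_err()` instead of `if let Err(_) = …`.
    if fs::write(&path, data).is_err() {
        // needless_borrow: pass `data` directly; `&data` would be a `&&[u8]`.
        fs::create_dir_all(blobdir)?;
        fs::write(&path, data)?;
    }
    Ok(Blob {
        blobdir, // redundant_field_names: not `blobdir: blobdir`.
        // useless_format: `.to_string()` instead of `format!("example-blob")`.
        name: "example-blob".to_string(),
    })
}

fn main() -> std::io::Result<()> {
    let dir = std::env::temp_dir();
    let blob = demo(&dir, b"hello")?;
    println!("{} in {}", blob.name, blob.blobdir.display());
    Ok(())
}
```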
22 changes: 11 additions & 11 deletions src/blob.rs
@@ -168,7 +168,7 @@ impl<'a> BlobObject<'a> {

         // This will also replace an already-existing file.
         // Renaming is atomic, so this will avoid race conditions.
-        if let Err(_) = std::fs::rename(src, &new_path) {
+        if std::fs::rename(src, &new_path).is_err() {
             // Try a second time in case there was some temporary error.
             // There is no need to try and create the blobdir since create_and_deduplicate()
             // only works for files that already are in the blobdir, anyway.
@@ -198,11 +198,11 @@ impl<'a> BlobObject<'a> {
         data: &[u8],
     ) -> Result<BlobObject<'a>> {
         let blobdir = context.get_blobdir();
-        let blob = BlobObject::from_hash(blobdir, blake3::hash(&data))?;
+        let blob = BlobObject::from_hash(blobdir, blake3::hash(data))?;
         let new_path = blob.to_abs_path();
 
         // This call to `std::fs::write` is thread safe because all threads write the same data.
-        if let Err(_) = std::fs::write(&new_path, &data) {
+        if std::fs::write(&new_path, data).is_err() {
             if new_path.exists() {
                 // Looks like the file is read-only and exists already
                 // TODO: Maybe we should check if the file contents are the same,
@@ -214,7 +214,7 @@ impl<'a> BlobObject<'a> {
             } else {
                 // Try to create the blob directory
                 std::fs::create_dir_all(blobdir).log_err(context).ok();
-                std::fs::write(&new_path, &data).context("fs::write")?;
+                std::fs::write(&new_path, data).context("fs::write")?;
             }
         }
 
@@ -227,7 +227,7 @@ impl<'a> BlobObject<'a> {
         let hash = hash.to_hex();
         let hash = hash.as_str().get(0..31).context("Too short hash")?;
         let blob = BlobObject {
-            blobdir: blobdir,
+            blobdir,
             name: format!("$BLOBDIR/{hash}"),
         };
         Ok(blob)
@@ -539,7 +539,7 @@ impl<'a> BlobObject<'a> {
                 file.rewind()?;
                 ImageReader::with_format(
                     std::io::BufReader::new(&file),
-                    ImageFormat::from_path(&self.to_abs_path())?,
+                    ImageFormat::from_path(self.to_abs_path())?,
                 )
             }
         };
@@ -693,9 +693,9 @@ impl<'a> BlobObject<'a> {
 }
 
 fn set_readonly(new_path: &Path) -> Result<()> {
-    let mut perms = std::fs::metadata(&new_path)?.permissions();
+    let mut perms = std::fs::metadata(new_path)?.permissions();
     perms.set_readonly(true);
-    std::fs::set_permissions(&new_path, perms)?;
+    std::fs::set_permissions(new_path, perms)?;
     Ok(())
 }
 
@@ -1147,7 +1147,7 @@ mod tests {
         avatar_blob.ends_with("d98cd30ed8f2129bf3968420208849d"),
         "The avatar filename should be its hash, put instead it's {avatar_blob}"
     );
-    let scaled_avatar_size = file_size(&avatar_path).await;
+    let scaled_avatar_size = file_size(avatar_path).await;
     assert!(scaled_avatar_size < avatar_bytes.len() as u64);
 
     check_image_size(avatar_src, 1000, 1000);
@@ -1175,9 +1175,9 @@ mod tests {
     // The new file should be smaller:
     assert!(new_file_size < scaled_avatar_size);
     // And the original file should not be touched:
-    assert_eq!(file_size(&avatar_path).await, scaled_avatar_size);
+    assert_eq!(file_size(avatar_path).await, scaled_avatar_size);
     tokio::task::block_in_place(move || {
-        let img = ImageReader::open(&blob.to_abs_path())
+        let img = ImageReader::open(blob.to_abs_path())
             .unwrap()
             .with_guessed_format()
             .unwrap()
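Stepping back, the touched spots in src/blob.rs all serve one scheme: a blob is named by the first 31 hex characters of its blake3 hash, racing writers are harmless because they all produce identical bytes, and the finished file is made read-only. A condensed, hypothetical stand-alone version of that flow, assuming the `blake3` and `anyhow` crates the file already uses (the real logic is spread across the write-from-bytes function, `from_hash`, and `set_readonly`):

```rust
use anyhow::{Context as _, Result};
use std::path::{Path, PathBuf};

// Hypothetical condensed version of the dedup-write flow shown in the diff.
fn write_deduplicated(blobdir: &Path, data: &[u8]) -> Result<PathBuf> {
    // Name the blob after the first 31 hex chars of its blake3 hash, as in `from_hash`.
    let hash = blake3::hash(data).to_hex();
    let name = hash.as_str().get(0..31).context("Too short hash")?;
    let new_path = blobdir.join(name);

    // Racing writers are harmless: every thread writes the same bytes to the same path.
    if std::fs::write(&new_path, data).is_err() {
        if new_path.exists() {
            // The blob was written earlier and made read-only; already deduplicated.
            return Ok(new_path);
        }
        // Otherwise the blobdir is probably missing: create it and retry once.
        std::fs::create_dir_all(blobdir).ok();
        std::fs::write(&new_path, data).context("fs::write")?;
    }

    // Same effect as `set_readonly` in the diff.
    let mut perms = std::fs::metadata(&new_path)?.permissions();
    perms.set_readonly(true);
    std::fs::set_permissions(&new_path, perms)?;
    Ok(new_path)
}

fn main() -> Result<()> {
    let dir = std::env::temp_dir();
    let path = write_deduplicated(&dir, b"blob contents")?;
    println!("stored at {}", path.display());
    Ok(())
}
```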
2 changes: 1 addition & 1 deletion src/receive_imf/tests.rs
@@ -3223,7 +3223,7 @@ async fn test_weird_and_duplicated_filenames() -> Result<()> {
         "a. tar.tar.gz",
     ] {
         let attachment = alice.blobdir.join(filename_sent);
-        let content = format!("File content of tar.gz archive");
+        let content = "File content of tar.gz archive".to_string();
         tokio::fs::write(&attachment, content.as_bytes()).await?;
 
         let mut msg_alice = Message::new(Viewtype::File);
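
The lone tests.rs change is the `useless_format` case: with no `{}` placeholders, `format!` runs the formatting machinery only to allocate a `String`, so a direct conversion is the idiomatic fix. A tiny sketch:

```rust
fn main() {
    // Clippy would flag this: nothing is interpolated, so `format!` buys nothing.
    // let content = format!("File content of tar.gz archive");

    // Preferred, as in the patched test (`String::from` would also do):
    let content = "File content of tar.gz archive".to_string();
    println!("{content}");
}
```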
