diff --git a/src/liballoc/alloc.rs b/src/liballoc/alloc.rs
index 4ae8fc649ddbf..8753c495737c3 100644
--- a/src/liballoc/alloc.rs
+++ b/src/liballoc/alloc.rs
@@ -115,7 +115,7 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
         if !ptr.is_null() {
             ptr as *mut u8
         } else {
-            oom()
+            oom(layout)
         }
     }
 }
@@ -134,12 +134,13 @@ pub(crate) unsafe fn box_free<T: ?Sized>(ptr: Unique<T>) {
 }
 
 #[rustc_allocator_nounwind]
-pub fn oom() -> ! {
-    extern {
+pub fn oom(layout: Layout) -> ! {
+    #[allow(improper_ctypes)]
+    extern "Rust" {
         #[lang = "oom"]
-        fn oom_impl() -> !;
+        fn oom_impl(layout: Layout) -> !;
     }
-    unsafe { oom_impl() }
+    unsafe { oom_impl(layout) }
 }
 
 #[cfg(test)]
@@ -154,7 +155,7 @@ mod tests {
         unsafe {
             let layout = Layout::from_size_align(1024, 1).unwrap();
             let ptr = Global.alloc_zeroed(layout.clone())
-                .unwrap_or_else(|_| oom());
+                .unwrap_or_else(|_| oom(layout));
 
             let mut i = ptr.cast::<u8>().as_ptr();
             let end = i.offset(layout.size() as isize);
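The change above threads the failing `Layout` through the `oom` lang item, so the final handler knows the size and alignment of the request that failed. A minimal sketch of the resulting calling convention, written against the nightly `allocator_api` surface used by the test files later in this patch (the `u64` layout is only an example):

#![feature(allocator_api)]

use std::alloc::{Alloc, Global, Layout, oom};

fn main() {
    unsafe {
        // Keep the layout around: it is needed both on the error path
        // (`oom` now takes it) and for deallocation.
        let layout = Layout::new::<u64>();
        let ptr = Global.alloc(layout).unwrap_or_else(|_| oom(layout));
        Global.dealloc(ptr, layout);
    }
}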
diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs
index d0950bff9ce80..f75132487849f 100644
--- a/src/liballoc/arc.rs
+++ b/src/liballoc/arc.rs
@@ -553,7 +553,7 @@ impl<T: ?Sized> Arc<T> {
         let layout = Layout::for_value(&*fake_ptr);
 
         let mem = Global.alloc(layout)
-            .unwrap_or_else(|_| oom());
+            .unwrap_or_else(|_| oom(layout));
 
         // Initialize the real ArcInner
         let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut ArcInner<T>;
diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs
index 5c6f6b22aae06..07bb7f1a3ebd8 100644
--- a/src/liballoc/raw_vec.rs
+++ b/src/liballoc/raw_vec.rs
@@ -96,14 +96,15 @@ impl<T, A: Alloc> RawVec<T, A> {
             NonNull::<T>::dangling().as_opaque()
         } else {
             let align = mem::align_of::<T>();
+            let layout = Layout::from_size_align(alloc_size, align).unwrap();
             let result = if zeroed {
-                a.alloc_zeroed(Layout::from_size_align(alloc_size, align).unwrap())
+                a.alloc_zeroed(layout)
             } else {
-                a.alloc(Layout::from_size_align(alloc_size, align).unwrap())
+                a.alloc(layout)
             };
             match result {
                 Ok(ptr) => ptr,
-                Err(_) => oom(),
+                Err(_) => oom(layout),
             }
         };
@@ -318,7 +319,7 @@ impl<T, A: Alloc> RawVec<T, A> {
                                                  new_size);
                     match ptr_res {
                         Ok(ptr) => (new_cap, ptr.cast().into()),
-                        Err(_) => oom(),
+                        Err(_) => oom(Layout::from_size_align_unchecked(new_size, cur.align())),
                     }
                 }
                 None => {
@@ -327,7 +328,7 @@ impl<T, A: Alloc> RawVec<T, A> {
                     let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
                     match self.a.alloc_array::<T>(new_cap) {
                         Ok(ptr) => (new_cap, ptr.into()),
-                        Err(_) => oom(),
+                        Err(_) => oom(Layout::array::<T>(new_cap).unwrap()),
                     }
                 }
             };
@@ -389,37 +390,7 @@ impl<T, A: Alloc> RawVec<T, A> {
     pub fn try_reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize)
         -> Result<(), CollectionAllocErr> {
 
-        unsafe {
-            // NOTE: we don't early branch on ZSTs here because we want this
-            // to actually catch "asking for more than usize::MAX" in that case.
-            // If we make it past the first branch then we are guaranteed to
-            // panic.
-
-            // Don't actually need any more capacity.
-            // Wrapping in case they gave a bad `used_cap`.
-            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
-                return Ok(());
-            }
-
-            // Nothing we can really do about these checks :(
-            let new_cap = used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?;
-            let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
-
-            alloc_guard(new_layout.size())?;
-
-            let res = match self.current_layout() {
-                Some(layout) => {
-                    debug_assert!(new_layout.align() == layout.align());
-                    self.a.realloc(NonNull::from(self.ptr).as_opaque(), layout, new_layout.size())
-                }
-                None => self.a.alloc(new_layout),
-            };
-
-            self.ptr = res?.cast().into();
-            self.cap = new_cap;
-
-            Ok(())
-        }
+        self.reserve_internal(used_cap, needed_extra_cap, Fallible, Exact)
     }
 
     /// Ensures that the buffer contains at least enough space to hold
     ///
@@ -443,9 +414,9 @@ impl<T, A: Alloc> RawVec<T, A> {
     ///
    /// Aborts on OOM
     pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
-        match self.try_reserve_exact(used_cap, needed_extra_cap) {
+        match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Exact) {
             Err(CapacityOverflow) => capacity_overflow(),
-            Err(AllocErr) => oom(),
+            Err(AllocErr) => unreachable!(),
             Ok(()) => { /* yay */ }
         }
     }
@@ -467,37 +438,7 @@ impl<T, A: Alloc> RawVec<T, A> {
     /// The same as `reserve`, but returns on errors instead of panicking or aborting.
     pub fn try_reserve(&mut self, used_cap: usize, needed_extra_cap: usize)
         -> Result<(), CollectionAllocErr> {
-        unsafe {
-            // NOTE: we don't early branch on ZSTs here because we want this
-            // to actually catch "asking for more than usize::MAX" in that case.
-            // If we make it past the first branch then we are guaranteed to
-            // panic.
-
-            // Don't actually need any more capacity.
-            // Wrapping in case they give a bad `used_cap`
-            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
-                return Ok(());
-            }
-
-            let new_cap = self.amortized_new_size(used_cap, needed_extra_cap)?;
-            let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
-
-            // FIXME: may crash and burn on over-reserve
-            alloc_guard(new_layout.size())?;
-
-            let res = match self.current_layout() {
-                Some(layout) => {
-                    debug_assert!(new_layout.align() == layout.align());
-                    self.a.realloc(NonNull::from(self.ptr).as_opaque(), layout, new_layout.size())
-                }
-                None => self.a.alloc(new_layout),
-            };
-
-            self.ptr = res?.cast().into();
-            self.cap = new_cap;
-
-            Ok(())
-        }
+        self.reserve_internal(used_cap, needed_extra_cap, Fallible, Amortized)
     }
 
     /// Ensures that the buffer contains at least enough space to hold
@@ -553,12 +494,12 @@ impl<T, A: Alloc> RawVec<T, A> {
     /// # }
     /// ```
     pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
-        match self.try_reserve(used_cap, needed_extra_cap) {
+        match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Amortized) {
             Err(CapacityOverflow) => capacity_overflow(),
-            Err(AllocErr) => oom(),
+            Err(AllocErr) => unreachable!(),
             Ok(()) => { /* yay */ }
-        }
-    }
+         }
+     }
 
     /// Attempts to ensure that the buffer contains at least enough space to hold
     /// `used_cap + needed_extra_cap` elements. If it doesn't already have
     /// enough capacity, will reallocate in place enough space plus comfortable slack
@@ -670,7 +611,7 @@ impl<T, A: Alloc> RawVec<T, A> {
                                    old_layout,
                                    new_size) {
                 Ok(p) => self.ptr = p.cast().into(),
-                Err(_) => oom(),
+                Err(_) => oom(Layout::from_size_align_unchecked(new_size, align)),
             }
         }
         self.cap = amount;
@@ -678,6 +619,73 @@ impl<T, A: Alloc> RawVec<T, A> {
     }
 }
 
+enum Fallibility {
+    Fallible,
+    Infallible,
+}
+
+use self::Fallibility::*;
+
+enum ReserveStrategy {
+    Exact,
+    Amortized,
+}
+
+use self::ReserveStrategy::*;
+
+impl<T, A: Alloc> RawVec<T, A> {
+    fn reserve_internal(
+        &mut self,
+        used_cap: usize,
+        needed_extra_cap: usize,
+        fallibility: Fallibility,
+        strategy: ReserveStrategy,
+    ) -> Result<(), CollectionAllocErr> {
+        unsafe {
+            use alloc::AllocErr;
+
+            // NOTE: we don't early branch on ZSTs here because we want this
+            // to actually catch "asking for more than usize::MAX" in that case.
+            // If we make it past the first branch then we are guaranteed to
+            // panic.
+
+            // Don't actually need any more capacity.
+            // Wrapping in case they gave a bad `used_cap`.
+            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
+                return Ok(());
+            }
+
+            // Nothing we can really do about these checks :(
+            let new_cap = match strategy {
+                Exact => used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?,
+                Amortized => self.amortized_new_size(used_cap, needed_extra_cap)?,
+            };
+            let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
+
+            alloc_guard(new_layout.size())?;
+
+            let res = match self.current_layout() {
+                Some(layout) => {
+                    debug_assert!(new_layout.align() == layout.align());
+                    self.a.realloc(NonNull::from(self.ptr).as_opaque(), layout, new_layout.size())
+                }
+                None => self.a.alloc(new_layout),
+            };
+
+            match (&res, fallibility) {
+                (Err(AllocErr), Infallible) => oom(new_layout),
+                _ => {}
+            }
+
+            self.ptr = res?.cast().into();
+            self.cap = new_cap;
+
+            Ok(())
+        }
+    }
+
+}
+
 impl<T> RawVec<T, Global> {
     /// Converts the entire buffer into `Box<[T]>`.
     ///
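Everything from `reserve` to `try_reserve_exact` above now funnels into the single `reserve_internal`, with the two axes of behavior (abort vs. return, exact vs. amortized growth) passed as data rather than duplicated as code. A self-contained sketch of that shape, with illustrative names and `panic!` standing in for the diverging `oom`:

enum Fallibility { Fallible, Infallible }
enum ReserveStrategy { Exact, Amortized }

struct Buf { cap: usize }

// Stand-in for the actual allocator call.
fn grow(_new_cap: usize) -> Result<(), &'static str> { Ok(()) }

impl Buf {
    // One copy of the growth logic; the behavioral differences are data.
    fn reserve_internal(&mut self, extra: usize, f: Fallibility, s: ReserveStrategy)
        -> Result<(), &'static str>
    {
        let needed = self.cap.checked_add(extra).ok_or("capacity overflow")?;
        let new_cap = match s {
            ReserveStrategy::Exact => needed,
            ReserveStrategy::Amortized => needed.max(self.cap.saturating_mul(2)),
        };
        let res = grow(new_cap);
        if let (Err(e), Fallibility::Infallible) = (&res, f) {
            panic!("infallible path aborts instead of returning: {}", e);
        }
        res?;
        self.cap = new_cap;
        Ok(())
    }

    // Infallible front end: by the time an error reaches here it can only be
    // capacity overflow, mirroring the `unreachable!()` arms in the patch.
    fn reserve(&mut self, extra: usize) {
        if let Err(e) = self.reserve_internal(extra, Fallibility::Infallible,
                                              ReserveStrategy::Amortized) {
            panic!("{}", e);
        }
    }

    fn try_reserve_exact(&mut self, extra: usize) -> Result<(), &'static str> {
        self.reserve_internal(extra, Fallibility::Fallible, ReserveStrategy::Exact)
    }
}

fn main() {
    let mut b = Buf { cap: 0 };
    b.reserve(10);
    assert_eq!(b.cap, 10);
    assert!(b.try_reserve_exact(std::usize::MAX).is_err());
}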
diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index d0188c6e82858..1648fc6b7ef4b 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -668,7 +668,7 @@ impl<T: ?Sized> Rc<T> {
         let layout = Layout::for_value(&*fake_ptr);
 
         let mem = Global.alloc(layout)
-            .unwrap_or_else(|_| oom());
+            .unwrap_or_else(|_| oom(layout));
 
         // Initialize the real RcBox
         let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut RcBox<T>;
diff --git a/src/libstd/alloc.rs b/src/libstd/alloc.rs
index 78d3d6d5e60cc..4f9dffc7c9549 100644
--- a/src/libstd/alloc.rs
+++ b/src/libstd/alloc.rs
@@ -13,15 +13,59 @@
 #![unstable(issue = "32838", feature = "allocator_api")]
 
 #[doc(inline)] #[allow(deprecated)] pub use alloc_crate::alloc::Heap;
-#[doc(inline)] pub use alloc_crate::alloc::{Global, oom};
+#[doc(inline)] pub use alloc_crate::alloc::{Global, Layout, oom};
 #[doc(inline)] pub use alloc_system::System;
 #[doc(inline)] pub use core::alloc::*;
 
+use core::sync::atomic::{AtomicPtr, Ordering};
+use core::{mem, ptr};
+
+static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
+
+/// Registers a custom OOM hook, replacing any that was previously registered.
+///
+/// The OOM hook is invoked when an infallible memory allocation fails.
+/// The default hook prints a message to standard error and aborts the
+/// execution, but this behavior can be customized with the [`set_oom_hook`]
+/// and [`take_oom_hook`] functions.
+///
+/// The hook is provided with a `Layout` struct which contains information
+/// about the allocation that failed.
+///
+/// The OOM hook is a global resource.
+pub fn set_oom_hook(hook: fn(Layout) -> !) {
+    HOOK.store(hook as *mut (), Ordering::SeqCst);
+}
+
+/// Unregisters the current OOM hook, returning it.
+///
+/// *See also the function [`set_oom_hook`].*
+///
+/// If no custom hook is registered, the default hook will be returned.
+pub fn take_oom_hook() -> fn(Layout) -> ! {
+    let hook = HOOK.swap(ptr::null_mut(), Ordering::SeqCst);
+    if hook.is_null() {
+        default_oom_hook
+    } else {
+        unsafe { mem::transmute(hook) }
+    }
+}
+
+fn default_oom_hook(layout: Layout) -> ! {
+    rtabort!("memory allocation of {} bytes failed", layout.size())
+}
+
 #[cfg(not(test))]
 #[doc(hidden)]
 #[lang = "oom"]
-pub extern fn rust_oom() -> ! {
-    rtabort!("memory allocation failed");
+pub extern fn rust_oom(layout: Layout) -> ! {
+    let hook = HOOK.load(Ordering::SeqCst);
+    let hook: fn(Layout) -> ! = if hook.is_null() {
+        default_oom_hook
+    } else {
+        unsafe { mem::transmute(hook) }
+    };
+    hook(layout)
 }
 
 #[cfg(not(test))]
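Downstream, the two new functions give programs a way to observe an allocation failure before the abort. A sketch of how a user crate would install a hook under the API proposed in this diff (nightly-only; the hook is a plain `fn` pointer and must diverge):

#![feature(allocator_api)]

use std::alloc::{set_oom_hook, Layout};
use std::process;

fn verbose_oom(layout: Layout) -> ! {
    // Runs on allocation failure; allocating here would be unwise, so
    // stick to writing to stderr and aborting.
    eprintln!("allocation failure: {} bytes, align {}",
              layout.size(), layout.align());
    process::abort()
}

fn main() {
    set_oom_hook(verbose_oom);
    // From here on, any infallible allocation failure reports through
    // `verbose_oom` instead of the default
    // "memory allocation of N bytes failed" message.
}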
diff --git a/src/libstd/collections/hash/map.rs b/src/libstd/collections/hash/map.rs
index a7eb002d5a1d9..935ea4b62b5eb 100644
--- a/src/libstd/collections/hash/map.rs
+++ b/src/libstd/collections/hash/map.rs
@@ -11,7 +11,7 @@
 use self::Entry::*;
 use self::VacantEntryState::*;
 
-use alloc::{CollectionAllocErr, oom};
+use alloc::CollectionAllocErr;
 use cell::Cell;
 use borrow::Borrow;
 use cmp::max;
@@ -23,8 +23,10 @@ use mem::{self, replace};
 use ops::{Deref, Index};
 use sys;
 
-use super::table::{self, Bucket, EmptyBucket, FullBucket, FullBucketMut, RawTable, SafeHash};
+use super::table::{self, Bucket, EmptyBucket, Fallibility, FullBucket, FullBucketMut, RawTable,
+                   SafeHash};
 use super::table::BucketState::{Empty, Full};
+use super::table::Fallibility::{Fallible, Infallible};
 
 const MIN_NONZERO_RAW_CAPACITY: usize = 32;     // must be a power of two
 
@@ -783,11 +785,11 @@ impl<K, V, S> HashMap<K, V, S>
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn reserve(&mut self, additional: usize) {
-        match self.try_reserve(additional) {
+        match self.reserve_internal(additional, Infallible) {
             Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
-            Err(CollectionAllocErr::AllocErr) => oom(),
+            Err(CollectionAllocErr::AllocErr) => unreachable!(),
             Ok(()) => { /* yay */ }
-        }
+         }
     }
 
     /// Tries to reserve capacity for at least `additional` more elements to be inserted
@@ -809,17 +811,24 @@ impl<K, V, S> HashMap<K, V, S>
     /// ```
     #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
     pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
+        self.reserve_internal(additional, Fallible)
+    }
+
+    fn reserve_internal(&mut self, additional: usize, fallibility: Fallibility)
+        -> Result<(), CollectionAllocErr> {
+
         let remaining = self.capacity() - self.len(); // this can't overflow
         if remaining < additional {
-            let min_cap = self.len().checked_add(additional)
+            let min_cap = self.len()
+                .checked_add(additional)
                 .ok_or(CollectionAllocErr::CapacityOverflow)?;
             let raw_cap = self.resize_policy.try_raw_capacity(min_cap)?;
-            self.try_resize(raw_cap)?;
+            self.try_resize(raw_cap, fallibility)?;
         } else if self.table.tag() && remaining <= self.len() {
             // Probe sequence is too long and table is half full,
             // resize early to reduce probing length.
             let new_capacity = self.table.capacity() * 2;
-            self.try_resize(new_capacity)?;
+            self.try_resize(new_capacity, fallibility)?;
         }
         Ok(())
     }
@@ -831,11 +840,21 @@ impl<K, V, S> HashMap<K, V, S>
     /// 2) Ensure `new_raw_cap` is a power of two or zero.
     #[inline(never)]
     #[cold]
-    fn try_resize(&mut self, new_raw_cap: usize) -> Result<(), CollectionAllocErr> {
+    fn try_resize(
+        &mut self,
+        new_raw_cap: usize,
+        fallibility: Fallibility,
+    ) -> Result<(), CollectionAllocErr> {
         assert!(self.table.size() <= new_raw_cap);
         assert!(new_raw_cap.is_power_of_two() || new_raw_cap == 0);
 
-        let mut old_table = replace(&mut self.table, RawTable::try_new(new_raw_cap)?);
+        let mut old_table = replace(
+            &mut self.table,
+            match fallibility {
+                Infallible => RawTable::new(new_raw_cap),
+                Fallible => RawTable::try_new(new_raw_cap)?,
+            }
+        );
         let old_size = old_table.size();
 
         if old_table.size() == 0 {
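From the caller's side, the infallible and fallible paths of `HashMap` now share one implementation but keep distinct signatures. The fallible one is the unstable `try_reserve` (feature gate referenced in the attribute above, tracking issue #48043); a usage sketch:

#![feature(try_reserve)]

use std::collections::HashMap;

fn main() {
    let mut cache: HashMap<u64, Vec<u8>> = HashMap::new();
    // `reserve` aborts the process if the allocator fails; `try_reserve`
    // surfaces the same condition as a `CollectionAllocErr` instead.
    match cache.try_reserve(1024) {
        Ok(()) => {} // the next 1024 inserts need no further allocation
        Err(e) => eprintln!("couldn't grow cache: {:?}", e),
    }
}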
diff --git a/src/libstd/collections/hash/table.rs b/src/libstd/collections/hash/table.rs
index b50652ed6b557..eed2debcaa282 100644
--- a/src/libstd/collections/hash/table.rs
+++ b/src/libstd/collections/hash/table.rs
@@ -711,11 +711,21 @@ fn test_offset_calculation() {
     assert_eq!(calculate_offsets(6, 12, 4), (8, 20, false));
 }
 
+pub(crate) enum Fallibility {
+    Fallible,
+    Infallible,
+}
+
+use self::Fallibility::*;
+
 impl<K, V> RawTable<K, V> {
     /// Does not initialize the buckets. The caller should ensure they,
     /// at the very least, set every hash to EMPTY_BUCKET.
     /// Returns an error if it cannot allocate or capacity overflows.
-    unsafe fn try_new_uninitialized(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
+    unsafe fn new_uninitialized_internal(
+        capacity: usize,
+        fallibility: Fallibility,
+    ) -> Result<RawTable<K, V>, CollectionAllocErr> {
         if capacity == 0 {
             return Ok(RawTable {
                 size: 0,
@@ -754,8 +764,12 @@ impl<K, V> RawTable<K, V> {
             return Err(CollectionAllocErr::CapacityOverflow);
         }
 
-        let buffer = Global.alloc(Layout::from_size_align(size, alignment)
-            .map_err(|_| CollectionAllocErr::CapacityOverflow)?)?;
+        let layout = Layout::from_size_align(size, alignment)
+            .map_err(|_| CollectionAllocErr::CapacityOverflow)?;
+        let buffer = Global.alloc(layout).map_err(|e| match fallibility {
+            Infallible => oom(layout),
+            Fallible => e,
+        })?;
 
         Ok(RawTable {
             capacity_mask: capacity.wrapping_sub(1),
@@ -768,9 +782,9 @@ impl<K, V> RawTable<K, V> {
     /// Does not initialize the buckets. The caller should ensure they,
     /// at the very least, set every hash to EMPTY_BUCKET.
     unsafe fn new_uninitialized(capacity: usize) -> RawTable<K, V> {
-        match Self::try_new_uninitialized(capacity) {
+        match Self::new_uninitialized_internal(capacity, Infallible) {
             Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
-            Err(CollectionAllocErr::AllocErr) => oom(),
+            Err(CollectionAllocErr::AllocErr) => unreachable!(),
             Ok(table) => { table }
         }
     }
@@ -794,22 +808,29 @@ impl<K, V> RawTable<K, V> {
         }
     }
 
-    /// Tries to create a new raw table from a given capacity. If it cannot allocate,
-    /// it returns with AllocErr.
-    pub fn try_new(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
+    fn new_internal(
+        capacity: usize,
+        fallibility: Fallibility,
+    ) -> Result<RawTable<K, V>, CollectionAllocErr> {
         unsafe {
-            let ret = RawTable::try_new_uninitialized(capacity)?;
+            let ret = RawTable::new_uninitialized_internal(capacity, fallibility)?;
             ptr::write_bytes(ret.hashes.ptr(), 0, capacity);
             Ok(ret)
         }
     }
 
+    /// Tries to create a new raw table from a given capacity. If it cannot allocate,
+    /// it returns with AllocErr.
+    pub fn try_new(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
+        Self::new_internal(capacity, Fallible)
+    }
+
     /// Creates a new raw table from a given capacity. All buckets are
     /// initially empty.
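The `map_err` in `new_uninitialized_internal` is the load-bearing line: on the infallible path the closure diverges into `oom(layout)`, so the trailing `?` can only ever propagate an error on the fallible path. The idiom in isolation (illustrative names, with `panic!` standing in for the diverging `oom`):

#[derive(Clone, Copy)]
enum Fallibility { Fallible, Infallible }

fn acquire(fallibility: Fallibility) -> Result<usize, &'static str> {
    let res: Result<usize, &'static str> = Err("out of memory"); // pretend allocation
    let n = res.map_err(|e| match fallibility {
        // Diverges, so this arm never yields an error value to `?`.
        Fallibility::Infallible => panic!("aborting: {}", e),
        Fallibility::Fallible => e,
    })?;
    Ok(n)
}

fn main() {
    assert!(acquire(Fallibility::Fallible).is_err());
}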
     pub fn new(capacity: usize) -> RawTable<K, V> {
-        match Self::try_new(capacity) {
+        match Self::new_internal(capacity, Infallible) {
             Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
-            Err(CollectionAllocErr::AllocErr) => oom(),
+            Err(CollectionAllocErr::AllocErr) => unreachable!(),
             Ok(table) => { table }
         }
     }
diff --git a/src/test/run-pass/allocator-alloc-one.rs b/src/test/run-pass/allocator-alloc-one.rs
index 12b115d09380e..f1fdbfc702ddf 100644
--- a/src/test/run-pass/allocator-alloc-one.rs
+++ b/src/test/run-pass/allocator-alloc-one.rs
@@ -10,11 +10,11 @@
 
 #![feature(allocator_api, nonnull)]
 
-use std::alloc::{Alloc, Global, oom};
+use std::alloc::{Alloc, Global, Layout, oom};
 
 fn main() {
     unsafe {
-        let ptr = Global.alloc_one::<i32>().unwrap_or_else(|_| oom());
+        let ptr = Global.alloc_one::<i32>().unwrap_or_else(|_| oom(Layout::new::<i32>()));
         *ptr.as_ptr() = 4;
         assert_eq!(*ptr.as_ptr(), 4);
         Global.dealloc_one(ptr);
diff --git a/src/test/run-pass/realloc-16687.rs b/src/test/run-pass/realloc-16687.rs
index 308792e5d8924..febd249d776af 100644
--- a/src/test/run-pass/realloc-16687.rs
+++ b/src/test/run-pass/realloc-16687.rs
@@ -50,7 +50,7 @@ unsafe fn test_triangle() -> bool {
         println!("allocate({:?})", layout);
     }
 
-    let ret = Global.alloc(layout.clone()).unwrap_or_else(|_| oom());
+    let ret = Global.alloc(layout).unwrap_or_else(|_| oom(layout));
 
     if PRINT {
         println!("allocate({:?}) = {:?}", layout, ret);
@@ -72,8 +72,8 @@ unsafe fn test_triangle() -> bool {
         println!("reallocate({:?}, old={:?}, new={:?})", ptr, old, new);
     }
 
-    let ret = Global.realloc(NonNull::new_unchecked(ptr).as_opaque(), old.clone(), new.size())
-        .unwrap_or_else(|_| oom());
+    let ret = Global.realloc(NonNull::new_unchecked(ptr).as_opaque(), old, new.size())
+        .unwrap_or_else(|_| oom(Layout::from_size_align_unchecked(new.size(), old.align())));
 
     if PRINT {
         println!("reallocate({:?}, old={:?}, new={:?}) = {:?}",
diff --git a/src/test/run-pass/regions-mock-codegen.rs b/src/test/run-pass/regions-mock-codegen.rs
index 60a7f70931d48..745a19dec4d78 100644
--- a/src/test/run-pass/regions-mock-codegen.rs
+++ b/src/test/run-pass/regions-mock-codegen.rs
@@ -32,8 +32,8 @@ struct Ccx {
 
 fn alloc<'a>(_bcx : &'a arena) -> &'a Bcx<'a> {
     unsafe {
-        let ptr = Global.alloc(Layout::new::<Bcx>())
-            .unwrap_or_else(|_| oom());
+        let layout = Layout::new::<Bcx>();
+        let ptr = Global.alloc(layout).unwrap_or_else(|_| oom(layout));
         &*(ptr.as_ptr() as *const _)
    }
 }
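One subtlety in the realloc test above: the `Layout` handed to `oom` is rebuilt with `from_size_align_unchecked`, which skips validation. That is sound there because a successful `realloc` implies the size/align pair is valid; the checked constructor remains the right default elsewhere. A quick sketch of the pair (nightly, since `Layout` is only re-exported from `std::alloc` as of this patch):

#![feature(allocator_api)]

use std::alloc::Layout;

fn main() {
    // Checked: rejects non-power-of-two alignment (and size overflow).
    assert!(Layout::from_size_align(64, 3).is_err());
    let old = Layout::from_size_align(64, 8).unwrap();

    // Unchecked: the caller vouches for validity, as after a successful realloc.
    let new = unsafe { Layout::from_size_align_unchecked(128, old.align()) };
    assert_eq!((new.size(), new.align()), (128, 8));
}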