author     bors <bors@rust-lang.org>  2018-05-30 11:35:00 +0000
committer  bors <bors@rust-lang.org>  2018-05-30 11:35:00 +0000
commit     4f99f37b7e213d69a489884f651adfc6d217cef5 (patch)
tree       8b12fd25064a7c3df77c522bdff475e83aff8e23
parent     Auto merge of #51106 - davidtwco:issue-50934, r=nikomatsakis (diff)
parent     Add hooks allowing to override the `oom` behavior (diff)
Auto merge of #50880 - glandium:oom, r=SimonSapin
OOM handling changes

As discussed in https://github.com/rust-lang/rust/issues/49668#issuecomment-384893456 and the subsequent discussion.

This does have codegen implications. Even without the hooks, and with a handler that ignores the arguments, the compiler doesn't eliminate calling `rust_oom` with the `Layout`. Even if it managed to eliminate that, with the hooks, I don't know whether the compiler would be able to figure out it can skip it if the hook is never set.

A couple of implementation notes:

- I went with explicit enums rather than bools because it makes it clearer in callers what is being requested.
- I didn't know what `feature` to put the hook-setting functions behind (and surprisingly, compilation went through without any annotation on the functions).
- There's probably some bikeshedding to do on the naming.

Cc: @SimonSapin, @sfackler
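For illustration, here is a minimal sketch of how a program might install a custom handler through the hook API added in `src/libstd/alloc.rs` below. The `set_oom_hook` signature, the `Layout` argument, and the `fn(Layout) -> !` hook type come from this patch; the `my_oom` handler, its message, and the `feature(allocator_api)` gate are illustrative assumptions (the patch itself leaves the exact feature gate undecided):

```rust
#![feature(allocator_api)] // assumed gate; the patch does not settle on one

use std::alloc::{set_oom_hook, Layout};
use std::process;

// Hypothetical handler: receives the `Layout` of the failed allocation and must diverge.
// Note that printing may itself try to allocate, so a real handler should stay minimal.
fn my_oom(layout: Layout) -> ! {
    eprintln!("allocation of {} bytes failed", layout.size());
    process::abort()
}

fn main() {
    // Register the hook; from now on, infallible allocation failures
    // (e.g. inside `Vec::reserve`) reach `my_oom` instead of the default abort message.
    set_oom_hook(my_oom);
}
```

The handler is a plain `fn(Layout) -> !`, which is what lets the standard library store it in the `AtomicPtr`-based `HOOK` slot shown in the diff below.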
-rw-r--r--  src/liballoc/alloc.rs                      13
-rw-r--r--  src/liballoc/arc.rs                         2
-rw-r--r--  src/liballoc/raw_vec.rs                   156
-rw-r--r--  src/liballoc/rc.rs                          2
-rw-r--r--  src/libstd/alloc.rs                        50
-rw-r--r--  src/libstd/collections/hash/map.rs         39
-rw-r--r--  src/libstd/collections/hash/table.rs       43
-rw-r--r--  src/test/run-pass/allocator-alloc-one.rs    4
-rw-r--r--  src/test/run-pass/realloc-16687.rs          6
-rw-r--r--  src/test/run-pass/regions-mock-codegen.rs   4
10 files changed, 206 insertions, 113 deletions
diff --git a/src/liballoc/alloc.rs b/src/liballoc/alloc.rs
index 4ae8fc649d..8753c49573 100644
--- a/src/liballoc/alloc.rs
+++ b/src/liballoc/alloc.rs
@@ -115,7 +115,7 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
         if !ptr.is_null() {
             ptr as *mut u8
         } else {
-            oom()
+            oom(layout)
         }
     }
 }
@@ -134,12 +134,13 @@ pub(crate) unsafe fn box_free<T: ?Sized>(ptr: Unique<T>) {
 }
 
 #[rustc_allocator_nounwind]
-pub fn oom() -> ! {
-    extern {
+pub fn oom(layout: Layout) -> ! {
+    #[allow(improper_ctypes)]
+    extern "Rust" {
         #[lang = "oom"]
-        fn oom_impl() -> !;
+        fn oom_impl(layout: Layout) -> !;
     }
-    unsafe { oom_impl() }
+    unsafe { oom_impl(layout) }
 }
 
 #[cfg(test)]
@@ -154,7 +155,7 @@ mod tests {
         unsafe {
             let layout = Layout::from_size_align(1024, 1).unwrap();
             let ptr = Global.alloc_zeroed(layout.clone())
-                .unwrap_or_else(|_| oom());
+                .unwrap_or_else(|_| oom(layout));
 
             let mut i = ptr.cast::<u8>().as_ptr();
             let end = i.offset(layout.size() as isize);
diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs
index d0950bff9c..f751324878 100644
--- a/src/liballoc/arc.rs
+++ b/src/liballoc/arc.rs
@@ -553,7 +553,7 @@ impl<T: ?Sized> Arc<T> {
         let layout = Layout::for_value(&*fake_ptr);
 
         let mem = Global.alloc(layout)
-            .unwrap_or_else(|_| oom());
+            .unwrap_or_else(|_| oom(layout));
 
         // Initialize the real ArcInner
         let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut ArcInner<T>;
diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs
index 5c6f6b22aa..07bb7f1a3e 100644
--- a/src/liballoc/raw_vec.rs
+++ b/src/liballoc/raw_vec.rs
@@ -96,14 +96,15 @@ impl<T, A: Alloc> RawVec<T, A> {
             NonNull::<T>::dangling().as_opaque()
         } else {
             let align = mem::align_of::<T>();
+            let layout = Layout::from_size_align(alloc_size, align).unwrap();
             let result = if zeroed {
-                a.alloc_zeroed(Layout::from_size_align(alloc_size, align).unwrap())
+                a.alloc_zeroed(layout)
             } else {
-                a.alloc(Layout::from_size_align(alloc_size, align).unwrap())
+                a.alloc(layout)
             };
             match result {
                 Ok(ptr) => ptr,
-                Err(_) => oom(),
+                Err(_) => oom(layout),
             }
         };
 
@@ -318,7 +319,7 @@ impl<T, A: Alloc> RawVec<T, A> {
                                                  new_size);
                     match ptr_res {
                         Ok(ptr) => (new_cap, ptr.cast().into()),
-                        Err(_) => oom(),
+                        Err(_) => oom(Layout::from_size_align_unchecked(new_size, cur.align())),
                     }
                 }
                 None => {
@@ -327,7 +328,7 @@ impl<T, A: Alloc> RawVec<T, A> {
                     let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
                     match self.a.alloc_array::<T>(new_cap) {
                         Ok(ptr) => (new_cap, ptr.into()),
-                        Err(_) => oom(),
+                        Err(_) => oom(Layout::array::<T>(new_cap).unwrap()),
                     }
                 }
             };
@@ -389,37 +390,7 @@ impl<T, A: Alloc> RawVec<T, A> {
     pub fn try_reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize)
         -> Result<(), CollectionAllocErr> {
 
-        unsafe {
-            // NOTE: we don't early branch on ZSTs here because we want this
-            // to actually catch "asking for more than usize::MAX" in that case.
-            // If we make it past the first branch then we are guaranteed to
-            // panic.
-
-            // Don't actually need any more capacity.
-            // Wrapping in case they gave a bad `used_cap`.
-            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
-                return Ok(());
-            }
-
-            // Nothing we can really do about these checks :(
-            let new_cap = used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?;
-            let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
-
-            alloc_guard(new_layout.size())?;
-
-            let res = match self.current_layout() {
-                Some(layout) => {
-                    debug_assert!(new_layout.align() == layout.align());
-                    self.a.realloc(NonNull::from(self.ptr).as_opaque(), layout, new_layout.size())
-                }
-                None => self.a.alloc(new_layout),
-            };
-
-            self.ptr = res?.cast().into();
-            self.cap = new_cap;
-
-            Ok(())
-        }
+        self.reserve_internal(used_cap, needed_extra_cap, Fallible, Exact)
     }
 
     /// Ensures that the buffer contains at least enough space to hold
@@ -443,9 +414,9 @@ impl<T, A: Alloc> RawVec<T, A> {
     ///
     /// Aborts on OOM
     pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
-        match self.try_reserve_exact(used_cap, needed_extra_cap) {
+        match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Exact) {
             Err(CapacityOverflow) => capacity_overflow(),
-            Err(AllocErr) => oom(),
+            Err(AllocErr) => unreachable!(),
             Ok(()) => { /* yay */ }
         }
     }
@@ -467,37 +438,7 @@ impl<T, A: Alloc> RawVec<T, A> {
     /// The same as `reserve`, but returns on errors instead of panicking or aborting.
     pub fn try_reserve(&mut self, used_cap: usize, needed_extra_cap: usize)
         -> Result<(), CollectionAllocErr> {
-        unsafe {
-            // NOTE: we don't early branch on ZSTs here because we want this
-            // to actually catch "asking for more than usize::MAX" in that case.
-            // If we make it past the first branch then we are guaranteed to
-            // panic.
-
-            // Don't actually need any more capacity.
-            // Wrapping in case they give a bad `used_cap`
-            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
-                return Ok(());
-            }
-
-            let new_cap = self.amortized_new_size(used_cap, needed_extra_cap)?;
-            let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
-
-            // FIXME: may crash and burn on over-reserve
-            alloc_guard(new_layout.size())?;
-
-            let res = match self.current_layout() {
-                Some(layout) => {
-                    debug_assert!(new_layout.align() == layout.align());
-                    self.a.realloc(NonNull::from(self.ptr).as_opaque(), layout, new_layout.size())
-                }
-                None => self.a.alloc(new_layout),
-            };
-
-            self.ptr = res?.cast().into();
-            self.cap = new_cap;
-
-            Ok(())
-        }
+        self.reserve_internal(used_cap, needed_extra_cap, Fallible, Amortized)
     }
 
     /// Ensures that the buffer contains at least enough space to hold
@@ -553,12 +494,12 @@ impl<T, A: Alloc> RawVec<T, A> {
     /// # }
     /// ```
     pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
-        match self.try_reserve(used_cap, needed_extra_cap) {
+        match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Amortized) {
             Err(CapacityOverflow) => capacity_overflow(),
-            Err(AllocErr) => oom(),
+            Err(AllocErr) => unreachable!(),
             Ok(()) => { /* yay */ }
         }
     }
     /// Attempts to ensure that the buffer contains at least enough space to hold
     /// `used_cap + needed_extra_cap` elements. If it doesn't already have
     /// enough capacity, will reallocate in place enough space plus comfortable slack
@@ -670,7 +611,7 @@ impl<T, A: Alloc> RawVec<T, A> {
                                      old_layout,
                                      new_size) {
                     Ok(p) => self.ptr = p.cast().into(),
-                    Err(_) => oom(),
+                    Err(_) => oom(Layout::from_size_align_unchecked(new_size, align)),
                 }
             }
             self.cap = amount;
@@ -678,6 +619,73 @@ impl<T, A: Alloc> RawVec<T, A> {
     }
 }
 
+enum Fallibility {
+    Fallible,
+    Infallible,
+}
+
+use self::Fallibility::*;
+
+enum ReserveStrategy {
+    Exact,
+    Amortized,
+}
+
+use self::ReserveStrategy::*;
+
+impl<T, A: Alloc> RawVec<T, A> {
+    fn reserve_internal(
+        &mut self,
+        used_cap: usize,
+        needed_extra_cap: usize,
+        fallibility: Fallibility,
+        strategy: ReserveStrategy,
+    ) -> Result<(), CollectionAllocErr> {
+        unsafe {
+            use alloc::AllocErr;
+
+            // NOTE: we don't early branch on ZSTs here because we want this
+            // to actually catch "asking for more than usize::MAX" in that case.
+            // If we make it past the first branch then we are guaranteed to
+            // panic.
+
+            // Don't actually need any more capacity.
+            // Wrapping in case they gave a bad `used_cap`.
+            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
+                return Ok(());
+            }
+
+            // Nothing we can really do about these checks :(
+            let new_cap = match strategy {
+                Exact => used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?,
+                Amortized => self.amortized_new_size(used_cap, needed_extra_cap)?,
+            };
+            let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
+
+            alloc_guard(new_layout.size())?;
+
+            let res = match self.current_layout() {
+                Some(layout) => {
+                    debug_assert!(new_layout.align() == layout.align());
+                    self.a.realloc(NonNull::from(self.ptr).as_opaque(), layout, new_layout.size())
+                }
+                None => self.a.alloc(new_layout),
+            };
+
+            match (&res, fallibility) {
+                (Err(AllocErr), Infallible) => oom(new_layout),
+                _ => {}
+            }
+
+            self.ptr = res?.cast().into();
+            self.cap = new_cap;
+
+            Ok(())
+        }
+    }
+
+}
+
 impl<T> RawVec<T, Global> {
     /// Converts the entire buffer into `Box<[T]>`.
     ///
diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index d0188c6e82..1648fc6b7e 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -668,7 +668,7 @@ impl<T: ?Sized> Rc<T> {
         let layout = Layout::for_value(&*fake_ptr);
 
         let mem = Global.alloc(layout)
-            .unwrap_or_else(|_| oom());
+            .unwrap_or_else(|_| oom(layout));
 
         // Initialize the real RcBox
         let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut RcBox<T>;
diff --git a/src/libstd/alloc.rs b/src/libstd/alloc.rs
index 78d3d6d5e6..4f9dffc7c9 100644
--- a/src/libstd/alloc.rs
+++ b/src/libstd/alloc.rs
@@ -13,15 +13,59 @@
 #![unstable(issue = "32838", feature = "allocator_api")]
 
 #[doc(inline)] #[allow(deprecated)] pub use alloc_crate::alloc::Heap;
-#[doc(inline)] pub use alloc_crate::alloc::{Global, oom};
+#[doc(inline)] pub use alloc_crate::alloc::{Global, Layout, oom};
 #[doc(inline)] pub use alloc_system::System;
 #[doc(inline)] pub use core::alloc::*;
 
+use core::sync::atomic::{AtomicPtr, Ordering};
+use core::{mem, ptr};
+
+static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
+
+/// Registers a custom OOM hook, replacing any that was previously registered.
+///
+/// The OOM hook is invoked when an infallible memory allocation fails.
+/// The default hook prints a message to standard error and aborts the
+/// execution, but this behavior can be customized with the [`set_oom_hook`]
+/// and [`take_oom_hook`] functions.
+///
+/// The hook is provided with a `Layout` struct which contains information
+/// about the allocation that failed.
+///
+/// The OOM hook is a global resource.
+pub fn set_oom_hook(hook: fn(Layout) -> !) {
+    HOOK.store(hook as *mut (), Ordering::SeqCst);
+}
+
+/// Unregisters the current OOM hook, returning it.
+///
+/// *See also the function [`set_oom_hook`].*
+///
+/// If no custom hook is registered, the default hook will be returned.
+pub fn take_oom_hook() -> fn(Layout) -> ! {
+    let hook = HOOK.swap(ptr::null_mut(), Ordering::SeqCst);
+    if hook.is_null() {
+        default_oom_hook
+    } else {
+        unsafe { mem::transmute(hook) }
+    }
+}
+
+fn default_oom_hook(layout: Layout) -> ! {
+    rtabort!("memory allocation of {} bytes failed", layout.size())
+}
+
 #[cfg(not(test))]
 #[doc(hidden)]
 #[lang = "oom"]
-pub extern fn rust_oom() -> ! {
-    rtabort!("memory allocation failed");
+pub extern fn rust_oom(layout: Layout) -> ! {
+    let hook = HOOK.load(Ordering::SeqCst);
+    let hook: fn(Layout) -> ! = if hook.is_null() {
+        default_oom_hook
+    } else {
+        unsafe { mem::transmute(hook) }
+    };
+    hook(layout)
 }
 
 #[cfg(not(test))]
diff --git a/src/libstd/collections/hash/map.rs b/src/libstd/collections/hash/map.rs
index a7eb002d5a..935ea4b62b 100644
--- a/src/libstd/collections/hash/map.rs
+++ b/src/libstd/collections/hash/map.rs
@@ -11,7 +11,7 @@
 use self::Entry::*;
 use self::VacantEntryState::*;
 
-use alloc::{CollectionAllocErr, oom};
+use alloc::CollectionAllocErr;
 use cell::Cell;
 use borrow::Borrow;
 use cmp::max;
@@ -23,8 +23,10 @@ use mem::{self, replace};
 use ops::{Deref, Index};
 use sys;
 
-use super::table::{self, Bucket, EmptyBucket, FullBucket, FullBucketMut, RawTable, SafeHash};
+use super::table::{self, Bucket, EmptyBucket, Fallibility, FullBucket, FullBucketMut, RawTable,
+                   SafeHash};
 use super::table::BucketState::{Empty, Full};
+use super::table::Fallibility::{Fallible, Infallible};
 
 const MIN_NONZERO_RAW_CAPACITY: usize = 32; // must be a power of two
 
@@ -783,11 +785,11 @@ impl<K, V, S> HashMap<K, V, S>
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn reserve(&mut self, additional: usize) {
-        match self.try_reserve(additional) {
+        match self.reserve_internal(additional, Infallible) {
             Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
-            Err(CollectionAllocErr::AllocErr) => oom(),
+            Err(CollectionAllocErr::AllocErr) => unreachable!(),
             Ok(()) => { /* yay */ }
         }
     }
 
     /// Tries to reserve capacity for at least `additional` more elements to be inserted
@@ -809,17 +811,24 @@ impl<K, V, S> HashMap<K, V, S>
     /// ```
     #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
     pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
+        self.reserve_internal(additional, Fallible)
+    }
+
+    fn reserve_internal(&mut self, additional: usize, fallibility: Fallibility)
+        -> Result<(), CollectionAllocErr> {
+
         let remaining = self.capacity() - self.len(); // this can't overflow
         if remaining < additional {
-            let min_cap = self.len().checked_add(additional)
+            let min_cap = self.len()
+                .checked_add(additional)
                 .ok_or(CollectionAllocErr::CapacityOverflow)?;
             let raw_cap = self.resize_policy.try_raw_capacity(min_cap)?;
-            self.try_resize(raw_cap)?;
+            self.try_resize(raw_cap, fallibility)?;
         } else if self.table.tag() && remaining <= self.len() {
             // Probe sequence is too long and table is half full,
             // resize early to reduce probing length.
             let new_capacity = self.table.capacity() * 2;
-            self.try_resize(new_capacity)?;
+            self.try_resize(new_capacity, fallibility)?;
         }
         Ok(())
     }
@@ -831,11 +840,21 @@ impl<K, V, S> HashMap<K, V, S>
     /// 2) Ensure `new_raw_cap` is a power of two or zero.
     #[inline(never)]
     #[cold]
-    fn try_resize(&mut self, new_raw_cap: usize) -> Result<(), CollectionAllocErr> {
+    fn try_resize(
+        &mut self,
+        new_raw_cap: usize,
+        fallibility: Fallibility,
+    ) -> Result<(), CollectionAllocErr> {
         assert!(self.table.size() <= new_raw_cap);
         assert!(new_raw_cap.is_power_of_two() || new_raw_cap == 0);
 
-        let mut old_table = replace(&mut self.table, RawTable::try_new(new_raw_cap)?);
+        let mut old_table = replace(
+            &mut self.table,
+            match fallibility {
+                Infallible => RawTable::new(new_raw_cap),
+                Fallible => RawTable::try_new(new_raw_cap)?,
+            }
+        );
         let old_size = old_table.size();
 
         if old_table.size() == 0 {
diff --git a/src/libstd/collections/hash/table.rs b/src/libstd/collections/hash/table.rs
index b50652ed6b..eed2debcaa 100644
--- a/src/libstd/collections/hash/table.rs
+++ b/src/libstd/collections/hash/table.rs
@@ -711,11 +711,21 @@ fn test_offset_calculation() {
     assert_eq!(calculate_offsets(6, 12, 4), (8, 20, false));
 }
 
+pub(crate) enum Fallibility {
+    Fallible,
+    Infallible,
+}
+
+use self::Fallibility::*;
+
 impl<K, V> RawTable<K, V> {
     /// Does not initialize the buckets. The caller should ensure they,
     /// at the very least, set every hash to EMPTY_BUCKET.
     /// Returns an error if it cannot allocate or capacity overflows.
-    unsafe fn try_new_uninitialized(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
+    unsafe fn new_uninitialized_internal(
+        capacity: usize,
+        fallibility: Fallibility,
+    ) -> Result<RawTable<K, V>, CollectionAllocErr> {
         if capacity == 0 {
             return Ok(RawTable {
                 size: 0,
@@ -754,8 +764,12 @@ impl<K, V> RawTable<K, V> {
             return Err(CollectionAllocErr::CapacityOverflow);
         }
 
-        let buffer = Global.alloc(Layout::from_size_align(size, alignment)
-            .map_err(|_| CollectionAllocErr::CapacityOverflow)?)?;
+        let layout = Layout::from_size_align(size, alignment)
+            .map_err(|_| CollectionAllocErr::CapacityOverflow)?;
+        let buffer = Global.alloc(layout).map_err(|e| match fallibility {
+            Infallible => oom(layout),
+            Fallible => e,
+        })?;
 
         Ok(RawTable {
             capacity_mask: capacity.wrapping_sub(1),
@@ -768,9 +782,9 @@ impl<K, V> RawTable<K, V> {
     /// Does not initialize the buckets. The caller should ensure they,
     /// at the very least, set every hash to EMPTY_BUCKET.
     unsafe fn new_uninitialized(capacity: usize) -> RawTable<K, V> {
-        match Self::try_new_uninitialized(capacity) {
+        match Self::new_uninitialized_internal(capacity, Infallible) {
             Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
-            Err(CollectionAllocErr::AllocErr) => oom(),
+            Err(CollectionAllocErr::AllocErr) => unreachable!(),
             Ok(table) => { table }
         }
     }
@@ -794,22 +808,29 @@ impl<K, V> RawTable<K, V> {
         }
     }
 
-    /// Tries to create a new raw table from a given capacity. If it cannot allocate,
-    /// it returns with AllocErr.
-    pub fn try_new(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
+    fn new_internal(
+        capacity: usize,
+        fallibility: Fallibility,
+    ) -> Result<RawTable<K, V>, CollectionAllocErr> {
         unsafe {
-            let ret = RawTable::try_new_uninitialized(capacity)?;
+            let ret = RawTable::new_uninitialized_internal(capacity, fallibility)?;
             ptr::write_bytes(ret.hashes.ptr(), 0, capacity);
             Ok(ret)
         }
     }
 
+    /// Tries to create a new raw table from a given capacity. If it cannot allocate,
+    /// it returns with AllocErr.
+    pub fn try_new(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
+        Self::new_internal(capacity, Fallible)
+    }
+
     /// Creates a new raw table from a given capacity. All buckets are
     /// initially empty.
     pub fn new(capacity: usize) -> RawTable<K, V> {
-        match Self::try_new(capacity) {
+        match Self::new_internal(capacity, Infallible) {
             Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
-            Err(CollectionAllocErr::AllocErr) => oom(),
+            Err(CollectionAllocErr::AllocErr) => unreachable!(),
             Ok(table) => { table }
         }
     }
diff --git a/src/test/run-pass/allocator-alloc-one.rs b/src/test/run-pass/allocator-alloc-one.rs
index 12b115d093..f1fdbfc702 100644
--- a/src/test/run-pass/allocator-alloc-one.rs
+++ b/src/test/run-pass/allocator-alloc-one.rs
@@ -10,11 +10,11 @@
 
 #![feature(allocator_api, nonnull)]
 
-use std::alloc::{Alloc, Global, oom};
+use std::alloc::{Alloc, Global, Layout, oom};
 
 fn main() {
     unsafe {
-        let ptr = Global.alloc_one::<i32>().unwrap_or_else(|_| oom());
+        let ptr = Global.alloc_one::<i32>().unwrap_or_else(|_| oom(Layout::new::<i32>()));
         *ptr.as_ptr() = 4;
         assert_eq!(*ptr.as_ptr(), 4);
         Global.dealloc_one(ptr);
diff --git a/src/test/run-pass/realloc-16687.rs b/src/test/run-pass/realloc-16687.rs
index 308792e5d8..febd249d77 100644
--- a/src/test/run-pass/realloc-16687.rs
+++ b/src/test/run-pass/realloc-16687.rs
@@ -50,7 +50,7 @@ unsafe fn test_triangle() -> bool {
             println!("allocate({:?})", layout);
         }
 
-        let ret = Global.alloc(layout.clone()).unwrap_or_else(|_| oom());
+        let ret = Global.alloc(layout).unwrap_or_else(|_| oom(layout));
 
         if PRINT {
             println!("allocate({:?}) = {:?}", layout, ret);
@@ -72,8 +72,8 @@ unsafe fn test_triangle() -> bool {
             println!("reallocate({:?}, old={:?}, new={:?})", ptr, old, new);
         }
 
-        let ret = Global.realloc(NonNull::new_unchecked(ptr).as_opaque(), old.clone(), new.size())
-            .unwrap_or_else(|_| oom());
+        let ret = Global.realloc(NonNull::new_unchecked(ptr).as_opaque(), old, new.size())
+            .unwrap_or_else(|_| oom(Layout::from_size_align_unchecked(new.size(), old.align())));
 
         if PRINT {
             println!("reallocate({:?}, old={:?}, new={:?}) = {:?}",
diff --git a/src/test/run-pass/regions-mock-codegen.rs b/src/test/run-pass/regions-mock-codegen.rs
index 60a7f70931..745a19dec4 100644
--- a/src/test/run-pass/regions-mock-codegen.rs
+++ b/src/test/run-pass/regions-mock-codegen.rs
@@ -32,8 +32,8 @@ struct Ccx {
 
 fn alloc<'a>(_bcx : &'a arena) -> &'a Bcx<'a> {
     unsafe {
-        let ptr = Global.alloc(Layout::new::<Bcx>())
-            .unwrap_or_else(|_| oom());
+        let layout = Layout::new::<Bcx>();
+        let ptr = Global.alloc(layout).unwrap_or_else(|_| oom(layout));
         &*(ptr.as_ptr() as *const _)
     }
 }