bumpalo/collections/raw_vec.rs
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(unstable_name_collisions)]
#![allow(dead_code)]

use crate::Bump;

use core::cmp;
use core::mem;
use core::ptr::{self, NonNull};

use crate::alloc::{handle_alloc_error, Alloc, Layout, UnstableLayoutMethods};
use crate::collections::CollectionAllocErr;
use crate::collections::CollectionAllocErr::*;
// use boxed::Box;

/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces `NonNull::dangling()` on zero-sized types
/// * Produces `NonNull::dangling()` on zero-length allocations
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics)
/// * Guards against 32-bit systems allocating more than `isize::MAX` bytes
/// * Guards against overflowing your length
/// * Aborts on OOM
/// * Avoids freeing `NonNull::dangling()`
/// * Contains a `NonNull` pointer and thus endows the user with all related benefits
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec
/// to handle the actual things *stored* inside of a RawVec.
///
/// Note that a RawVec always forces its capacity to be `usize::MAX` for zero-sized types.
/// This enables you to use the capacity-growing logic to catch the overflows in your length
/// that might occur with zero-sized types.
///
/// However, this means that you need to be careful when round-tripping this type
/// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`,
/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
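///
/// # Examples
///
/// A minimal sketch of using RawVec as the backing store for a custom
/// container (not compiled, like the other `ignore`d examples in this
/// module; the `use` path assumes this module were publicly reachable):
///
/// ```ignore
/// use bumpalo::Bump;
/// use bumpalo::collections::raw_vec::RawVec;
///
/// let bump = Bump::new();
/// let mut buf: RawVec<u32> = RawVec::with_capacity_in(16, &bump);
/// assert!(buf.cap() >= 16);
/// // RawVec hands out raw storage only; tracking `len` and
/// // writing/dropping the elements is the caller's job.
/// ```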
#[allow(missing_debug_implementations)]
pub struct RawVec<'a, T> {
    ptr: NonNull<T>,
    cap: usize,
    a: &'a Bump,
}

impl<'a, T> RawVec<'a, T> {
    /// Like `new` but parameterized over the choice of allocator for
    /// the returned RawVec.
    pub fn new_in(a: &'a Bump) -> Self {
        // !0 is usize::MAX. This branch should be stripped at compile time.
        // FIXME(mark-i-m): use this line when `if`s are allowed in `const`
        //let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };

        // A dangling, well-aligned pointer doubles as "unallocated" and
        // "zero-sized allocation"
        RawVec {
            ptr: unsafe { NonNull::new_unchecked(mem::align_of::<T>() as *mut T) },
            // FIXME(mark-i-m): use `cap` when ifs are allowed in const
            cap: [0, !0][(mem::size_of::<T>() == 0) as usize],
            a,
        }
    }

    /// Like `with_capacity` but parameterized over the choice of
    /// allocator for the returned RawVec.
    #[inline]
    pub fn with_capacity_in(cap: usize, a: &'a Bump) -> Self {
        RawVec::allocate_in(cap, false, a)
    }

    /// Like `with_capacity_zeroed` but parameterized over the choice
    /// of allocator for the returned RawVec.
    #[inline]
    pub fn with_capacity_zeroed_in(cap: usize, a: &'a Bump) -> Self {
        RawVec::allocate_in(cap, true, a)
    }

    fn allocate_in(cap: usize, zeroed: bool, mut a: &'a Bump) -> Self {
        unsafe {
            let elem_size = mem::size_of::<T>();

            let alloc_size = cap
                .checked_mul(elem_size)
                .unwrap_or_else(|| capacity_overflow());
            alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow());

            // handles ZSTs and `cap = 0` alike
            let ptr = if alloc_size == 0 {
                NonNull::<T>::dangling()
            } else {
                let align = mem::align_of::<T>();
                let layout = Layout::from_size_align(alloc_size, align).unwrap();
                let result = if zeroed {
                    a.alloc_zeroed(layout)
                } else {
                    Alloc::alloc(&mut a, layout)
                };
                match result {
                    Ok(ptr) => ptr.cast(),
                    Err(_) => handle_alloc_error(layout),
                }
            };

            RawVec { ptr, cap, a }
        }
    }
}

impl<'a, T> RawVec<'a, T> {
    /// Reconstitutes a RawVec from a pointer, capacity, and allocator.
    ///
    /// # Undefined Behavior
    ///
    /// The ptr must be allocated (via the given allocator `a`), and with the given capacity. The
    /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
    /// If the ptr and capacity come from a RawVec created via `a`, then this is guaranteed.
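    ///
    /// # Examples
    ///
    /// A minimal sketch of the intended round-trip (not compiled;
    /// illustrative only):
    ///
    /// ```ignore
    /// let bump = Bump::new();
    /// let v: RawVec<u64> = RawVec::with_capacity_in(8, &bump);
    /// let (ptr, cap) = (v.ptr(), v.cap());
    /// core::mem::forget(v);
    /// // Guaranteed valid: `ptr` and `cap` came from a RawVec created via `&bump`.
    /// let v = unsafe { RawVec::from_raw_parts_in(ptr, cap, &bump) };
    /// ```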
    pub unsafe fn from_raw_parts_in(ptr: *mut T, cap: usize, a: &'a Bump) -> Self {
        RawVec {
            ptr: NonNull::new_unchecked(ptr),
            cap,
            a,
        }
    }
}

impl<'a, T> RawVec<'a, T> {
    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// a dangling pointer if `cap = 0` or `T` is zero-sized. In the former case, you must
    /// be careful.
    pub fn ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
    #[inline(always)]
    pub fn cap(&self) -> usize {
        if mem::size_of::<T>() == 0 {
            !0
        } else {
            self.cap
        }
    }

    /// Returns a shared reference to the allocator backing this RawVec.
    pub fn bump(&self) -> &'a Bump {
        self.a
    }

    fn current_layout(&self) -> Option<Layout> {
        if self.cap == 0 {
            None
        } else {
            // We have an allocated chunk of memory, so we can bypass runtime
            // checks to get our current layout.
            unsafe {
                let align = mem::align_of::<T>();
                let size = mem::size_of::<T>() * self.cap;
                Some(Layout::from_size_align_unchecked(size, align))
            }
        }
    }

    /// Doubles the size of the type's backing allocation. This is common enough
    /// to want to do that it's easiest to just have a dedicated method. Slightly
    /// more efficient logic can be provided for this than the general case.
    ///
    /// This function is ideal for when pushing elements one-at-a-time because
    /// you don't need to incur the costs of the more general computations
    /// reserve needs to do to guard against overflow. You do however need to
    /// manually check if your `len == cap`.
    ///
    /// # Panics
    ///
    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
    ///   all `usize::MAX` slots in your imaginary buffer.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```ignore
    /// # #![feature(alloc, raw_vec_internals)]
    /// # extern crate alloc;
    /// # use std::ptr;
    /// # use alloc::raw_vec::RawVec;
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T> MyVec<T> {
    ///     pub fn push(&mut self, elem: T) {
    ///         if self.len == self.buf.cap() { self.buf.double(); }
    ///         // double would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         unsafe {
    ///             ptr::write(self.buf.ptr().add(self.len), elem);
    ///         }
    ///         self.len += 1;
    ///     }
    /// }
    /// # fn main() {
    /// #     let mut vec = MyVec { buf: RawVec::new(), len: 0 };
    /// #     vec.push(1);
    /// # }
    /// ```
    #[inline(never)]
    #[cold]
    pub fn double(&mut self) {
        unsafe {
            let elem_size = mem::size_of::<T>();

            // since we set the capacity to usize::MAX when elem_size is
            // 0, getting to here necessarily means the RawVec is overfull.
            assert!(elem_size != 0, "capacity overflow");

            let (new_cap, uniq) = match self.current_layout() {
                Some(cur) => {
                    // Since we guarantee that we never allocate more than
                    // isize::MAX bytes, `elem_size * self.cap <= isize::MAX` as
                    // a precondition, so this can't overflow. Additionally the
                    // alignment will never be too large as to "not be
                    // satisfiable", so `Layout::from_size_align` will always
                    // return `Some`.
                    //
                    // tl;dr; we bypass runtime checks due to dynamic assertions
                    // in this module, allowing us to use
                    // `from_size_align_unchecked`.
                    let new_cap = 2 * self.cap;
                    let new_size = new_cap * elem_size;
                    alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
                    let ptr_res = self.a.realloc(self.ptr.cast(), cur, new_size);
                    match ptr_res {
                        Ok(ptr) => (new_cap, ptr.cast()),
                        Err(_) => handle_alloc_error(Layout::from_size_align_unchecked(
                            new_size,
                            cur.align(),
                        )),
                    }
                }
                None => {
                    // skip to 4 because tiny Vec's are dumb; but not if that
                    // would cause overflow
                    let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
                    match self.a.alloc_array::<T>(new_cap) {
                        Ok(ptr) => (new_cap, ptr),
                        Err(_) => handle_alloc_error(Layout::array::<T>(new_cap).unwrap()),
                    }
                }
            };
            self.ptr = uniq;
            self.cap = new_cap;
        }
    }

    /// Attempts to double the size of the type's backing allocation in place. This is common
    /// enough to want to do that it's easiest to just have a dedicated method. Slightly
    /// more efficient logic can be provided for this than the general case.
    ///
    /// Returns true if the reallocation attempt has succeeded, or false otherwise.
    ///
    /// # Panics
    ///
    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
    ///   all `usize::MAX` slots in your imaginary buffer.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
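    ///
    /// # Examples
    ///
    /// A minimal sketch of preferring in-place growth and falling back to
    /// `double` (not compiled; illustrative only):
    ///
    /// ```ignore
    /// if self.len == self.buf.cap() {
    ///     // A bump allocator can often extend its most recent allocation
    ///     // in place; if not, fall back to a moving reallocation.
    ///     if !self.buf.double_in_place() {
    ///         self.buf.double();
    ///     }
    /// }
    /// ```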
    #[inline(never)]
    #[cold]
    pub fn double_in_place(&mut self) -> bool {
        unsafe {
            let elem_size = mem::size_of::<T>();
            let old_layout = match self.current_layout() {
                Some(layout) => layout,
                None => return false, // nothing to double
            };

            // since we set the capacity to usize::MAX when elem_size is
            // 0, getting to here necessarily means the RawVec is overfull.
            assert!(elem_size != 0, "capacity overflow");

            // Since we guarantee that we never allocate more than isize::MAX
            // bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so
            // this can't overflow.
            //
            // As with `double` above, we can go straight to
            // `Layout::from_size_align_unchecked` as we know this won't
            // overflow and the alignment is sufficiently small.
            let new_cap = 2 * self.cap;
            let new_size = new_cap * elem_size;
            alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
            match self.a.grow_in_place(self.ptr.cast(), old_layout, new_size) {
                Ok(_) => {
                    // `grow_in_place` doesn't report a new size, so use the
                    // `new_cap` computed above rather than dividing `size`.
                    self.cap = new_cap;
                    true
                }
                Err(_) => false,
            }
        }
    }

    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve_exact(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
    ) -> Result<(), CollectionAllocErr> {
        self.reserve_internal(used_cap, needed_extra_cap, Fallible, Exact)
    }

    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already,
    /// will reallocate the minimum possible amount of memory necessary.
    /// Generally this will be exactly the amount of memory necessary,
    /// but in principle the allocator is free to give back more than
    /// we asked for.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
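    ///
    /// # Examples
    ///
    /// A minimal sketch (not compiled; illustrative only):
    ///
    /// ```ignore
    /// let bump = Bump::new();
    /// let mut buf: RawVec<u8> = RawVec::new_in(&bump);
    /// // Make room for exactly 10 more elements beyond the 0 in use.
    /// buf.reserve_exact(0, 10);
    /// assert!(buf.cap() >= 10);
    /// ```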
    pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
        match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Exact) {
            Err(CapacityOverflow) => capacity_overflow(),
            Err(AllocErr) => unreachable!(),
            Ok(()) => { /* yay */ }
        }
    }

    /// Calculates the buffer's new size given that it'll hold `used_cap +
    /// needed_extra_cap` elements. This logic is used in amortized reserve methods.
    /// Returns the new capacity.
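    ///
    /// For example, with `cap = 12`, `used_cap = 12`, and `needed_extra_cap = 3`,
    /// the required capacity is 15 but doubling the current capacity yields 24,
    /// so 24 is returned.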
    fn amortized_new_size(
        &self,
        used_cap: usize,
        needed_extra_cap: usize,
    ) -> Result<usize, CollectionAllocErr> {
        // Nothing we can really do about these checks :(
        let required_cap = used_cap
            .checked_add(needed_extra_cap)
            .ok_or(CapacityOverflow)?;
        // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
        let double_cap = self.cap * 2;
        // `double_cap` guarantees exponential growth.
        Ok(cmp::max(double_cap, required_cap))
    }

    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
    ) -> Result<(), CollectionAllocErr> {
        self.reserve_internal(used_cap, needed_extra_cap, Fallible, Amortized)
    }

    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
    /// enough capacity, will reallocate enough space plus comfortable slack
    /// space to get amortized `O(1)` behavior. Will limit this behavior
    /// if it would needlessly cause itself to panic.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```ignore
    /// # #![feature(alloc, raw_vec_internals)]
    /// # extern crate alloc;
    /// # use std::ptr;
    /// # use alloc::raw_vec::RawVec;
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T: Clone> MyVec<T> {
    ///     pub fn push_all(&mut self, elems: &[T]) {
    ///         self.buf.reserve(self.len, elems.len());
    ///         // reserve would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         for x in elems {
    ///             unsafe {
    ///                 ptr::write(self.buf.ptr().add(self.len), x.clone());
    ///             }
    ///             self.len += 1;
    ///         }
    ///     }
    /// }
    /// # fn main() {
    /// #     let mut vector = MyVec { buf: RawVec::new(), len: 0 };
    /// #     vector.push_all(&[1, 3, 5, 7, 9]);
    /// # }
    /// ```
    pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
        match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Amortized) {
            Err(CapacityOverflow) => capacity_overflow(),
            Err(AllocErr) => unreachable!(),
            Ok(()) => { /* yay */ }
        }
    }

    /// Attempts to ensure that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
    /// enough capacity, will reallocate in place enough space plus comfortable slack
    /// space to get amortized `O(1)` behavior. Will limit this behavior
    /// if it would needlessly cause itself to panic.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// Returns true if the reallocation attempt has succeeded, or false otherwise.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
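    ///
    /// # Examples
    ///
    /// A minimal sketch of a caller that only moves the buffer when in-place
    /// growth fails (not compiled; illustrative only):
    ///
    /// ```ignore
    /// if buf.reserve_in_place(len, extra) {
    ///     // Capacity grew without moving the buffer, so existing raw
    ///     // pointers into it remain valid.
    /// } else {
    ///     buf.reserve(len, extra); // may move the allocation
    /// }
    /// ```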
    pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool {
        unsafe {
            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity. If the current `cap` is 0, we can't
            // reallocate in place.
            // Wrapping in case they give a bad `used_cap`
            let old_layout = match self.current_layout() {
                Some(layout) => layout,
                None => return false,
            };
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
                return false;
            }

            let new_cap = self
                .amortized_new_size(used_cap, needed_extra_cap)
                .unwrap_or_else(|_| capacity_overflow());

            // Here, `cap < used_cap + needed_extra_cap <= new_cap`
            // (regardless of whether `self.cap - used_cap` wrapped).
            // Therefore we can safely call grow_in_place.

            let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
            // FIXME: may crash and burn on over-reserve
            alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow());
            match self
                .a
                .grow_in_place(self.ptr.cast(), old_layout, new_layout.size())
            {
                Ok(_) => {
                    self.cap = new_cap;
                    true
                }
                Err(_) => false,
            }
        }
    }

    /// Shrinks the allocation down to the specified amount. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
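    ///
    /// # Examples
    ///
    /// A minimal sketch (not compiled; illustrative only):
    ///
    /// ```ignore
    /// let bump = Bump::new();
    /// let mut buf: RawVec<u32> = RawVec::with_capacity_in(100, &bump);
    /// // Give back everything beyond the 10 slots actually in use.
    /// buf.shrink_to_fit(10);
    /// assert_eq!(10, buf.cap());
    /// ```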
    pub fn shrink_to_fit(&mut self, amount: usize) {
        let elem_size = mem::size_of::<T>();

        // Set the `cap` because they might be about to promote to a `Box<[T]>`
        if elem_size == 0 {
            self.cap = amount;
            return;
        }

        // This check is my waterloo; it's the only thing Vec wouldn't have to do.
        assert!(self.cap >= amount, "Tried to shrink to a larger capacity");

        if amount == 0 {
            // We want to create a new zero-length vector within the
            // same allocator. We use ptr::write to avoid an
            // erroneous attempt to drop the contents of `self`.

            unsafe {
                let a = self.a;
                self.dealloc_buffer();
                ptr::write(self, RawVec::new_in(a));
            }
        } else if self.cap != amount {
            unsafe {
                // We know here that our `amount` is greater than zero. This
                // implies, via the assert above, that capacity is also greater
                // than zero, which means that we've got a current layout that
                // "fits"
                //
                // We also know that `self.cap` is greater than `amount`, and
                // consequently we don't need runtime checks for creating either
                // layout
                let old_size = elem_size * self.cap;
                let new_size = elem_size * amount;
                let align = mem::align_of::<T>();
                let old_layout = Layout::from_size_align_unchecked(old_size, align);
                match self.a.realloc(self.ptr.cast(), old_layout, new_size) {
                    Ok(p) => self.ptr = p.cast(),
                    Err(_) => {
                        handle_alloc_error(Layout::from_size_align_unchecked(new_size, align))
                    }
                }
            }
            self.cap = amount;
        }
    }
}

enum Fallibility {
    Fallible,
    Infallible,
}

use self::Fallibility::*;

enum ReserveStrategy {
    Exact,
    Amortized,
}

use self::ReserveStrategy::*;

impl<'a, T> RawVec<'a, T> {
    fn reserve_internal(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
        fallibility: Fallibility,
        strategy: ReserveStrategy,
    ) -> Result<(), CollectionAllocErr> {
        unsafe {
            use crate::alloc::AllocErr;

            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity.
            // Wrapping in case they gave a bad `used_cap`.
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
                return Ok(());
            }

            // Nothing we can really do about these checks :(
            let new_cap = match strategy {
                Exact => used_cap
                    .checked_add(needed_extra_cap)
                    .ok_or(CapacityOverflow)?,
                Amortized => self.amortized_new_size(used_cap, needed_extra_cap)?,
            };
            let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;

            alloc_guard(new_layout.size())?;

            let res = match self.current_layout() {
                Some(layout) => {
                    debug_assert!(new_layout.align() == layout.align());
                    self.a.realloc(self.ptr.cast(), layout, new_layout.size())
                }
                None => Alloc::alloc(&mut self.a, new_layout),
            };

            if let (Err(AllocErr), Infallible) = (&res, fallibility) {
                handle_alloc_error(new_layout);
            }

            self.ptr = res?.cast();
            self.cap = new_cap;

            Ok(())
        }
    }
}

impl<'a, T> RawVec<'a, T> {
    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
    pub unsafe fn dealloc_buffer(&mut self) {
        let elem_size = mem::size_of::<T>();
        if elem_size != 0 {
            if let Some(layout) = self.current_layout() {
                self.a.dealloc(self.ptr.cast(), layout);
            }
        }
    }
}

impl<'a, T> Drop for RawVec<'a, T> {
    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
    fn drop(&mut self) {
        unsafe {
            self.dealloc_buffer();
        }
    }
}

// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects
// * We don't overflow `usize::MAX` and actually allocate too little
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space, e.g. PAE or x32.
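//
// For example, on a 32-bit target a request of 2^31 bytes exceeds
// `isize::MAX` (2^31 - 1) and makes `alloc_guard` return `CapacityOverflow`,
// while on 64-bit the same request passes the guard and is left to the
// allocator to refuse.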

#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), CollectionAllocErr> {
    if mem::size_of::<usize>() < 8 && alloc_size > ::core::isize::MAX as usize {
        Err(CapacityOverflow)
    } else {
        Ok(())
    }
}

// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
fn capacity_overflow() -> ! {
    panic!("capacity overflow")
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn reserve_does_not_overallocate() {
        let bump = Bump::new();
        {
            let mut v: RawVec<u32> = RawVec::new_in(&bump);
            // First `reserve` allocates like `reserve_exact`
            v.reserve(0, 9);
            assert_eq!(9, v.cap());
        }

        {
            let mut v: RawVec<u32> = RawVec::new_in(&bump);
            v.reserve(0, 7);
            assert_eq!(7, v.cap());
            // 97 is more than double of 7, so `reserve` should work
            // like `reserve_exact`.
            v.reserve(7, 90);
            assert_eq!(97, v.cap());
        }

        {
            let mut v: RawVec<u32> = RawVec::new_in(&bump);
            v.reserve(0, 12);
            assert_eq!(12, v.cap());
            v.reserve(12, 3);
            // 3 is less than half of 12, so `reserve` must grow
            // exponentially. At the time of writing this test grow
            // factor is 2, so new capacity is 24, however, grow factor
            // of 1.5 is OK too. Hence `>= 18` in assert.
            assert!(v.cap() >= 12 + 12 / 2);
        }
    }
}