bumpalo/collections/raw_vec.rs

// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(unstable_name_collisions)]
#![allow(dead_code)]

use crate::Bump;

use core::cmp;
use core::mem;
use core::ptr::{self, NonNull};

use crate::alloc::{handle_alloc_error, Alloc, Layout, UnstableLayoutMethods};
use crate::collections::CollectionAllocErr;
use crate::collections::CollectionAllocErr::*;
// use boxed::Box;
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces Unique::empty() on zero-sized types
/// * Produces Unique::empty() on zero-length allocations
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics)
/// * Guards against 32-bit systems allocating more than isize::MAX bytes
/// * Guards against overflowing your length
/// * Aborts on OOM
/// * Avoids freeing Unique::empty()
/// * Contains a ptr::Unique and thus endows the user with all related benefits
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec
/// to handle the actual things *stored* inside of a RawVec.
///
/// Note that a RawVec always forces its capacity to be usize::MAX for zero-sized types.
/// This enables you to use the capacity-growing logic to catch the overflows in your
/// length that might occur with zero-sized types.
///
/// However, this means that you need to be careful when round-tripping this type
/// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`,
/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
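///
/// A minimal, `ignore`d sketch of how this type is typically driven
/// (illustrative only; `RawVec` is an internal building block of this
/// crate's collections):
///
/// ```ignore
/// use bumpalo::Bump;
///
/// let bump = Bump::new();
/// let buf: RawVec<u32> = RawVec::with_capacity_in(4, &bump);
/// assert!(buf.cap() >= 4);
/// // The caller tracks `len` and writes elements through `ptr()`;
/// // RawVec only manages the allocation itself.
/// unsafe {
///     core::ptr::write(buf.ptr(), 42);
/// }
/// ```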
#[allow(missing_debug_implementations)]
pub struct RawVec<'a, T> {
    ptr: NonNull<T>,
    cap: usize,
    a: &'a Bump,
}

impl<'a, T> RawVec<'a, T> {
    /// Like `new` but parameterized over the choice of allocator for
    /// the returned RawVec.
    pub fn new_in(a: &'a Bump) -> Self {
        // `cap: 0` means "unallocated". zero-sized types are ignored.
        RawVec {
            ptr: NonNull::dangling(),
            cap: 0,
            a,
        }
    }

    /// Like `with_capacity` but parameterized over the choice of
    /// allocator for the returned RawVec.
    #[inline]
    pub fn with_capacity_in(cap: usize, a: &'a Bump) -> Self {
        RawVec::allocate_in(cap, false, a)
    }

    /// Like `with_capacity_zeroed` but parameterized over the choice
    /// of allocator for the returned RawVec.
    #[inline]
    pub fn with_capacity_zeroed_in(cap: usize, a: &'a Bump) -> Self {
        RawVec::allocate_in(cap, true, a)
    }

    fn allocate_in(cap: usize, zeroed: bool, mut a: &'a Bump) -> Self {
        unsafe {
            let elem_size = mem::size_of::<T>();

            let alloc_size = cap
                .checked_mul(elem_size)
                .unwrap_or_else(|| capacity_overflow());
            alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow());

            // handles ZSTs and `cap = 0` alike
            let ptr = if alloc_size == 0 {
                NonNull::<T>::dangling()
            } else {
                let align = mem::align_of::<T>();
                let layout = Layout::from_size_align(alloc_size, align).unwrap();
                let result = if zeroed {
                    a.alloc_zeroed(layout)
                } else {
                    Alloc::alloc(&mut a, layout)
                };
                match result {
                    Ok(ptr) => ptr.cast(),
                    Err(_) => handle_alloc_error(layout),
                }
            };

            RawVec { ptr, cap, a }
        }
    }
}

impl<'a, T> RawVec<'a, T> {
    /// Reconstitutes a RawVec from a pointer, capacity, and allocator.
    ///
    /// # Undefined Behavior
    ///
    /// The ptr must be allocated (via the given allocator `a`), and with the given capacity. The
    /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
    /// If the ptr and capacity come from a RawVec created via `a`, then this is guaranteed.
    pub unsafe fn from_raw_parts_in(ptr: *mut T, cap: usize, a: &'a Bump) -> Self {
        RawVec {
            ptr: NonNull::new_unchecked(ptr),
            cap,
            a,
        }
    }
}

impl<'a, T> RawVec<'a, T> {
    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// a dangling pointer (`NonNull::dangling()`) if `cap = 0` or T is
    /// zero-sized. In the former case, you must be careful.
    pub fn ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
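    ///
    /// For example (illustrative):
    ///
    /// ```ignore
    /// let bump = Bump::new();
    /// let zst: RawVec<()> = RawVec::new_in(&bump);
    /// assert_eq!(usize::MAX, zst.cap());
    /// ```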
    #[inline(always)]
    pub fn cap(&self) -> usize {
        if mem::size_of::<T>() == 0 {
            !0
        } else {
            self.cap
        }
    }

    /// Returns a shared reference to the allocator backing this RawVec.
    pub fn bump(&self) -> &'a Bump {
        self.a
    }

    fn current_layout(&self) -> Option<Layout> {
        if self.cap == 0 {
            None
        } else {
            // We have an allocated chunk of memory, so we can bypass runtime
            // checks to get our current layout.
            unsafe {
                let align = mem::align_of::<T>();
                let size = mem::size_of::<T>() * self.cap;
                Some(Layout::from_size_align_unchecked(size, align))
            }
        }
    }

    /// Doubles the size of the type's backing allocation. Doubling is common
    /// enough that it is worth a dedicated method, and slightly more efficient
    /// logic can be provided for it than for the general case.
    ///
    /// This function is ideal for pushing elements one at a time because it
    /// avoids the more general overflow-guarding computations that `reserve`
    /// must perform. You do, however, need to manually check whether
    /// `len == cap`.
    ///
    /// # Panics
    ///
    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
    ///   all `usize::MAX` slots in your imaginary buffer.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```ignore
    /// # use bumpalo::Bump;
    /// # use std::ptr;
    /// struct MyVec<'a, T> {
    ///     buf: RawVec<'a, T>,
    ///     len: usize,
    /// }
    ///
    /// impl<'a, T> MyVec<'a, T> {
    ///     pub fn push(&mut self, elem: T) {
    ///         if self.len == self.buf.cap() { self.buf.double(); }
    ///         // double would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         unsafe {
    ///             ptr::write(self.buf.ptr().add(self.len), elem);
    ///         }
    ///         self.len += 1;
    ///     }
    /// }
    /// # fn main() {
    /// #   let bump = Bump::new();
    /// #   let mut vec = MyVec { buf: RawVec::new_in(&bump), len: 0 };
    /// #   vec.push(1);
    /// # }
    /// ```
    #[inline(never)]
    #[cold]
    pub fn double(&mut self) {
        unsafe {
            let elem_size = mem::size_of::<T>();

            // since we set the capacity to usize::MAX when elem_size is
            // 0, getting to here necessarily means the RawVec is overfull.
            assert!(elem_size != 0, "capacity overflow");

            let (new_cap, uniq) = match self.current_layout() {
                Some(cur) => {
                    // Since we guarantee that we never allocate more than
                    // isize::MAX bytes, `elem_size * self.cap <= isize::MAX` as
                    // a precondition, so this can't overflow. Additionally the
                    // alignment will never be too large as to "not be
                    // satisfiable", so `Layout::from_size_align` will always
                    // return `Some`.
                    //
                    // tl;dr; we bypass runtime checks due to dynamic assertions
                    // in this module, allowing us to use
                    // `from_size_align_unchecked`.
                    let new_cap = 2 * self.cap;
                    let new_size = new_cap * elem_size;
                    alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
                    let ptr_res = self.a.realloc(self.ptr.cast(), cur, new_size);
                    match ptr_res {
                        Ok(ptr) => (new_cap, ptr.cast()),
                        Err(_) => handle_alloc_error(Layout::from_size_align_unchecked(
                            new_size,
                            cur.align(),
                        )),
                    }
                }
                None => {
                    // skip to 4 because tiny Vecs are dumb; but not if that
                    // would cause overflow
                    let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
                    match self.a.alloc_array::<T>(new_cap) {
                        Ok(ptr) => (new_cap, ptr),
                        Err(_) => handle_alloc_error(Layout::array::<T>(new_cap).unwrap()),
                    }
                }
            };
            self.ptr = uniq;
            self.cap = new_cap;
        }
    }

    /// Attempts to double the size of the type's backing allocation in place.
    /// Doubling is common enough that it is worth a dedicated method, and
    /// slightly more efficient logic can be provided for it than for the
    /// general case.
    ///
    /// Returns true if the reallocation attempt has succeeded, or false otherwise.
    ///
    /// # Panics
    ///
    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
    ///   all `usize::MAX` slots in your imaginary buffer.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
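    ///
    /// A sketch of the intended call pattern (illustrative only):
    ///
    /// ```ignore
    /// if !buf.double_in_place() {
    ///     // Couldn't grow in place; fall back to a moving reallocation.
    ///     buf.double();
    /// }
    /// ```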
    #[inline(never)]
    #[cold]
    pub fn double_in_place(&mut self) -> bool {
        unsafe {
            let elem_size = mem::size_of::<T>();
            let old_layout = match self.current_layout() {
                Some(layout) => layout,
                None => return false, // nothing to double
            };

            // since we set the capacity to usize::MAX when elem_size is
            // 0, getting to here necessarily means the RawVec is overfull.
            assert!(elem_size != 0, "capacity overflow");

            // Since we guarantee that we never allocate more than isize::MAX
            // bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so
            // this can't overflow.
            //
            // As with `double` above, we can go straight to
            // `Layout::from_size_align_unchecked` as we know this won't
            // overflow and the alignment is sufficiently small.
            let new_cap = 2 * self.cap;
            let new_size = new_cap * elem_size;
            alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
            match self.a.grow_in_place(self.ptr.cast(), old_layout, new_size) {
                Ok(_) => {
                    // We can't directly divide `size`.
                    self.cap = new_cap;
                    true
                }
                Err(_) => false,
            }
        }
    }

    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve_exact(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
    ) -> Result<(), CollectionAllocErr> {
        self.fallible_reserve_internal(used_cap, needed_extra_cap, Exact)
    }

    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already,
    /// will reallocate the minimum possible amount of memory necessary.
    /// Generally this will be exactly the amount of memory necessary,
    /// but in principle the allocator is free to give back more than
    /// we asked for.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
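    ///
    /// A sketch of the intended call pattern (illustrative; `len` is the
    /// element count the caller tracks separately):
    ///
    /// ```ignore
    /// buf.reserve_exact(len, additional);
    /// debug_assert!(buf.cap() >= len + additional);
    /// ```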
    pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
        self.infallible_reserve_internal(used_cap, needed_extra_cap, Exact)
    }

    /// Calculates the buffer's new size given that it'll hold `used_cap +
    /// needed_extra_cap` elements. This logic is used in amortized reserve methods.
    /// Returns the new capacity.
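    ///
    /// For example (illustrative): with `cap = 8`, `used_cap = 8`, and
    /// `needed_extra_cap = 1`, `required_cap` is 9 and `double_cap` is 16,
    /// so the new capacity is `max(16, 9) = 16`.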
    fn amortized_new_size(
        &self,
        used_cap: usize,
        needed_extra_cap: usize,
    ) -> Result<usize, CollectionAllocErr> {
        // Nothing we can really do about these checks :(
        let required_cap = used_cap
            .checked_add(needed_extra_cap)
            .ok_or(CapacityOverflow)?;
        // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
        let double_cap = self.cap * 2;
        // `double_cap` guarantees exponential growth.
        Ok(cmp::max(double_cap, required_cap))
    }

    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
    ) -> Result<(), CollectionAllocErr> {
        self.fallible_reserve_internal(used_cap, needed_extra_cap, Amortized)
    }

    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
    /// enough capacity, will reallocate enough space plus comfortable slack
    /// space to get amortized `O(1)` behavior. Will limit this behavior
    /// if it would needlessly cause itself to panic.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```ignore
    /// # use bumpalo::Bump;
    /// # use std::ptr;
    /// struct MyVec<'a, T> {
    ///     buf: RawVec<'a, T>,
    ///     len: usize,
    /// }
    ///
    /// impl<'a, T: Clone> MyVec<'a, T> {
    ///     pub fn push_all(&mut self, elems: &[T]) {
    ///         self.buf.reserve(self.len, elems.len());
    ///         // reserve would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         for x in elems {
    ///             unsafe {
    ///                 ptr::write(self.buf.ptr().add(self.len), x.clone());
    ///             }
    ///             self.len += 1;
    ///         }
    ///     }
    /// }
    /// # fn main() {
    /// #   let bump = Bump::new();
    /// #   let mut vector = MyVec { buf: RawVec::new_in(&bump), len: 0 };
    /// #   vector.push_all(&[1, 3, 5, 7, 9]);
    /// # }
    /// ```
    #[inline(always)]
    pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
        self.infallible_reserve_internal(used_cap, needed_extra_cap, Amortized)
    }

    /// Attempts to ensure that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
    /// enough capacity, will reallocate in place enough space plus comfortable slack
    /// space to get amortized `O(1)` behavior. Will limit this behavior
    /// if it would needlessly cause itself to panic.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// Returns true if the reallocation attempt has succeeded, or false otherwise.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
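    ///
    /// A sketch of the intended call pattern (illustrative only; note that
    /// `reserve` no-ops if the in-place attempt already secured the space):
    ///
    /// ```ignore
    /// if !buf.reserve_in_place(len, additional) {
    ///     // Couldn't extend in place; fall back to a moving `reserve`.
    ///     buf.reserve(len, additional);
    /// }
    /// ```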
    pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool {
        unsafe {
            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // If the current `cap` is 0, we can't reallocate in place.
            let old_layout = match self.current_layout() {
                Some(layout) => layout,
                None => return false,
            };

            // Don't actually need any more capacity. Wrapping in case they
            // give a bad `used_cap`.
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
                return false;
            }

            let new_cap = self
                .amortized_new_size(used_cap, needed_extra_cap)
                .unwrap_or_else(|_| capacity_overflow());

            // Here, `cap < used_cap + needed_extra_cap <= new_cap`
            // (regardless of whether `self.cap - used_cap` wrapped).
            // Therefore we can safely call grow_in_place.

            let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
            // FIXME: may crash and burn on over-reserve
            alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow());
            match self
                .a
                .grow_in_place(self.ptr.cast(), old_layout, new_layout.size())
            {
                Ok(_) => {
                    self.cap = new_cap;
                    true
                }
                Err(_) => false,
            }
        }
    }

    /// Shrinks the allocation down to the specified amount. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
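    ///
    /// A minimal, `ignore`d sketch (names are illustrative):
    ///
    /// ```ignore
    /// let bump = Bump::new();
    /// let mut buf: RawVec<u32> = RawVec::with_capacity_in(16, &bump);
    /// buf.shrink_to_fit(4); // keep room for exactly 4 elements
    /// assert_eq!(4, buf.cap());
    /// buf.shrink_to_fit(0); // deallocates the buffer entirely
    /// ```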
    pub fn shrink_to_fit(&mut self, amount: usize) {
        let elem_size = mem::size_of::<T>();

        // Set the `cap` because they might be about to promote to a `Box<[T]>`
        if elem_size == 0 {
            self.cap = amount;
            return;
        }

        // This check is my waterloo; it's the only thing Vec wouldn't have to do.
        assert!(self.cap >= amount, "Tried to shrink to a larger capacity");

        if amount == 0 {
            // We want to create a new zero-length vector within the
            // same allocator. We use ptr::write to avoid an
            // erroneous attempt to drop the contents, and we use
            // ptr::read to sidestep the restriction on destructuring
            // types that implement Drop.

            unsafe {
                let a = self.a;
                self.dealloc_buffer();
                ptr::write(self, RawVec::new_in(a));
            }
        } else if self.cap != amount {
            unsafe {
                // We know here that our `amount` is greater than zero. This
                // implies, via the assert above, that capacity is also greater
                // than zero, which means that we've got a current layout that
                // "fits"
                //
                // We also know that `self.cap` is greater than `amount`, and
                // consequently we don't need runtime checks for creating either
                // layout
                let old_size = elem_size * self.cap;
                let new_size = elem_size * amount;
                let align = mem::align_of::<T>();
                let old_layout = Layout::from_size_align_unchecked(old_size, align);
                match self.a.realloc(self.ptr.cast(), old_layout, new_size) {
                    Ok(p) => self.ptr = p.cast(),
                    Err(_) => {
                        handle_alloc_error(Layout::from_size_align_unchecked(new_size, align))
                    }
                }
            }
            self.cap = amount;
        }
    }
}

#[cfg(feature = "boxed")]
impl<'a, T> RawVec<'a, T> {
    /// Converts the entire buffer into `Box<[T]>`.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed. (See description of type for details.)
    ///
    /// # Undefined Behavior
    ///
    /// All elements of `RawVec<T>` must be initialized. Notice that
    /// the rules around uninitialized boxed values are not finalized yet,
    /// but until they are, it is advisable to avoid them.
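    ///
    /// A sketch of the intended use (illustrative; requires the `boxed`
    /// feature, and every one of the `cap` slots must be initialized first):
    ///
    /// ```ignore
    /// let bump = Bump::new();
    /// let buf: RawVec<u32> = RawVec::with_capacity_in(2, &bump);
    /// unsafe {
    ///     core::ptr::write(buf.ptr(), 1);
    ///     core::ptr::write(buf.ptr().add(1), 2);
    ///     let boxed = buf.into_box();
    ///     assert_eq!(&*boxed, &[1, 2]);
    /// }
    /// ```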
    pub unsafe fn into_box(self) -> crate::boxed::Box<'a, [T]> {
        use crate::boxed::Box;

        // NOTE: not calling `cap()` here; actually using the real `cap` field!
        let slice = core::slice::from_raw_parts_mut(self.ptr(), self.cap);
        let output: Box<'a, [T]> = Box::from_raw(slice);
        mem::forget(self);
        output
    }
}

enum Fallibility {
    Fallible,
    Infallible,
}

use self::Fallibility::*;

enum ReserveStrategy {
    Exact,
    Amortized,
}

use self::ReserveStrategy::*;

impl<'a, T> RawVec<'a, T> {
    #[inline(always)]
    fn fallible_reserve_internal(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
        strategy: ReserveStrategy,
    ) -> Result<(), CollectionAllocErr> {
        // This portion of the method should always be inlined.
        if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
            return Ok(());
        }
        // This portion of the method should never be inlined, and will only be called when
        // the check above has confirmed that it is necessary.
        self.reserve_internal_or_error(used_cap, needed_extra_cap, Fallible, strategy)
    }

    #[inline(always)]
    fn infallible_reserve_internal(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
        strategy: ReserveStrategy,
    ) {
        // This portion of the method should always be inlined.
        if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
            return;
        }
        // This portion of the method should never be inlined, and will only be called when
        // the check above has confirmed that it is necessary.
        self.reserve_internal_or_panic(used_cap, needed_extra_cap, strategy)
    }

    #[inline(never)]
    fn reserve_internal_or_panic(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
        strategy: ReserveStrategy,
    ) {
        // Delegates the call to `reserve_internal` and panics in the event of an error.
        // This allows the method to have a return type of `()`, simplifying the assembly at the
        // call site.
        match self.reserve_internal(used_cap, needed_extra_cap, Infallible, strategy) {
            Err(CapacityOverflow) => capacity_overflow(),
            Err(AllocErr) => unreachable!(),
            Ok(()) => { /* yay */ }
        }
    }

    #[inline(never)]
    fn reserve_internal_or_error(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
        fallibility: Fallibility,
        strategy: ReserveStrategy,
    ) -> Result<(), CollectionAllocErr> {
        // Delegates the call to `reserve_internal`, which can be inlined.
        self.reserve_internal(used_cap, needed_extra_cap, fallibility, strategy)
    }

    /// Helper method to reserve additional space, reallocating the backing memory.
    /// The caller is responsible for confirming that there is not already enough space available.
    fn reserve_internal(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
        fallibility: Fallibility,
        strategy: ReserveStrategy,
    ) -> Result<(), CollectionAllocErr> {
        unsafe {
            use crate::AllocErr;

            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Nothing we can really do about these checks :(
            let new_cap = match strategy {
                Exact => used_cap
                    .checked_add(needed_extra_cap)
                    .ok_or(CapacityOverflow)?,
                Amortized => self.amortized_new_size(used_cap, needed_extra_cap)?,
            };
            let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;

            alloc_guard(new_layout.size())?;

            let res = match self.current_layout() {
                Some(layout) => {
                    debug_assert!(new_layout.align() == layout.align());
                    self.a.realloc(self.ptr.cast(), layout, new_layout.size())
                }
                None => Alloc::alloc(&mut self.a, new_layout),
            };

            if let (Err(AllocErr), Infallible) = (&res, fallibility) {
                handle_alloc_error(new_layout);
            }

            self.ptr = res?.cast();
            self.cap = new_cap;

            Ok(())
        }
    }
}

impl<'a, T> RawVec<'a, T> {
    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
    pub unsafe fn dealloc_buffer(&mut self) {
        let elem_size = mem::size_of::<T>();
        if elem_size != 0 {
            if let Some(layout) = self.current_layout() {
                self.a.dealloc(self.ptr.cast(), layout);
            }
        }
    }
}

impl<'a, T> Drop for RawVec<'a, T> {
    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
    fn drop(&mut self) {
        unsafe {
            self.dealloc_buffer();
        }
    }
}

// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects
// * We don't overflow `usize::MAX` and actually allocate too little
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space. e.g. PAE or x32

#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), CollectionAllocErr> {
    if mem::size_of::<usize>() < 8 && alloc_size > ::core::isize::MAX as usize {
        Err(CapacityOverflow)
    } else {
        Ok(())
    }
}

// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
fn capacity_overflow() -> ! {
    panic!("capacity overflow")
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn reserve_does_not_overallocate() {
        let bump = Bump::new();
        {
            let mut v: RawVec<u32> = RawVec::new_in(&bump);
            // First `reserve` allocates like `reserve_exact`
            v.reserve(0, 9);
            assert_eq!(9, v.cap());
        }

        {
            let mut v: RawVec<u32> = RawVec::new_in(&bump);
            v.reserve(0, 7);
            assert_eq!(7, v.cap());
            // 97 is more than double of 7, so `reserve` should work
            // like `reserve_exact`.
            v.reserve(7, 90);
            assert_eq!(97, v.cap());
        }

        {
            let mut v: RawVec<u32> = RawVec::new_in(&bump);
            v.reserve(0, 12);
            assert_eq!(12, v.cap());
            v.reserve(12, 3);
            // 3 is less than half of 12, so `reserve` must grow
            // exponentially. At the time of writing this test, the growth
            // factor is 2, so the new capacity is 24; however, a growth
            // factor of 1.5 is OK too. Hence `>= 18` in the assert.
            assert!(v.cap() >= 12 + 12 / 2);
        }
    }
}