// crossbeam_epoch/atomic.rs

use core::borrow::{Borrow, BorrowMut};
use core::cmp;
use core::fmt;
use core::marker::PhantomData;
use core::mem::{self, MaybeUninit};
use core::ops::{Deref, DerefMut};
use core::slice;
use core::sync::atomic::Ordering;

use crate::alloc::alloc;
use crate::alloc::boxed::Box;
use crate::guard::Guard;
use crate::primitive::sync::atomic::AtomicUsize;
use crossbeam_utils::atomic::AtomicConsume;

/// Given the ordering for the success case in a compare-exchange operation, returns the strongest
/// appropriate ordering for the failure case.
#[inline]
fn strongest_failure_ordering(ord: Ordering) -> Ordering {
    use self::Ordering::*;
    match ord {
        Relaxed | Release => Relaxed,
        Acquire | AcqRel => Acquire,
        _ => SeqCst,
    }
}

/// The error returned on a failed compare-and-set operation.
// TODO: remove in the next major version.
#[deprecated(note = "Use `CompareExchangeError` instead")]
pub type CompareAndSetError<'g, T, P> = CompareExchangeError<'g, T, P>;

/// The error returned on a failed compare-exchange operation.
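///
/// # Examples
///
/// A minimal sketch of inspecting a failed exchange: the error carries both the
/// pointer that was actually found and the rejected `new` value, so a caller can
/// retry without reallocating.
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::new(1234);
/// let guard = &epoch::pin();
///
/// // `current` is null but the atomic holds a real pointer, so this fails.
/// let err = a
///     .compare_exchange(Shared::null(), Owned::new(5678), SeqCst, SeqCst, guard)
///     .unwrap_err();
/// assert_eq!(unsafe { err.current.deref() }, &1234);
/// assert_eq!(*err.new, 5678);
/// ```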
pub struct CompareExchangeError<'g, T: ?Sized + Pointable, P: Pointer<T>> {
    /// The value in the atomic pointer at the time of the failed operation.
    pub current: Shared<'g, T>,

    /// The new value, which the operation failed to store.
    pub new: P,
}

impl<T, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareExchangeError<'_, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("CompareExchangeError")
            .field("current", &self.current)
            .field("new", &self.new)
            .finish()
    }
}

/// Memory orderings for compare-and-set operations.
///
/// A compare-and-set operation can have different memory orderings depending on whether it
/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
///
/// The two ways of specifying orderings for compare-and-set are:
///
/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
///    ordering is chosen.
/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
///    for the failure case.
// TODO: remove in the next major version.
#[deprecated(
    note = "`compare_and_set` and `compare_and_set_weak` that use this trait are deprecated, \
            use `compare_exchange` or `compare_exchange_weak` instead"
)]
pub trait CompareAndSetOrdering {
    /// The ordering of the operation when it succeeds.
    fn success(&self) -> Ordering;

    /// The ordering of the operation when it fails.
    ///
    /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than
    /// the success ordering.
    fn failure(&self) -> Ordering;
}

#[allow(deprecated)]
impl CompareAndSetOrdering for Ordering {
    #[inline]
    fn success(&self) -> Ordering {
        *self
    }

    #[inline]
    fn failure(&self) -> Ordering {
        strongest_failure_ordering(*self)
    }
}

#[allow(deprecated)]
impl CompareAndSetOrdering for (Ordering, Ordering) {
    #[inline]
    fn success(&self) -> Ordering {
        self.0
    }

    #[inline]
    fn failure(&self) -> Ordering {
        self.1
    }
}

/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
#[inline]
fn low_bits<T: ?Sized + Pointable>() -> usize {
    (1 << T::ALIGN.trailing_zeros()) - 1
}

/// Panics if the pointer is not properly aligned.
#[inline]
fn ensure_aligned<T: ?Sized + Pointable>(raw: usize) {
    assert_eq!(raw & low_bits::<T>(), 0, "unaligned pointer");
}

/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
///
/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
#[inline]
fn compose_tag<T: ?Sized + Pointable>(data: usize, tag: usize) -> usize {
    (data & !low_bits::<T>()) | (tag & low_bits::<T>())
}

/// Decomposes a tagged pointer `data` into the pointer and the tag.
#[inline]
fn decompose_tag<T: ?Sized + Pointable>(data: usize) -> (usize, usize) {
    (data & !low_bits::<T>(), data & low_bits::<T>())
}

/// Types that are pointed to by a single word.
///
/// In concurrent programming, it is necessary to represent an object within a word because atomic
/// operations (e.g., reads, writes, read-modify-writes) support only single words.  This trait
/// qualifies such types that are pointed to by a single word.
///
/// The trait generalizes `Box<T>` for a sized type `T`.  In a box, an object of type `T` is
/// allocated on the heap and owned by a single-word pointer.  This trait is also implemented for
/// `[MaybeUninit<T>]` by storing its size along with its elements and pointing to the pair of array
/// size and elements.
///
/// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`].  In
/// particular, Crossbeam supports dynamically sized slices as follows.
///
/// ```
/// use std::mem::MaybeUninit;
/// use crossbeam_epoch::Owned;
///
/// let o = Owned::<[MaybeUninit<i32>]>::init(10); // allocating [i32; 10]
/// ```
pub trait Pointable {
    /// The alignment of the pointer.
    const ALIGN: usize;

    /// The type for initializers.
    type Init;

    /// Initializes a value with the given initializer.
    ///
    /// # Safety
    ///
    /// The result should be a multiple of `ALIGN`.
    unsafe fn init(init: Self::Init) -> usize;

    /// Dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently.
    unsafe fn deref<'a>(ptr: usize) -> &'a Self;

    /// Mutably dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self;

    /// Drops the object pointed to by the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn drop(ptr: usize);
}

impl<T> Pointable for T {
    const ALIGN: usize = mem::align_of::<T>();

    type Init = T;

    unsafe fn init(init: Self::Init) -> usize {
        Box::into_raw(Box::new(init)) as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        &*(ptr as *const T)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        &mut *(ptr as *mut T)
    }

    unsafe fn drop(ptr: usize) {
        drop(Box::from_raw(ptr as *mut T));
    }
}

/// Array with size.
///
/// # Memory layout
///
/// An array consisting of size and elements:
///
/// ```text
///          elements
///          |
///          |
/// ------------------------------------
/// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 |
/// ------------------------------------
/// ```
///
/// Its memory layout is different from that of `Box<[T]>` in that the size is stored in the
/// allocation (not alongside the pointer as in `Box<[T]>`).
///
/// Elements are not present in the type, but they will be in the allocation.
// TODO(@jeehoonkang): once we bump the minimum required Rust version to 1.44 or newer, use
// [`alloc::alloc::Layout::extend`] instead.
#[repr(C)]
struct Array<T> {
    size: usize,
    elements: [MaybeUninit<T>; 0],
}

impl<T> Pointable for [MaybeUninit<T>] {
    const ALIGN: usize = mem::align_of::<Array<T>>();

    type Init = usize;

    unsafe fn init(len: Self::Init) -> usize {
        // `size` is the allocation size in bytes; the header's `size` field
        // stores the element count.
        let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * len;
        let align = mem::align_of::<Array<T>>();
        let layout = alloc::Layout::from_size_align(size, align).unwrap();
        let ptr = alloc::alloc(layout) as *mut Array<T>;
        if ptr.is_null() {
            alloc::handle_alloc_error(layout);
        }
        (*ptr).size = len;
        ptr as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        let array = &*(ptr as *const Array<T>);
        slice::from_raw_parts(array.elements.as_ptr() as *const _, array.size)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        let array = &mut *(ptr as *mut Array<T>);
        slice::from_raw_parts_mut(array.elements.as_mut_ptr() as *mut _, array.size)
    }

    unsafe fn drop(ptr: usize) {
        let array = &*(ptr as *const Array<T>);
        let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * array.size;
        let align = mem::align_of::<Array<T>>();
        let layout = alloc::Layout::from_size_align(size, align).unwrap();
        alloc::dealloc(ptr as *mut u8, layout);
    }
}

/// An atomic pointer that can be safely shared between threads.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address. For example, the tag for a pointer to a sized type `T`
/// should be less than `(1 << mem::align_of::<T>().trailing_zeros())`.
///
/// Any method that loads the pointer must be passed a reference to a [`Guard`].
///
/// Crossbeam supports dynamically sized types.  See [`Pointable`] for details.
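///
/// # Examples
///
/// A short sketch of tagging: an `i32` is 4-byte aligned, so the two lowest bits
/// of the pointer are free to hold a tag.
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::new(1234);
/// let guard = &epoch::pin();
/// let p = a.load(SeqCst, guard);
/// a.store(p.with_tag(1), SeqCst);
/// assert_eq!(a.load(SeqCst, guard).tag(), 1);
/// ```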
pub struct Atomic<T: ?Sized + Pointable> {
    data: AtomicUsize,
    _marker: PhantomData<*mut T>,
}

unsafe impl<T: ?Sized + Pointable + Send + Sync> Send for Atomic<T> {}
unsafe impl<T: ?Sized + Pointable + Send + Sync> Sync for Atomic<T> {}

impl<T> Atomic<T> {
    /// Allocates `init` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::new(1234);
    /// ```
    pub fn new(init: T) -> Atomic<T> {
        Self::init(init)
    }
}

impl<T: ?Sized + Pointable> Atomic<T> {
    /// Allocates `init` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::init(1234);
    /// ```
    pub fn init(init: T::Init) -> Atomic<T> {
        Self::from(Owned::init(init))
    }

    /// Returns a new atomic pointer pointing to the tagged pointer `data`.
    fn from_usize(data: usize) -> Self {
        Self {
            data: AtomicUsize::new(data),
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::null();
    /// ```
    #[cfg_attr(all(feature = "nightly", not(crossbeam_loom)), const_fn::const_fn)]
    pub fn null() -> Atomic<T> {
        Self {
            data: AtomicUsize::new(0),
            _marker: PhantomData,
        }
    }

    /// Loads a `Shared` from the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// ```
    pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load(ord)) }
    }

    /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
    ///
    /// This is similar to the "acquire" ordering, except that an ordering is
    /// only guaranteed with operations that "depend on" the result of the load.
    /// However, consume loads are usually much faster than acquire loads on
    /// architectures with a weak memory model since they don't require memory
    /// fence instructions.
    ///
    /// The exact definition of "depend on" is a bit vague, but it works as you
    /// would expect in practice since a lot of software, especially the Linux
    /// kernel, relies on this behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load_consume(guard);
    /// ```
    pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load_consume()) }
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// a.store(Shared::null(), SeqCst);
    /// a.store(Owned::new(1234), SeqCst);
    /// ```
    pub fn store<P: Pointer<T>>(&self, new: P, ord: Ordering) {
        self.data.store(new.into_usize(), ord);
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
    /// `Shared`.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.swap(Shared::null(), SeqCst, guard);
    /// ```
    pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using `Acquire` as success ordering makes the store part
    /// of this operation `Relaxed`, and using `Release` makes the successful load
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard);
    /// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard);
    /// ```
    pub fn compare_exchange<'g, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        success: Ordering,
        failure: Ordering,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
    where
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange(current.into_usize(), new, success, failure)
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareExchangeError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_exchange`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms.  The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using `Acquire` as success ordering makes the store part
    /// of this operation `Relaxed`, and using `Release` makes the successful load
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// [`compare_exchange`]: Atomic::compare_exchange
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_exchange_weak(curr, Shared::null(), SeqCst, SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// ```
    pub fn compare_exchange_weak<'g, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        success: Ordering,
        failure: Ordering,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
    where
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange_weak(current.into_usize(), new, success, failure)
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareExchangeError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// # Migrating to `compare_exchange`
    ///
    /// `compare_and_set` is equivalent to `compare_exchange` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
    /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
    /// ```
    // TODO: remove in the next major version.
    #[allow(deprecated)]
    #[deprecated(note = "Use `compare_exchange` instead")]
    pub fn compare_and_set<'g, O, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        ord: O,
        guard: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        self.compare_exchange(current, new, ord.success(), ord.failure(), guard)
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms.  The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`compare_and_set`]: Atomic::compare_and_set
    ///
    /// # Migrating to `compare_exchange_weak`
    ///
    /// `compare_and_set_weak` is equivalent to `compare_exchange_weak` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// ```
    // TODO: remove in the next major version.
    #[allow(deprecated)]
    #[deprecated(note = "Use `compare_exchange_weak` instead")]
    pub fn compare_and_set_weak<'g, O, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        ord: O,
        guard: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        self.compare_exchange_weak(current, new, ord.success(), ord.failure(), guard)
    }

    /// Bitwise "and" with the current tag.
    ///
    /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
    }

    /// Bitwise "or" with the current tag.
    ///
    /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
    /// ```
    pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
    }

    /// Bitwise "xor" with the current tag.
    ///
    /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
    }

    /// Takes ownership of the pointee.
    ///
    /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
    /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
    /// destructors of data structures.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::mem;
    /// # use crossbeam_epoch::Atomic;
    /// struct DataStructure {
    ///     ptr: Atomic<usize>,
    /// }
    ///
    /// impl Drop for DataStructure {
    ///     fn drop(&mut self) {
    ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
    ///         // any Shared or & to it ourselves.
    ///         unsafe {
    ///             drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
    ///         }
    ///     }
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        #[cfg(crossbeam_loom)]
        {
            // FIXME: loom does not yet support into_inner, so we use unsync_load for now,
            // which should have the same synchronization properties:
            // https://github.com/tokio-rs/loom/issues/117
            Owned::from_usize(self.data.unsync_load())
        }
        #[cfg(not(crossbeam_loom))]
        {
            Owned::from_usize(self.data.into_inner())
        }
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, tag) = decompose_tag::<T>(data);

        f.debug_struct("Atomic")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, _) = decompose_tag::<T>(data);
        fmt::Pointer::fmt(&(unsafe { T::deref(raw) as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Clone for Atomic<T> {
    /// Returns a copy of the atomic value.
    ///
    /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
    /// atomics or fences.
    fn clone(&self) -> Self {
        let data = self.data.load(Ordering::Relaxed);
        Atomic::from_usize(data)
    }
}

impl<T: ?Sized + Pointable> Default for Atomic<T> {
    fn default() -> Self {
        Atomic::null()
    }
}

impl<T: ?Sized + Pointable> From<Owned<T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `owned`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned};
    ///
    /// let a = Atomic::<i32>::from(Owned::new(1234));
    /// ```
    fn from(owned: Owned<T>) -> Self {
        let data = owned.data;
        mem::forget(owned);
        Self::from_usize(data)
    }
}

impl<T> From<Box<T>> for Atomic<T> {
    fn from(b: Box<T>) -> Self {
        Self::from(Owned::from(b))
    }
}

impl<T> From<T> for Atomic<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

impl<'g, T: ?Sized + Pointable> From<Shared<'g, T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `ptr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Shared};
    ///
    /// let a = Atomic::<i32>::from(Shared::<i32>::null());
    /// ```
    fn from(ptr: Shared<'g, T>) -> Self {
        Self::from_usize(ptr.data)
    }
}

impl<T> From<*const T> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `raw`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::ptr;
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::from(ptr::null::<i32>());
    /// ```
    fn from(raw: *const T) -> Self {
        Self::from_usize(raw as usize)
    }
}

/// A trait for either `Owned` or `Shared` pointers.
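///
/// # Examples
///
/// A minimal round trip through the machine representation; per the safety
/// contract below, each `into_usize` result is converted back exactly once:
///
/// ```
/// use crossbeam_epoch::{Owned, Pointer};
///
/// let data = Owned::new(1234).into_usize();
/// // SAFETY: `data` came from `into_usize` and is converted back only once.
/// let o = unsafe { Owned::<i32>::from_usize(data) };
/// assert_eq!(*o, 1234);
/// ```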
pub trait Pointer<T: ?Sized + Pointable> {
    /// Returns the machine representation of the pointer.
    fn into_usize(self) -> usize;

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Safety
    ///
    /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should
    /// not be converted back by `Pointer::from_usize()` multiple times.
    unsafe fn from_usize(data: usize) -> Self;
}

/// An owned heap-allocated object.
///
/// This type is very similar to `Box<T>`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
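///
/// # Examples
///
/// Like a `Box`, an `Owned` dereferences to its pointee:
///
/// ```
/// use crossbeam_epoch::Owned;
///
/// let mut o = Owned::new(vec![1, 2, 3]);
/// o.push(4);
/// assert_eq!(*o, vec![1, 2, 3, 4]);
/// ```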
pub struct Owned<T: ?Sized + Pointable> {
    data: usize,
    _marker: PhantomData<Box<T>>,
}

impl<T: ?Sized + Pointable> Pointer<T> for Owned<T> {
    #[inline]
    fn into_usize(self) -> usize {
        let data = self.data;
        mem::forget(self);
        data
    }

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Panics
    ///
    /// Panics if the data is zero in debug mode.
    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        debug_assert!(data != 0, "converting zero into `Owned`");
        Owned {
            data,
            _marker: PhantomData,
        }
    }
}

impl<T> Owned<T> {
    /// Returns a new owned pointer pointing to `raw`.
    ///
    /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
    /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
    /// the same raw pointer.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Safety
    ///
    /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted
    /// back by `Owned::from_raw()` multiple times.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        Self::from_usize(raw)
    }

    /// Converts the owned pointer into a `Box`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// let b: Box<i32> = o.into_box();
    /// assert_eq!(*b, 1234);
    /// ```
    pub fn into_box(self) -> Box<T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        mem::forget(self);
        unsafe { Box::from_raw(raw as *mut _) }
    }

    /// Allocates `init` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// ```
    pub fn new(init: T) -> Owned<T> {
        Self::init(init)
    }
}

impl<T: ?Sized + Pointable> Owned<T> {
    /// Allocates `init` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::<i32>::init(1234);
    /// ```
    pub fn init(init: T::Init) -> Owned<T> {
        unsafe { Self::from_usize(T::init(init)) }
    }

    /// Converts the owned pointer into a [`Shared`].
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Owned};
    ///
    /// let o = Owned::new(1234);
    /// let guard = &epoch::pin();
    /// let p = o.into_shared(guard);
    /// ```
    #[allow(clippy::needless_lifetimes)]
    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.into_usize()) }
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// assert_eq!(Owned::new(1234).tag(), 0);
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(0u64);
    /// assert_eq!(o.tag(), 0);
    /// let o = o.with_tag(2);
    /// assert_eq!(o.tag(), 2);
    /// ```
    pub fn with_tag(self, tag: usize) -> Owned<T> {
        let data = self.into_usize();
        unsafe { Self::from_usize(compose_tag::<T>(data, tag)) }
    }
}

impl<T: ?Sized + Pointable> Drop for Owned<T> {
    fn drop(&mut self) {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe {
            T::drop(raw);
        }
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Owned<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Owned")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: Clone> Clone for Owned<T> {
    fn clone(&self) -> Self {
        Owned::new((**self).clone()).with_tag(self.tag())
    }
}

impl<T: ?Sized + Pointable> Deref for Owned<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe { T::deref(raw) }
    }
}

impl<T: ?Sized + Pointable> DerefMut for Owned<T> {
    fn deref_mut(&mut self) -> &mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe { T::deref_mut(raw) }
    }
}

impl<T> From<T> for Owned<T> {
    fn from(t: T) -> Self {
        Owned::new(t)
    }
}

impl<T> From<Box<T>> for Owned<T> {
    /// Returns a new owned pointer pointing to `b`.
    ///
    /// # Panics
    ///
    /// Panics if the pointer (the `Box`) is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    fn from(b: Box<T>) -> Self {
        unsafe { Self::from_raw(Box::into_raw(b)) }
    }
}

impl<T: ?Sized + Pointable> Borrow<T> for Owned<T> {
    fn borrow(&self) -> &T {
        self.deref()
    }
}

impl<T: ?Sized + Pointable> BorrowMut<T> for Owned<T> {
    fn borrow_mut(&mut self) -> &mut T {
        self.deref_mut()
    }
}

impl<T: ?Sized + Pointable> AsRef<T> for Owned<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T: ?Sized + Pointable> AsMut<T> for Owned<T> {
    fn as_mut(&mut self) -> &mut T {
        self.deref_mut()
    }
}

/// A pointer to an object protected by the epoch GC.
///
/// The pointer is valid for use only during the lifetime `'g`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
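///
/// # Examples
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::new(1234);
/// let guard = &epoch::pin();
/// // The loaded `Shared` may be dereferenced for as long as `guard` is alive.
/// let p = a.load(SeqCst, guard);
/// assert_eq!(unsafe { p.as_ref() }, Some(&1234));
/// ```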
pub struct Shared<'g, T: 'g + ?Sized + Pointable> {
    data: usize,
    _marker: PhantomData<(&'g (), *const T)>,
}

impl<T: ?Sized + Pointable> Clone for Shared<'_, T> {
    fn clone(&self) -> Self {
        Self {
            data: self.data,
            _marker: PhantomData,
        }
    }
}

impl<T: ?Sized + Pointable> Copy for Shared<'_, T> {}

impl<T: ?Sized + Pointable> Pointer<T> for Shared<'_, T> {
    #[inline]
    fn into_usize(self) -> usize {
        self.data
    }

    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        Shared {
            data,
            _marker: PhantomData,
        }
    }
}

impl<'g, T> Shared<'g, T> {
    /// Converts the pointer to a raw pointer (without the tag).
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let o = Owned::new(1234);
    /// let raw = &*o as *const _;
    /// let a = Atomic::from(o);
    ///
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.as_raw(), raw);
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn as_raw(&self) -> *const T {
        let (raw, _) = decompose_tag::<T>(self.data);
        raw as *const _
    }
}

impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
    /// Returns a new null pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::<i32>::null();
    /// assert!(p.is_null());
    /// ```
    pub fn null() -> Shared<'g, T> {
        Shared {
            data: 0,
            _marker: PhantomData,
        }
    }

    /// Returns `true` if the pointer is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::null();
    /// let guard = &epoch::pin();
    /// assert!(a.load(SeqCst, guard).is_null());
    /// a.store(Owned::new(1234), SeqCst);
    /// assert!(!a.load(SeqCst, guard).is_null());
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn is_null(&self) -> bool {
        let (raw, _) = decompose_tag::<T>(self.data);
        raw == 0
    }

    /// Dereferences the pointer.
    ///
    /// Returns a reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &1234);
    /// }
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    #[allow(clippy::should_implement_trait)]
    pub unsafe fn deref(&self) -> &'g T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref(raw)
    }

    /// Dereferences the pointer.
    ///
    /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// * There is no guarantee that there are no more threads attempting to read/write from/to the
    ///   actual object at the same time.
    ///
    ///   The user must know that there are no concurrent accesses towards the object itself.
    ///
    /// * Other than the above, all safety concerns of `deref()` apply here.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(vec![1, 2, 3, 4]);
    /// let guard = &epoch::pin();
    ///
    /// let mut p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert!(!p.is_null());
    ///     let b = p.deref_mut();
    ///     assert_eq!(b, &vec![1, 2, 3, 4]);
    ///     b.push(5);
    ///     assert_eq!(b, &vec![1, 2, 3, 4, 5]);
    /// }
    ///
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
    /// }
    /// ```
    #[allow(clippy::should_implement_trait)]
    pub unsafe fn deref_mut(&mut self) -> &'g mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref_mut(raw)
    }

    /// Converts the pointer to a reference.
    ///
    /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.as_ref(), Some(&1234));
    /// }
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub unsafe fn as_ref(&self) -> Option<&'g T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        if raw == 0 {
            None
        } else {
            Some(T::deref(raw))
        }
    }

    /// Takes ownership of the pointee.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// unsafe {
    ///     let guard = &epoch::unprotected();
    ///     let p = a.load(SeqCst, guard);
    ///     drop(p.into_owned());
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`");
        Owned::from_usize(self.data)
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.tag(), 2);
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(0u64);
    /// let guard = &epoch::pin();
    /// let p1 = a.load(SeqCst, guard);
    /// let p2 = p1.with_tag(2);
    ///
    /// assert_eq!(p1.tag(), 0);
    /// assert_eq!(p2.tag(), 2);
    /// assert_eq!(p1.as_raw(), p2.as_raw());
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
        unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) }
    }
}

impl<T> From<*const T> for Shared<'_, T> {
    /// Returns a new pointer pointing to `raw`.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _);
    /// assert!(!p.is_null());
    /// ```
    fn from(raw: *const T) -> Self {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        unsafe { Self::from_usize(raw) }
    }
}

impl<'g, T: ?Sized + Pointable> PartialEq<Shared<'g, T>> for Shared<'g, T> {
    fn eq(&self, other: &Self) -> bool {
        self.data == other.data
    }
}

impl<T: ?Sized + Pointable> Eq for Shared<'_, T> {}

impl<'g, T: ?Sized + Pointable> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        self.data.partial_cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> Ord for Shared<'_, T> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.data.cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Shared")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(unsafe { self.deref() as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Default for Shared<'_, T> {
    fn default() -> Self {
        Shared::null()
    }
}

#[cfg(all(test, not(crossbeam_loom)))]
mod tests {
    use super::Shared;

    #[test]
    fn valid_tag_i8() {
        Shared::<i8>::null().with_tag(0);
    }
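
    // Sanity check: the failure ordering derived from each success ordering
    // matches the migration tables in the `compare_and_set` documentation.
    #[test]
    fn derived_failure_ordering() {
        use core::sync::atomic::Ordering::*;

        assert_eq!(super::strongest_failure_ordering(Relaxed), Relaxed);
        assert_eq!(super::strongest_failure_ordering(Release), Relaxed);
        assert_eq!(super::strongest_failure_ordering(Acquire), Acquire);
        assert_eq!(super::strongest_failure_ordering(AcqRel), Acquire);
        assert_eq!(super::strongest_failure_ordering(SeqCst), SeqCst);
    }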

    #[test]
    fn valid_tag_i64() {
        Shared::<i64>::null().with_tag(7);
    }
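
    // Round-trip check for the private tag helpers: composing a tag and then
    // decomposing recovers both the raw pointer bits and the (truncated) tag.
    // This assumes `u32` is 4-byte aligned, so its pointers have two tag bits.
    #[test]
    fn compose_decompose_tag() {
        let raw = 0x100usize;
        let data = super::compose_tag::<u32>(raw, 0b10);
        assert_eq!(super::decompose_tag::<u32>(data), (raw, 0b10));

        // Tags wider than the available bits are truncated.
        let data = super::compose_tag::<u32>(raw, 0b111);
        assert_eq!(super::decompose_tag::<u32>(data), (raw, 0b11));
    }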

    #[cfg(feature = "nightly")]
    #[test]
    fn const_atomic_null() {
        use super::Atomic;
        const _: Atomic<u8> = Atomic::<u8>::null();
    }
}