netstack3_sync/rc.rs

// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Synchronized reference counting primitives.
//!
//! This module introduces a family of reference counted types that allow
//! marking the underlying data for destruction before all strong references
//! to the data are dropped. This enables the following features:
//!   * Upgrading a weak reference to a strong reference succeeds iff at least
//!     one strong reference exists _and_ the data has not been marked for
//!     destruction.
//!   * Waiting for all strongly-held references to be dropped after marking
//!     the data for destruction.
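//!
//! A minimal usage sketch, mirroring the tests at the bottom of this file
//! (not compiled as a doctest):
//!
//! ```ignore
//! let primary = Primary::new(0u32);
//! let weak = Primary::downgrade(&primary);
//! assert!(weak.upgrade().is_some());
//!
//! // Dropping the `Primary` marks the data for destruction; the weak
//! // reference can no longer be upgraded.
//! drop(primary);
//! assert!(weak.upgrade().is_none());
//! ```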

use core::hash::{Hash, Hasher};
use core::ops::Deref;
use core::panic::Location;
use core::sync::atomic::{AtomicBool, Ordering};

use derivative::Derivative;
use netstack3_trace::TraceResourceId;

mod caller {
    //! Provides tracking of instances via tracked caller location.
    //!
    //! Callers are only tracked in debug builds. All operations and types
    //! are no-ops and empty unless the `rc-debug-names` feature is enabled.

    use core::panic::Location;

    /// Records reference-counted names of instances.
    #[derive(Default)]
    pub(super) struct Callers {
        /// The names that were inserted and aren't known to be gone.
        ///
        /// This holds weak references to allow callers to drop without
        /// synchronizing. Invalid weak pointers are cleaned up periodically but
        /// are not logically present.
        ///
        /// Note that using [`std::sync::Mutex`] here is intentional to opt this
        /// out of loom checking, which makes testing with `rc-debug-names`
        /// impossibly slow.
        #[cfg(feature = "rc-debug-names")]
        pub(super) callers: std::sync::Mutex<std::collections::HashMap<Location<'static>, usize>>,
    }

    impl core::fmt::Debug for Callers {
        #[cfg(not(feature = "rc-debug-names"))]
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            write!(f, "(Not Tracked)")
        }
        #[cfg(feature = "rc-debug-names")]
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            let Self { callers } = self;
            let callers = callers.lock().unwrap();
            write!(f, "[\n")?;
            for (l, c) in callers.iter() {
                write!(f, "   {l} => {c},\n")?;
            }
            write!(f, "]")
        }
    }

    impl Callers {
        /// Records the given [`Location`] and returns a [`TrackedCaller`]
        /// for it.
        ///
        /// Without the `rc-debug-names` feature, this is a no-op.
        pub(super) fn insert(&self, caller: &Location<'static>) -> TrackedCaller {
            #[cfg(not(feature = "rc-debug-names"))]
            {
                let _ = caller;
                TrackedCaller {}
            }
            #[cfg(feature = "rc-debug-names")]
            {
                let Self { callers } = self;
                let mut callers = callers.lock().unwrap();
                let count = callers.entry(caller.clone()).or_insert(0);
                *count += 1;
                TrackedCaller { location: caller.clone() }
            }
        }
    }

    #[derive(Debug)]
    pub(super) struct TrackedCaller {
        #[cfg(feature = "rc-debug-names")]
        pub(super) location: Location<'static>,
    }

    impl TrackedCaller {
        #[cfg(not(feature = "rc-debug-names"))]
        pub(super) fn release(&mut self, Callers {}: &Callers) {
            let Self {} = self;
        }

        #[cfg(feature = "rc-debug-names")]
        pub(super) fn release(&mut self, Callers { callers }: &Callers) {
            let Self { location } = self;
            let mut callers = callers.lock().unwrap();
            let mut entry = match callers.entry(location.clone()) {
                std::collections::hash_map::Entry::Vacant(_) => {
                    panic!("location {location:?} was not in the callers map")
                }
                std::collections::hash_map::Entry::Occupied(o) => o,
            };

            let sub = entry
                .get()
                .checked_sub(1)
                .unwrap_or_else(|| panic!("zero-count location {location:?} in map"));
            if sub == 0 {
                let _: usize = entry.remove();
            } else {
                *entry.get_mut() = sub;
            }
        }
    }
}

mod debug_id {
    use core::sync::atomic::{AtomicU64, Ordering};
    use netstack3_trace::TraceResourceId;

    /// An opaque token to be used for debugging.
    ///
    /// The [`Debug`] implementation is guaranteed to produce a unique
    /// representation for all instances of [`DebugToken`]. When paired with the
    /// various RC types exposed in the parent module, this ensures that each
    /// underlying value can be differentiated from one another. This is an
    /// improvement over, say, using the underlying value's address, which may
    /// be reused when the underlying value has been dropped.
    #[derive(Clone)]
    pub(super) struct DebugToken(u64);

    impl core::fmt::Debug for DebugToken {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
            let DebugToken(inner) = self;
            write!(f, "{}", inner)
        }
    }

    impl Default for DebugToken {
        fn default() -> Self {
            static NEXT_TOKEN: AtomicU64 = AtomicU64::new(0);
            // NB: Fetch-add will cause the counter to wrap around to 0 if we
            // happen to exceed `u64::MAX` instantiations. In practice, that's
            // an impossibility (at 1 billion instantiations per second, the
            // counter is valid for > 500 years). Spare the CPU cycles and don't
            // bother attempting to detect/handle overflow.
            DebugToken(NEXT_TOKEN.fetch_add(1, Ordering::Relaxed))
        }
    }

    impl DebugToken {
        pub(super) fn trace_id(&self) -> TraceResourceId<'_> {
            let Self(inner) = self;
            TraceResourceId::new(*inner)
        }
    }

    /// A debug identifier for the RC types exposed in the parent module.
    ///
    /// Encompasses the underlying pointer for the RC type, as well as
    /// (optionally) the globally unique [`DebugToken`].
    pub(super) enum DebugId<T> {
        /// Used in contexts that have access to the [`DebugToken`], e.g.
        /// [`Primary`], [`Strong`], and sometimes [`Weak`] RC types.
        WithToken { ptr: *const T, token: DebugToken },
        /// Used in contexts that don't have access to the [`DebugToken`], e.g.
        /// [`Weak`] RC types that cannot be upgraded.
        WithoutToken { ptr: *const T },
    }

    impl<T> core::fmt::Debug for DebugId<T> {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
            match self {
                DebugId::WithToken { ptr, token } => write!(f, "{:?}:{:?}", token, ptr),
                DebugId::WithoutToken { ptr } => write!(f, "?:{:?}", ptr),
            }
        }
    }
}

#[derive(Derivative)]
#[derivative(Debug)]
struct Inner<T> {
    marked_for_destruction: AtomicBool,
    callers: caller::Callers,
    data: core::mem::ManuallyDrop<T>,
    // NB: Notifier could be an atomic pointer or atomic box, but this mutex is
    // never contended and it saves us from importing new code into the
    // repository (e.g. `atomicbox`) or writing unsafe code.
    #[derivative(Debug = "ignore")]
    notifier: crate::Mutex<Option<Box<dyn Notifier<T>>>>,
    debug_token: debug_id::DebugToken,
}

impl<T> Inner<T> {
    fn pre_drop_check(marked_for_destruction: &AtomicBool) {
        // `Ordering::Acquire` because we want to synchronize with the
        // `Ordering::Release` write to `marked_for_destruction` so that all
        // memory writes before the reference was marked for destruction are
        // visible here.
        assert!(marked_for_destruction.load(Ordering::Acquire), "Must be marked for destruction");
    }

    fn unwrap(mut self) -> T {
        // We cannot destructure `self` by value since `Inner` implements
        // `Drop`. So we must manually drop all the fields but data and then
        // forget self.
        let Inner { marked_for_destruction, data, callers: holders, notifier, debug_token } =
            &mut self;

        // Make sure that `inner` is in a valid state for destruction.
        //
        // Note that we do not actually destroy all of `self` here; we decompose
        // it into its parts, keeping what we need & throwing away what we
        // don't. Regardless, we perform the same checks.
        Inner::<T>::pre_drop_check(marked_for_destruction);
        // SAFETY: Safe since we own `self` and `self` is immediately forgotten
        // below, so its destructor (and those of its fields) will not be run
        // as a result of `self` being dropped.
        let data = unsafe {
            // Explicitly drop since we do not need these anymore.
            core::ptr::drop_in_place(marked_for_destruction);
            core::ptr::drop_in_place(holders);
            core::ptr::drop_in_place(notifier);
            core::ptr::drop_in_place(debug_token);

            core::mem::ManuallyDrop::take(data)
        };
        // Forget self now to prevent its `Drop::drop` impl from being run,
        // which would attempt to destroy `data` and perform the pre-drop
        // checks on `Inner`'s state again.
        core::mem::forget(self);

        data
    }

    /// Sets the notifier for this `Inner`.
    ///
    /// Panics if a notifier is already set.
    fn set_notifier<N: Notifier<T> + 'static>(&self, notifier: N) {
        let Self { notifier: slot, .. } = self;

        // Using dynamic dispatch to notify allows us to avoid carrying the
        // notifier's concrete type from creation and spreading it across all
        // the reference types in this crate. The assumption is that the
        // allocation and dynamic dispatch costs here are tiny compared to the
        // overall work of destroying the resources this module is targeting.
        let boxed: Box<dyn Notifier<T>> = Box::new(notifier);
        let prev_notifier = { slot.lock().replace(boxed) };
        // Uphold invariant that this can only be done from Primary.
        assert!(prev_notifier.is_none(), "can't have a notifier already installed");
    }
}

impl<T> Drop for Inner<T> {
    fn drop(&mut self) {
        let Inner { marked_for_destruction, data, callers: _, notifier, debug_token: _ } = self;
        // Take data out of ManuallyDrop in case we panic in pre_drop_check.
        // That'll ensure data is dropped if we hit the panic.
        //
        // SAFETY: Safe because ManuallyDrop is not referenced again after
        // taking.
        let data = unsafe { core::mem::ManuallyDrop::take(data) };
        Self::pre_drop_check(marked_for_destruction);
        if let Some(mut notifier) = notifier.lock().take() {
            notifier.notify(data);
        }
    }
}

/// A primary reference.
///
/// Note that only one `Primary` may be associated with data. This is
/// enforced by not implementing [`Clone`].
///
/// For now, this reference is no different than a [`Strong`] but later changes
/// will enable blocking the destruction of a primary reference until all
/// strongly held references are dropped.
#[derive(Debug)]
pub struct Primary<T> {
    inner: core::mem::ManuallyDrop<alloc::sync::Arc<Inner<T>>>,
}

impl<T> Drop for Primary<T> {
    fn drop(&mut self) {
        let was_marked = self.mark_for_destruction();
        let Self { inner } = self;
        // Take the inner out of ManuallyDrop early so its Drop impl will run in
        // case we panic here.
        // SAFETY: Safe because we don't reference ManuallyDrop again.
        let inner = unsafe { core::mem::ManuallyDrop::take(inner) };

        // Make debugging easier: don't panic if a panic is already happening
        // since double-panics are annoying to debug. This means that the
        // invariants provided by Primary are possibly violated during an
        // unwind, but we're sidestepping that problem because Fuchsia is our
        // only audience here.
        if !std::thread::panicking() {
            assert_eq!(was_marked, false, "Must not be marked for destruction yet");

            let Inner { marked_for_destruction: _, callers, data: _, notifier: _, debug_token: _ } =
                &*inner;

            // Make sure that this `Primary` is the last thing to hold a strong
            // reference to the underlying data when it is being dropped.
            let refs = alloc::sync::Arc::strong_count(&inner).checked_sub(1).unwrap();
            assert!(
                refs == 0,
                "dropped Primary with {refs} strong refs remaining, \
                Callers={callers:?}"
            );
        }
    }
}

impl<T> AsRef<T> for Primary<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T> Deref for Primary<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let Self { inner } = self;
        let Inner { marked_for_destruction: _, data, callers: _, notifier: _, debug_token: _ } =
            &***inner;
        data
    }
}
impl<T> Primary<T> {
    // Marks this primary reference as ready for destruction. Used by all
    // dropping flows. We take &mut self here to ensure we have the only
    // possible reference to Primary. Returns whether it was already marked for
    // destruction.
    fn mark_for_destruction(&mut self) -> bool {
        let Self { inner } = self;
        // `Ordering::Release` because we want to make sure that all memory
        // writes before dropping this `Primary` synchronize with later attempts
        // to upgrade weak pointers and with the `Drop::drop` impl of `Inner`.
        inner.marked_for_destruction.swap(true, Ordering::Release)
    }

    /// Returns a new strongly-held reference.
    pub fn new(data: T) -> Primary<T> {
        Primary {
            inner: core::mem::ManuallyDrop::new(alloc::sync::Arc::new(Inner {
                marked_for_destruction: AtomicBool::new(false),
                callers: caller::Callers::default(),
                data: core::mem::ManuallyDrop::new(data),
                notifier: crate::Mutex::new(None),
                debug_token: debug_id::DebugToken::default(),
            })),
        }
    }

    /// Constructs a new `Primary<T>` while giving you a `Weak<T>` to the
    /// allocation, to allow you to construct a `T` which holds a weak pointer
    /// to itself.
    ///
    /// Like for [`Arc::new_cyclic`], the `Weak` reference provided to `data_fn`
    /// cannot be upgraded until the [`Primary`] is constructed.
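    ///
    /// A usage sketch, mirroring the `new_cyclic` test at the bottom of this
    /// file (not compiled as a doctest):
    ///
    /// ```ignore
    /// struct Data {
    ///     weak_self: Weak<Data>,
    /// }
    ///
    /// let primary = Primary::new_cyclic(|weak| Data { weak_self: weak });
    /// // The stored weak reference points back at the same allocation.
    /// let strong = primary.weak_self.upgrade().unwrap();
    /// assert!(Primary::ptr_eq(&primary, &strong));
    /// ```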
    pub fn new_cyclic(data_fn: impl FnOnce(Weak<T>) -> T) -> Primary<T> {
        Primary {
            inner: core::mem::ManuallyDrop::new(alloc::sync::Arc::new_cyclic(move |weak| Inner {
                marked_for_destruction: AtomicBool::new(false),
                callers: caller::Callers::default(),
                data: core::mem::ManuallyDrop::new(data_fn(Weak(weak.clone()))),
                notifier: crate::Mutex::new(None),
                debug_token: debug_id::DebugToken::default(),
            })),
        }
    }

    /// Clones a strongly-held reference.
    #[cfg_attr(feature = "rc-debug-names", track_caller)]
    pub fn clone_strong(Self { inner }: &Self) -> Strong<T> {
        let Inner { data: _, callers, marked_for_destruction: _, notifier: _, debug_token: _ } =
            &***inner;
        let caller = callers.insert(Location::caller());
        Strong { inner: alloc::sync::Arc::clone(inner), caller }
    }

    /// Returns a weak reference pointing to the same underlying data.
    pub fn downgrade(Self { inner }: &Self) -> Weak<T> {
        Weak(alloc::sync::Arc::downgrade(inner))
    }

    /// Returns true if the two pointers point to the same allocation.
    pub fn ptr_eq(
        Self { inner: this }: &Self,
        Strong { inner: other, caller: _ }: &Strong<T>,
    ) -> bool {
        alloc::sync::Arc::ptr_eq(this, other)
    }

    /// Returns a [`core::fmt::Debug`] implementation that is stable and unique
    /// for the data held behind this [`Primary`].
    pub fn debug_id(&self) -> impl core::fmt::Debug + '_ {
        let Self { inner } = self;
        debug_id::DebugId::WithToken {
            ptr: alloc::sync::Arc::as_ptr(inner),
            token: inner.debug_token.clone(),
        }
    }

    fn mark_for_destruction_and_take_inner(mut this: Self) -> alloc::sync::Arc<Inner<T>> {
        // Prepare for destruction.
        assert!(!this.mark_for_destruction());
        let Self { inner } = &mut this;
        // SAFETY: Safe because inner can't be used after this. We forget
        // our Primary reference to prevent its Drop impl from running.
        let inner = unsafe { core::mem::ManuallyDrop::take(inner) };
        core::mem::forget(this);
        inner
    }

    fn try_unwrap(this: Self) -> Result<T, alloc::sync::Arc<Inner<T>>> {
        let inner = Self::mark_for_destruction_and_take_inner(this);
        alloc::sync::Arc::try_unwrap(inner).map(Inner::unwrap)
    }

    /// Returns the inner value if no [`Strong`] references are held.
    ///
    /// # Panics
    ///
    /// Panics if [`Strong`] references are held when this function is called.
    pub fn unwrap(this: Self) -> T {
        Self::try_unwrap(this).unwrap_or_else(|inner| {
            let callers = &inner.callers;
            let refs = alloc::sync::Arc::strong_count(&inner).checked_sub(1).unwrap();
            panic!("can't unwrap, still had {refs} strong refs: {callers:?}");
        })
    }

    /// Marks this [`Primary`] for destruction and uses `notifier` as a
    /// signaler for when the last strong reference is dropped. After calling
    /// `unwrap_with_notifier`, [`Weak`] references can no longer be upgraded.
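    ///
    /// A sketch of the deferred-destruction flow, mirroring the
    /// `unwrap_with_notifier` test at the bottom of this file (not compiled
    /// as a doctest):
    ///
    /// ```ignore
    /// let primary = Primary::new(10);
    /// let strong = Primary::clone_strong(&primary);
    /// let notifier = ArcNotifier::new();
    /// Primary::unwrap_with_notifier(primary, notifier.clone());
    /// // A strong reference is still alive, so the notifier hasn't fired.
    /// assert_eq!(notifier.take(), None);
    /// // Dropping the last strong reference hands the data to the notifier.
    /// drop(strong);
    /// assert_eq!(notifier.take(), Some(10));
    /// ```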
    pub fn unwrap_with_notifier<N: Notifier<T> + 'static>(this: Self, notifier: N) {
        let inner = Self::mark_for_destruction_and_take_inner(this);
        inner.set_notifier(notifier);
        // Now we can drop our inner reference; if we were the last, this will
        // trigger the notifier.
        core::mem::drop(inner);
    }

    /// Marks this [`Primary`] for destruction and returns `Ok` with the data
    /// if this was the last strong reference to it. Otherwise, `new_notifier`
    /// is called to create a notifier that will observe the deferred
    /// destruction.
    ///
    /// Like [`Primary::unwrap_with_notifier`], [`Weak`] references can no
    /// longer be upgraded after calling `unwrap_or_notify_with`.
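    ///
    /// A sketch, mirroring the `unwrap_or_notify_with_deferred` test at the
    /// bottom of this file (not compiled as a doctest):
    ///
    /// ```ignore
    /// let primary = Primary::new(10);
    /// let strong = Primary::clone_strong(&primary);
    /// let result = Primary::unwrap_or_notify_with(primary, || {
    ///     let notifier = ArcNotifier::new();
    ///     // Install one clone as the notifier; keep the other as the `O`
    ///     // output to poll later.
    ///     (notifier.clone(), notifier)
    /// });
    /// // `strong` is still alive, so destruction is deferred.
    /// let notifier = result.unwrap_err();
    /// drop(strong);
    /// assert_eq!(notifier.take(), Some(10));
    /// ```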
    pub fn unwrap_or_notify_with<N: Notifier<T> + 'static, O, F: FnOnce() -> (N, O)>(
        this: Self,
        new_notifier: F,
    ) -> Result<T, O> {
        Self::try_unwrap(this).map_err(move |inner| {
            let (notifier, output) = new_notifier();
            inner.set_notifier(notifier);
            output
        })
    }

    /// Creates a [`DebugReferences`] instance.
    pub fn debug_references(this: &Self) -> DebugReferences<T> {
        let Self { inner } = this;
        DebugReferences(alloc::sync::Arc::downgrade(&*inner))
    }
}

/// A strongly-held reference.
///
/// Similar to an [`alloc::sync::Arc`], but holding a `Strong` acts as a
/// witness to the liveness of the underlying data. That is, holding a
/// `Strong` implies that the underlying data has not yet been destroyed.
///
/// Note that `Strong`'s implementation of [`Hash`] and [`PartialEq`] operate
/// on the pointer itself and not the underlying data.
#[derive(Debug, Derivative)]
pub struct Strong<T> {
    inner: alloc::sync::Arc<Inner<T>>,
    caller: caller::TrackedCaller,
}

impl<T> Drop for Strong<T> {
    fn drop(&mut self) {
        let Self { inner, caller } = self;
        let Inner { marked_for_destruction: _, callers, data: _, notifier: _, debug_token: _ } =
            &**inner;
        caller.release(callers);
    }
}

impl<T> AsRef<T> for Strong<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T> Deref for Strong<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let Self { inner, caller: _ } = self;
        let Inner { marked_for_destruction: _, data, callers: _, notifier: _, debug_token: _ } =
            inner.deref();
        data
    }
}

impl<T> core::cmp::Eq for Strong<T> {}

impl<T> core::cmp::PartialEq for Strong<T> {
    fn eq(&self, other: &Self) -> bool {
        Self::ptr_eq(self, other)
    }
}

impl<T> Hash for Strong<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let Self { inner, caller: _ } = self;
        alloc::sync::Arc::as_ptr(inner).hash(state)
    }
}

impl<T> Clone for Strong<T> {
    #[cfg_attr(feature = "rc-debug-names", track_caller)]
    fn clone(&self) -> Self {
        let Self { inner, caller: _ } = self;
        let Inner { data: _, marked_for_destruction: _, callers, notifier: _, debug_token: _ } =
            &**inner;
        let caller = callers.insert(Location::caller());
        Self { inner: alloc::sync::Arc::clone(inner), caller }
    }
}

impl<T> Strong<T> {
    /// Returns a weak reference pointing to the same underlying data.
    pub fn downgrade(Self { inner, caller: _ }: &Self) -> Weak<T> {
        Weak(alloc::sync::Arc::downgrade(inner))
    }

    /// Returns a [`core::fmt::Debug`] implementation that is stable and unique
    /// for the data held behind this [`Strong`].
    pub fn debug_id(&self) -> impl core::fmt::Debug + '_ {
        let Self { inner, caller: _ } = self;
        debug_id::DebugId::WithToken {
            ptr: alloc::sync::Arc::as_ptr(inner),
            token: inner.debug_token.clone(),
        }
    }

    /// Returns a [`TraceResourceId`] that can be used to identify this
    /// reference in tracing events.
    pub fn trace_id(&self) -> TraceResourceId<'_> {
        self.inner.debug_token.trace_id()
    }

    /// Returns true if the inner value has since been marked for destruction.
    pub fn marked_for_destruction(Self { inner, caller: _ }: &Self) -> bool {
        let Inner { marked_for_destruction, data: _, callers: _, notifier: _, debug_token: _ } =
            inner.as_ref();
        // `Ordering::Acquire` because we want to synchronize with the
        // `Ordering::Release` write to `marked_for_destruction` so that all
        // memory writes before the reference was marked for destruction are
        // visible here.
        marked_for_destruction.load(Ordering::Acquire)
    }

    /// Returns true if the two pointers point to the same allocation.
    pub fn weak_ptr_eq(Self { inner: this, caller: _ }: &Self, Weak(other): &Weak<T>) -> bool {
        core::ptr::eq(alloc::sync::Arc::as_ptr(this), other.as_ptr())
    }

    /// Returns true if the two pointers point to the same allocation.
    pub fn ptr_eq(
        Self { inner: this, caller: _ }: &Self,
        Self { inner: other, caller: _ }: &Self,
    ) -> bool {
        alloc::sync::Arc::ptr_eq(this, other)
    }

    /// Compares the two pointers.
    pub fn ptr_cmp(
        Self { inner: this, caller: _ }: &Self,
        Self { inner: other, caller: _ }: &Self,
    ) -> core::cmp::Ordering {
        let this = alloc::sync::Arc::as_ptr(this);
        let other = alloc::sync::Arc::as_ptr(other);
        this.cmp(&other)
    }

    /// Creates a [`DebugReferences`] instance.
    pub fn debug_references(this: &Self) -> DebugReferences<T> {
        let Self { inner, caller: _ } = this;
        DebugReferences(alloc::sync::Arc::downgrade(inner))
    }
}

/// A weakly-held reference.
///
/// Similar to an [`alloc::sync::Weak`].
///
/// A `Weak` does not make any claim to the liveness of the underlying data.
/// Holders of a `Weak` must attempt to upgrade to a [`Strong`] through
/// [`Weak::upgrade`] to access the underlying data.
///
/// Note that `Weak`'s implementation of [`Hash`] and [`PartialEq`] operate on
/// the pointer itself and not the underlying data.
#[derive(Debug)]
pub struct Weak<T>(alloc::sync::Weak<Inner<T>>);

impl<T> core::cmp::Eq for Weak<T> {}

impl<T> core::cmp::PartialEq for Weak<T> {
    fn eq(&self, other: &Self) -> bool {
        Self::ptr_eq(self, other)
    }
}

impl<T> Hash for Weak<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let Self(this) = self;
        this.as_ptr().hash(state)
    }
}

impl<T> Clone for Weak<T> {
    fn clone(&self) -> Self {
        let Self(this) = self;
        Weak(this.clone())
    }
}

impl<T> Weak<T> {
    /// Returns true if the two pointers point to the same allocation.
    pub fn ptr_eq(&self, Self(other): &Self) -> bool {
        let Self(this) = self;
        this.ptr_eq(other)
    }

    /// Returns a [`core::fmt::Debug`] implementation that is stable and unique
    /// for the data held behind this [`Weak`].
    pub fn debug_id(&self) -> impl core::fmt::Debug + '_ {
        match self.upgrade() {
            Some(strong) => {
                let Strong { inner, caller: _ } = &strong;
                debug_id::DebugId::WithToken {
                    ptr: alloc::sync::Arc::as_ptr(&inner),
                    token: inner.debug_token.clone(),
                }
            }
            None => {
                let Self(this) = self;
                // NB: If we can't upgrade the weak reference, we can't know
                // the token.
                debug_id::DebugId::WithoutToken { ptr: this.as_ptr() }
            }
        }
    }

    /// Attempts to upgrade to a [`Strong`].
    ///
    /// Returns `None` if the inner value has since been marked for destruction.
    #[cfg_attr(feature = "rc-debug-names", track_caller)]
    pub fn upgrade(&self) -> Option<Strong<T>> {
        let Self(weak) = self;
        let arc = weak.upgrade()?;
        let Inner { marked_for_destruction, data: _, callers, notifier: _, debug_token: _ } =
            arc.deref();

        // `Ordering::Acquire` because we want to synchronize with the
        // `Ordering::Release` write to `marked_for_destruction` so that all
        // memory writes before the reference was marked for destruction are
        // visible here.
        if !marked_for_destruction.load(Ordering::Acquire) {
            let caller = callers.insert(Location::caller());
            Some(Strong { inner: arc, caller })
        } else {
            None
        }
    }

    /// Gets the number of [`Primary`] and [`Strong`] references to this allocation.
    pub fn strong_count(&self) -> usize {
        let Self(weak) = self;
        weak.strong_count()
    }

    /// Creates a [`DebugReferences`] instance.
    pub fn debug_references(&self) -> DebugReferences<T> {
        let Self(inner) = self;
        DebugReferences(inner.clone())
    }
}

fn debug_refs(
    refs: Option<(usize, &AtomicBool, &caller::Callers)>,
    name: &'static str,
    f: &mut core::fmt::Formatter<'_>,
) -> core::fmt::Result {
    let mut f = f.debug_struct(name);
    match refs {
        Some((strong_count, marked_for_destruction, callers)) => f
            .field("strong_count", &strong_count)
            .field("marked_for_destruction", marked_for_destruction)
            .field("callers", callers)
            .finish(),
        None => {
            let strong_count = 0_usize;
            f.field("strong_count", &strong_count).finish_non_exhaustive()
        }
    }
}

/// Provides a [`Debug`] implementation that contains information helpful for
/// debugging dangling references.
#[derive(Clone)]
pub struct DebugReferences<T>(alloc::sync::Weak<Inner<T>>);

impl<T> core::fmt::Debug for DebugReferences<T> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let Self(inner) = self;
        let inner = inner.upgrade();
        let refs = inner.as_ref().map(|inner| {
            (alloc::sync::Arc::strong_count(inner), &inner.marked_for_destruction, &inner.callers)
        });
        debug_refs(refs, "DebugReferences", f)
    }
}

impl<T: Send + Sync + 'static> DebugReferences<T> {
    /// Transforms this `DebugReferences` into a [`DynDebugReferences`].
    pub fn into_dyn(self) -> DynDebugReferences {
        let Self(w) = self;
        DynDebugReferences(w)
    }
}

/// Like [`DebugReferences`], but type-erases the contained type.
#[derive(Clone)]
pub struct DynDebugReferences(alloc::sync::Weak<dyn ExposeRefs>);

impl core::fmt::Debug for DynDebugReferences {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let Self(inner) = self;
        let inner = inner.upgrade();
        let refs = inner.as_ref().map(|inner| {
            let (marked_for_destruction, callers) = inner.refs_info();
            (alloc::sync::Arc::strong_count(inner), marked_for_destruction, callers)
        });
        debug_refs(refs, "DynDebugReferences", f)
    }
}

/// A trait allowing [`DynDebugReferences`] to erase the `T` type on [`Inner`].
trait ExposeRefs: Send + Sync + 'static {
    fn refs_info(&self) -> (&AtomicBool, &caller::Callers);
}

impl<T: Send + Sync + 'static> ExposeRefs for Inner<T> {
    fn refs_info(&self) -> (&AtomicBool, &caller::Callers) {
        (&self.marked_for_destruction, &self.callers)
    }
}

/// Provides delegated notification of all strong references of a [`Primary`]
/// being dropped.
///
/// See [`Primary::unwrap_with_notifier`].
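///
/// A notifier can forward the data anywhere; a hypothetical sketch (the
/// `ChannelNotifier` type is illustrative only, not part of this module):
///
/// ```ignore
/// struct ChannelNotifier<T>(std::sync::mpsc::Sender<T>);
///
/// impl<T: Send> Notifier<T> for ChannelNotifier<T> {
///     fn notify(&mut self, data: T) {
///         let Self(sender) = self;
///         // The receiver may already be gone; ignore send errors.
///         let _ = sender.send(data);
///     }
/// }
/// ```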
pub trait Notifier<T>: Send {
    /// Called when the data contained in the [`Primary`] reference can be
    /// extracted out because there are no more strong references to it.
    fn notify(&mut self, data: T);
}

/// An implementation of [`Notifier`] that stores the unwrapped data in a
/// `Clone` type.
///
/// Useful for tests where completion assertions are possible and useful.
#[derive(Debug, Derivative)]
#[derivative(Clone(bound = ""))]
pub struct ArcNotifier<T>(alloc::sync::Arc<crate::Mutex<Option<T>>>);

impl<T> ArcNotifier<T> {
    /// Creates a new `ArcNotifier`.
    pub fn new() -> Self {
        Self(alloc::sync::Arc::new(crate::Mutex::new(None)))
    }

    /// Takes the notified value, if any.
    pub fn take(&self) -> Option<T> {
        let Self(inner) = self;
        inner.lock().take()
    }
}

impl<T: Send> Notifier<T> for ArcNotifier<T> {
    fn notify(&mut self, data: T) {
        let Self(inner) = self;
        assert!(inner.lock().replace(data).is_none(), "notified twice");
    }
}

/// An implementation of [`Notifier`] that wraps another `Notifier` and applies
/// a function on notified objects.
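///
/// A usage sketch, mirroring the `map_notifier` test at the bottom of this
/// file (not compiled as a doctest):
///
/// ```ignore
/// let primary = Primary::new(10);
/// let notifier = ArcNotifier::new();
/// let map_notifier = MapNotifier::new(notifier.clone(), |data| data + 1);
/// Primary::unwrap_with_notifier(primary, map_notifier);
/// // The mapping function runs before the wrapped notifier observes the data.
/// assert_eq!(notifier.take(), Some(11));
/// ```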
pub struct MapNotifier<N, F> {
    inner: N,
    map: Option<F>,
}

impl<N, F> MapNotifier<N, F> {
    /// Creates a new [`MapNotifier`] that wraps `notifier` with a mapping
    /// function `F`.
    pub fn new(notifier: N, map: F) -> Self {
        Self { inner: notifier, map: Some(map) }
    }
}

impl<A, B, N: Notifier<B>, F: FnOnce(A) -> B> Notifier<A> for MapNotifier<N, F>
where
    Self: Send,
{
    fn notify(&mut self, data: A) {
        let Self { inner, map } = self;
        let map = map.take().expect("notified twice");
        inner.notify(map(data))
    }
}

/// A handy implementation for the common `Infallible` "never" type.
impl<T> Notifier<T> for core::convert::Infallible {
    fn notify(&mut self, _data: T) {
        match *self {}
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn zombie_weak() {
        let primary = Primary::new(());
        let weak = {
            let strong = Primary::clone_strong(&primary);
            Strong::downgrade(&strong)
        };
        core::mem::drop(primary);

        assert!(weak.upgrade().is_none());
    }

    #[test]
    fn rcs() {
        const INITIAL_VAL: u8 = 1;
        const NEW_VAL: u8 = 2;

        let primary = Primary::new(crate::sync::Mutex::new(INITIAL_VAL));
        let strong = Primary::clone_strong(&primary);
        let weak = Strong::downgrade(&strong);

        *primary.lock().unwrap() = NEW_VAL;
        assert_eq!(*primary.deref().lock().unwrap(), NEW_VAL);
        assert_eq!(*strong.deref().lock().unwrap(), NEW_VAL);
        assert_eq!(*weak.upgrade().unwrap().deref().lock().unwrap(), NEW_VAL);
    }

    #[test]
    fn unwrap_primary_without_strong_held() {
        const VAL: u16 = 6;
        let primary = Primary::new(VAL);
        assert_eq!(Primary::unwrap(primary), VAL);
    }

    #[test]
    #[should_panic(expected = "can't unwrap, still had 1 strong refs")]
    fn unwrap_primary_with_strong_held() {
        let primary = Primary::new(8);
        let _strong: Strong<_> = Primary::clone_strong(&primary);
        let _: u16 = Primary::unwrap(primary);
    }

    #[test]
    #[should_panic(expected = "dropped Primary with 1 strong refs remaining")]
    fn drop_primary_with_strong_held() {
        let primary = Primary::new(9);
        let _strong: Strong<_> = Primary::clone_strong(&primary);
        core::mem::drop(primary);
    }

    // This test trips LSAN on Fuchsia for some unknown reason. The host-side
    // test should be enough to protect us against regressing on the panicking
    // check.
    #[cfg(not(target_os = "fuchsia"))]
    #[test]
    #[should_panic(expected = "oopsie")]
    fn double_panic_protect() {
        let primary = Primary::new(9);
        let strong = Primary::clone_strong(&primary);
        // This will cause primary to be dropped before strong and would yield a
        // double panic if we didn't protect against it in Primary's Drop impl.
        let _tuple_to_invert_drop_order = (primary, strong);
        panic!("oopsie");
    }

    #[cfg(feature = "rc-debug-names")]
    #[test]
    fn tracked_callers() {
        let primary = Primary::new(10);
        // Mark this position so we ensure all track_caller marks are correct in
        // the methods that support it.
        let here = Location::caller();
        let strong1 = Primary::clone_strong(&primary);
        let strong2 = strong1.clone();
        let weak = Strong::downgrade(&strong2);
        let strong3 = weak.upgrade().unwrap();

        let Primary { inner } = &primary;
        let Inner { marked_for_destruction: _, callers, data: _, notifier: _, debug_token: _ } =
            &***inner;

        let strongs = [strong1, strong2, strong3];
        let _: &Location<'_> = strongs.iter().enumerate().fold(here, |prev, (i, cur)| {
            let Strong { inner: _, caller: caller::TrackedCaller { location: cur } } = cur;
            assert_eq!(prev.file(), cur.file(), "{i}");
            assert!(prev.line() < cur.line(), "{prev} < {cur}, {i}");
            {
                let callers = callers.callers.lock().unwrap();
                assert_eq!(callers.get(cur).copied(), Some(1));
            }

            cur
        });

        // All callers must be removed from the callers map on drop.
        std::mem::drop(strongs);
        {
            let callers = callers.callers.lock().unwrap();
            let callers = callers.deref();
            assert!(callers.is_empty(), "{callers:?}");
        }
    }

    #[cfg(feature = "rc-debug-names")]
    #[test]
    fn same_location_caller_tracking() {
        fn clone_in_fn<T>(p: &Primary<T>) -> Strong<T> {
            Primary::clone_strong(p)
        }

        let primary = Primary::new(10);
        let strong1 = clone_in_fn(&primary);
        let strong2 = clone_in_fn(&primary);
        assert_eq!(strong1.caller.location, strong2.caller.location);

        let Primary { inner } = &primary;
        let Inner { marked_for_destruction: _, callers, data: _, notifier: _, debug_token: _ } =
            &***inner;

        {
            let callers = callers.callers.lock().unwrap();
            assert_eq!(callers.get(&strong1.caller.location).copied(), Some(2));
        }

        std::mem::drop(strong1);
        std::mem::drop(strong2);

        {
            let callers = callers.callers.lock().unwrap();
            let callers = callers.deref();
            assert!(callers.is_empty(), "{callers:?}");
        }
    }

    #[cfg(feature = "rc-debug-names")]
    #[test]
    #[should_panic(expected = "core/sync/src/rc.rs")]
    fn callers_in_panic() {
        let primary = Primary::new(10);
        let _strong = Primary::clone_strong(&primary);
        drop(primary);
    }

    #[test]
    fn unwrap_with_notifier() {
        let primary = Primary::new(10);
        let strong = Primary::clone_strong(&primary);
        let notifier = ArcNotifier::new();
        Primary::unwrap_with_notifier(primary, notifier.clone());
        // Strong reference is still alive.
        assert_eq!(notifier.take(), None);
        core::mem::drop(strong);
        assert_eq!(notifier.take(), Some(10));
    }

    #[test]
    fn unwrap_or_notify_with_immediate() {
        let primary = Primary::new(10);
        let result = Primary::unwrap_or_notify_with::<ArcNotifier<_>, (), _>(primary, || {
            panic!("should not try to create notifier")
        });
        assert_eq!(result, Ok(10));
    }

    #[test]
    fn unwrap_or_notify_with_deferred() {
        let primary = Primary::new(10);
        let strong = Primary::clone_strong(&primary);
        let result = Primary::unwrap_or_notify_with(primary, || {
            let notifier = ArcNotifier::new();
            (notifier.clone(), notifier)
        });
        let notifier = result.unwrap_err();
        assert_eq!(notifier.take(), None);
        core::mem::drop(strong);
        assert_eq!(notifier.take(), Some(10));
    }

    #[test]
    fn map_notifier() {
        let primary = Primary::new(10);
        let notifier = ArcNotifier::new();
        let map_notifier = MapNotifier::new(notifier.clone(), |data| (data, data + 1));
        Primary::unwrap_with_notifier(primary, map_notifier);
        assert_eq!(notifier.take(), Some((10, 11)));
    }

    #[test]
    fn new_cyclic() {
        #[derive(Debug)]
        struct Data {
            value: i32,
            weak: Weak<Data>,
        }

        let primary = Primary::new_cyclic(|weak| Data { value: 2, weak });
        assert_eq!(primary.value, 2);
        let strong = primary.weak.upgrade().unwrap();
        assert_eq!(strong.value, 2);
        assert!(Primary::ptr_eq(&primary, &strong));
    }

    macro_rules! assert_debug_id_eq {
        ($id1:expr, $id2:expr) => {
            assert_eq!(alloc::format!("{:?}", $id1), alloc::format!("{:?}", $id2))
        };
    }
    macro_rules! assert_debug_id_ne {
        ($id1:expr, $id2:expr) => {
            assert_ne!(alloc::format!("{:?}", $id1), alloc::format!("{:?}", $id2))
        };
    }

    #[test]
    fn debug_ids_are_stable() {
        // Verify that transforming a given RC doesn't change its debug_id.
        let primary = Primary::new(1);
        let strong = Primary::clone_strong(&primary);
        let weak_p = Primary::downgrade(&primary);
        let weak_s = Strong::downgrade(&strong);
        let weak_c = weak_p.clone();
        assert_debug_id_eq!(&primary.debug_id(), &strong.debug_id());
        assert_debug_id_eq!(&primary.debug_id(), &weak_p.debug_id());
        assert_debug_id_eq!(&primary.debug_id(), &weak_s.debug_id());
        assert_debug_id_eq!(&primary.debug_id(), &weak_c.debug_id());
    }

    #[test]
    fn debug_ids_are_unique() {
        // Verify that RCs to different data have different debug_ids.
        let primary1 = Primary::new(1);
        let primary2 = Primary::new(1);
        assert_debug_id_ne!(&primary1.debug_id(), &primary2.debug_id());

        // Verify that dropping an RC does not allow its debug_id to be reused.
        let id1 = format!("{:?}", primary1.debug_id());
        std::mem::drop(primary1);
        let primary3 = Primary::new(1);
        assert_ne!(id1, format!("{:?}", primary3.debug_id()));
    }
}
1089}