// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Synchronized reference counting primitives.
//!
//! This module introduces a family of reference counted types that allow
//! marking the underlying data for destruction before all strong references
//! to the data are dropped. This enables the following features:
//!   * Upgrading a weak reference to a strong reference succeeds iff at least
//!     one strong reference exists _and_ the data has not been marked for
//!     destruction.
//!   * Waiting for all strongly-held references to be dropped after marking
//!     the data for destruction.
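//!
//! A minimal usage sketch of the lifecycle (the unit tests at the bottom of
//! this file exercise compiled variants of this flow):
//!
//! ```ignore
//! let primary = Primary::new(8u32);
//! let strong = Primary::clone_strong(&primary);
//! let weak = Strong::downgrade(&strong);
//!
//! // Upgrading succeeds while strong references exist and the data is not
//! // marked for destruction.
//! assert!(weak.upgrade().is_some());
//!
//! // All `Strong` references must be dropped before the `Primary`; dropping
//! // the `Primary` marks the data for destruction...
//! core::mem::drop(strong);
//! core::mem::drop(primary);
//!
//! // ...after which weak references can no longer be upgraded.
//! assert!(weak.upgrade().is_none());
//! ```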

use core::fmt::Debug;
use core::hash::{Hash, Hasher};
use core::ops::Deref;
use core::panic::Location;
use core::sync::atomic::{AtomicBool, Ordering};

use derivative::Derivative;

mod caller {
    //! Provides tracking of instances via tracked caller location.
    //!
    //! Callers are only tracked when the `rc-debug-names` feature is enabled;
    //! otherwise, all operations and types are no-ops and empty.

    use core::fmt::Debug;
    use core::panic::Location;

    /// Records the caller locations of reference-counted instances.
    #[derive(Default)]
    pub(super) struct Callers {
        /// The locations that were inserted and aren't known to be gone.
        ///
        /// Each entry maps a caller [`Location`] to the number of live
        /// references created there; an entry is removed once its count drops
        /// back to zero.
        ///
        /// Note that using [`std::sync::Mutex`] here is intentional to opt this
        /// out of loom checking, which makes testing with `rc-debug-names`
        /// impossibly slow.
        #[cfg(feature = "rc-debug-names")]
        pub(super) callers: std::sync::Mutex<std::collections::HashMap<Location<'static>, usize>>,
    }

    impl Debug for Callers {
        #[cfg(not(feature = "rc-debug-names"))]
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            write!(f, "(Not Tracked)")
        }
        #[cfg(feature = "rc-debug-names")]
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            let Self { callers } = self;
            let callers = callers.lock().unwrap();
            write!(f, "[\n")?;
            for (l, c) in callers.iter() {
                write!(f, "   {l} => {c},\n")?;
            }
            write!(f, "]")
        }
    }

    impl Callers {
        /// Records the given [`Location`] and returns a [`TrackedCaller`] for
        /// it.
        ///
        /// Without the `rc-debug-names` feature, this is a no-op.
        pub(super) fn insert(&self, caller: &Location<'static>) -> TrackedCaller {
            #[cfg(not(feature = "rc-debug-names"))]
            {
                let _ = caller;
                TrackedCaller {}
            }
            #[cfg(feature = "rc-debug-names")]
            {
                let Self { callers } = self;
                let mut callers = callers.lock().unwrap();
                let count = callers.entry(caller.clone()).or_insert(0);
                *count += 1;
                TrackedCaller { location: caller.clone() }
            }
        }
    }

    #[derive(Debug)]
    pub(super) struct TrackedCaller {
        #[cfg(feature = "rc-debug-names")]
        pub(super) location: Location<'static>,
    }

    impl TrackedCaller {
        #[cfg(not(feature = "rc-debug-names"))]
        pub(super) fn release(&mut self, Callers {}: &Callers) {
            let Self {} = self;
        }

        #[cfg(feature = "rc-debug-names")]
        pub(super) fn release(&mut self, Callers { callers }: &Callers) {
            let Self { location } = self;
            let mut callers = callers.lock().unwrap();
            let mut entry = match callers.entry(location.clone()) {
                std::collections::hash_map::Entry::Vacant(_) => {
                    panic!("location {location:?} was not in the callers map")
                }
                std::collections::hash_map::Entry::Occupied(o) => o,
            };

            let sub = entry
                .get()
                .checked_sub(1)
                .unwrap_or_else(|| panic!("zero-count location {location:?} in map"));
            if sub == 0 {
                let _: usize = entry.remove();
            } else {
                *entry.get_mut() = sub;
            }
        }
    }
}

mod resource_token {
    use core::fmt::Debug;
    use core::sync::atomic::{AtomicU64, Ordering};
    use std::marker::PhantomData;

    /// An opaque token associated with a resource.
    ///
    /// It can be used to create debug and trace identifiers for the resource,
    /// but it should not be used as a unique identifier of the resource inside
    /// the netstack.
    ///
    /// By default the lifetime of a token is bound to the resource the token
    /// belongs to, but it can be extended by calling
    /// [`ResourceToken::extend_lifetime`].
    pub struct ResourceToken<'a> {
        value: u64,
        _marker: PhantomData<&'a ()>,
    }

    impl<'a> ResourceToken<'a> {
        /// Extends the lifetime of the token.
        ///
        /// # Discussion
        ///
        /// It's generally okay to extend the lifetime of the token, but prefer
        /// tokens bound to the resource's lifetime whenever possible, since
        /// they provide guardrails against identifiers that outlive the
        /// resource itself.
        pub fn extend_lifetime(self) -> ResourceToken<'static> {
            ResourceToken { value: self.value, _marker: PhantomData }
        }

        /// Returns the internal value, consuming `self`.
        ///
        /// # Discussion
        ///
        /// Export to `u64` when a representation is needed for interaction
        /// with other processes or components, such as trace identifiers and
        /// eBPF socket cookies.
        ///
        /// Refrain from using the returned value within the netstack otherwise.
        pub fn export_value(self) -> u64 {
            self.value
        }
    }

    impl<'a> Debug for ResourceToken<'a> {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
            write!(f, "{}", self.value)
        }
    }

    /// Holder of a value for `ResourceToken`. Vends `ResourceToken` instances
    /// with the same value and a lifetime bound to the lifetime of the holder.
    ///
    /// The [`Default`] implementation generates a new unique value.
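    ///
    /// # Example
    ///
    /// A minimal sketch (using only the methods below): tokens vended by the
    /// same holder carry the same exported value.
    ///
    /// ```ignore
    /// let holder = ResourceTokenValue::default();
    /// let (t1, t2) = (holder.token(), holder.token());
    /// assert_eq!(t1.export_value(), t2.export_value());
    /// ```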
    pub struct ResourceTokenValue(u64);

    impl ResourceTokenValue {
        /// Creates a new token.
        pub fn token(&self) -> ResourceToken<'_> {
            let ResourceTokenValue(value) = self;
            ResourceToken { value: *value, _marker: PhantomData }
        }
    }

    impl core::fmt::Debug for ResourceTokenValue {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
            let ResourceTokenValue(value) = self;
            write!(f, "{}", value)
        }
    }

    impl Default for ResourceTokenValue {
        fn default() -> Self {
            static NEXT_TOKEN: AtomicU64 = AtomicU64::new(0);
            // NB: `fetch_add` will cause the counter to wrap around to 0 if we
            // happen to exceed `u64::MAX` instantiations. In practice, that's
            // an impossibility (at 1 billion instantiations per second, the
            // counter is valid for > 500 years). Spare the CPU cycles and don't
            // bother attempting to detect/handle overflow.
            Self(NEXT_TOKEN.fetch_add(1, Ordering::Relaxed))
        }
    }
}

pub use resource_token::{ResourceToken, ResourceTokenValue};

mod debug_id {
    use super::ResourceToken;
    use core::fmt::Debug;

    /// A debug identifier for the RC types exposed in the parent module.
    ///
    /// Encompasses the underlying pointer for the RC type, as well as
    /// (optionally) the globally unique [`ResourceToken`].
    pub(super) enum DebugId<T> {
        /// Used in contexts that have access to the [`ResourceToken`], e.g.
        /// [`Primary`], [`Strong`], and sometimes [`Weak`] RC types.
        WithToken { ptr: *const T, token: ResourceToken<'static> },
        /// Used in contexts that don't have access to the [`ResourceToken`],
        /// e.g. [`Weak`] RC types that cannot be upgraded.
        WithoutToken { ptr: *const T },
    }

    impl<T> Debug for DebugId<T> {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
            match self {
                DebugId::WithToken { ptr, token } => write!(f, "{:?}:{:?}", token, ptr),
                DebugId::WithoutToken { ptr } => write!(f, "?:{:?}", ptr),
            }
        }
    }
}

#[derive(Derivative)]
#[derivative(Debug)]
struct Inner<T> {
    marked_for_destruction: AtomicBool,
    callers: caller::Callers,
    data: core::mem::ManuallyDrop<T>,
    // NB: Notifier could be an atomic pointer or atomic box, but this mutex is
    // never contended and we don't have to import new code into the repository
    // (i.e. atomicbox) or write unsafe code.
    #[derivative(Debug = "ignore")]
    notifier: crate::Mutex<Option<Box<dyn Notifier<T>>>>,
    resource_token: ResourceTokenValue,
}

impl<T> Inner<T> {
    fn pre_drop_check(marked_for_destruction: &AtomicBool) {
        // `Ordering::Acquire` because we want to synchronize with the
        // `Ordering::Release` write to `marked_for_destruction` so that all
        // memory writes before the reference was marked for destruction are
        // visible here.
        assert!(marked_for_destruction.load(Ordering::Acquire), "Must be marked for destruction");
    }

    fn unwrap(mut self) -> T {
        // We cannot destructure `self` by value since `Inner` implements
        // `Drop`. So we must manually drop all the fields but `data` and then
        // forget `self`.
        let Inner { marked_for_destruction, data, callers: holders, notifier, resource_token } =
            &mut self;

        // Make sure that `inner` is in a valid state for destruction.
        //
        // Note that we do not actually destroy all of `self` here; we
        // decompose it into its parts, keeping what we need & throwing away
        // what we don't. Regardless, we perform the same checks.
        Inner::<T>::pre_drop_check(marked_for_destruction);

        // SAFETY: Safe since we own `self` and `self` is immediately forgotten
        // below, so its destructor (and those of its fields) will not be run
        // as a result of `self` being dropped.
        let data = unsafe {
            // Explicitly drop since we do not need these anymore.
            core::ptr::drop_in_place(marked_for_destruction);
            core::ptr::drop_in_place(holders);
            core::ptr::drop_in_place(notifier);
            core::ptr::drop_in_place(resource_token);

            core::mem::ManuallyDrop::take(data)
        };
        // Forget `self` now to prevent its `Drop::drop` impl from being run,
        // which would attempt to destroy `data` and re-run the pre-drop checks
        // on `Inner`'s state.
        core::mem::forget(self);

        data
    }

    /// Sets the notifier for this `Inner`.
    ///
    /// # Panics
    ///
    /// Panics if a notifier is already set.
    fn set_notifier<N: Notifier<T> + 'static>(&self, notifier: N) {
        let Self { notifier: slot, .. } = self;

        // Using dynamic dispatch to notify allows us to avoid knowing the
        // notifier type at creation time and spreading that type parameter
        // over all the reference types in this crate. The assumption is that
        // the allocation and dynamic dispatch costs here are tiny compared to
        // the overall work of destroying the resources this module is
        // targeting.
        let boxed: Box<dyn Notifier<T>> = Box::new(notifier);
        let prev_notifier = { slot.lock().replace(boxed) };
        // Uphold the invariant that this can only be done from `Primary`.
        assert!(prev_notifier.is_none(), "can't have a notifier already installed");
    }
}

impl<T> Drop for Inner<T> {
    fn drop(&mut self) {
        let Inner { marked_for_destruction, data, callers: _, notifier, resource_token: _ } = self;
        // Take data out of ManuallyDrop in case we panic in pre_drop_check.
        // That'll ensure data is dropped if we hit the panic.
        //
        // SAFETY: Safe because the ManuallyDrop is not referenced again after
        // taking.
        let data = unsafe { core::mem::ManuallyDrop::take(data) };
        Self::pre_drop_check(marked_for_destruction);
        if let Some(mut notifier) = notifier.lock().take() {
            notifier.notify(data);
        }
    }
}

/// A primary reference.
///
/// Note that only one `Primary` may be associated with data. This is
/// enforced by not implementing [`Clone`].
///
/// For now, this reference is no different from a [`Strong`], but later
/// changes will enable blocking the destruction of a primary reference until
/// all strongly held references are dropped.
#[derive(Debug)]
pub struct Primary<T> {
    inner: core::mem::ManuallyDrop<alloc::sync::Arc<Inner<T>>>,
}

impl<T> Drop for Primary<T> {
    fn drop(&mut self) {
        let was_marked = self.mark_for_destruction();
        let Self { inner } = self;
        // Take `inner` out of ManuallyDrop early so its Drop impl will run in
        // case we panic here.
        // SAFETY: Safe because we don't reference the ManuallyDrop again.
        let inner = unsafe { core::mem::ManuallyDrop::take(inner) };

        // Make debugging easier: don't panic if a panic is already happening,
        // since double-panics are annoying to debug. This means that the
        // invariants provided by Primary are possibly violated during an
        // unwind, but we're sidestepping that problem because Fuchsia is our
        // only audience here.
        if !std::thread::panicking() {
            assert_eq!(was_marked, false, "Must not be marked for destruction yet");

            let Inner {
                marked_for_destruction: _,
                callers,
                data: _,
                notifier: _,
                resource_token: _,
            } = &*inner;

            // Make sure that this `Primary` is the last thing to hold a strong
            // reference to the underlying data when it is being dropped.
            let refs = alloc::sync::Arc::strong_count(&inner).checked_sub(1).unwrap();
            assert!(
                refs == 0,
                "dropped Primary with {refs} strong refs remaining, \
                            Callers={callers:?}"
            );
        }
    }
}

impl<T> AsRef<T> for Primary<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T> Deref for Primary<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let Self { inner } = self;
        let Inner { marked_for_destruction: _, data, callers: _, notifier: _, resource_token: _ } =
            &***inner;
        data
    }
}

impl<T> Primary<T> {
    // Marks this primary reference as ready for destruction. Used by all
    // dropping flows. We take `&mut self` here to ensure we have the only
    // possible reference to Primary. Returns whether it was already marked
    // for destruction.
    fn mark_for_destruction(&mut self) -> bool {
        let Self { inner } = self;
        // `Ordering::Release` because we want to make sure that all memory
        // writes before dropping this `Primary` synchronize with later
        // attempts to upgrade weak pointers and the `Drop::drop` impl of
        // `Inner`.
        inner.marked_for_destruction.swap(true, Ordering::Release)
    }

    /// Constructs a new `Primary<T>` holding `data`.
    pub fn new(data: T) -> Primary<T> {
        Primary {
            inner: core::mem::ManuallyDrop::new(alloc::sync::Arc::new(Inner {
                marked_for_destruction: AtomicBool::new(false),
                callers: caller::Callers::default(),
                data: core::mem::ManuallyDrop::new(data),
                notifier: crate::Mutex::new(None),
                resource_token: ResourceTokenValue::default(),
            })),
        }
    }

    /// Constructs a new `Primary<T>` while giving you a [`Weak`] to the
    /// allocation, to allow you to construct a `T` which holds a weak pointer
    /// to itself.
    ///
    /// As with [`Arc::new_cyclic`], the `Weak` reference provided to `data_fn`
    /// cannot be upgraded until the [`Primary`] is constructed.
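    ///
    /// # Example
    ///
    /// A minimal sketch of the self-referential pattern (the `Node` type is
    /// hypothetical):
    ///
    /// ```ignore
    /// struct Node {
    ///     me: Weak<Node>,
    /// }
    ///
    /// let primary = Primary::new_cyclic(|me| Node { me });
    /// // Once the `Primary` exists, the stored weak reference upgrades to
    /// // the same allocation.
    /// assert!(Primary::ptr_eq(&primary, &primary.me.upgrade().unwrap()));
    /// ```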
    pub fn new_cyclic(data_fn: impl FnOnce(Weak<T>) -> T) -> Primary<T> {
        Primary {
            inner: core::mem::ManuallyDrop::new(alloc::sync::Arc::new_cyclic(move |weak| Inner {
                marked_for_destruction: AtomicBool::new(false),
                callers: caller::Callers::default(),
                data: core::mem::ManuallyDrop::new(data_fn(Weak(weak.clone()))),
                notifier: crate::Mutex::new(None),
                resource_token: ResourceTokenValue::default(),
            })),
        }
    }

    /// Clones a strongly-held reference.
    #[cfg_attr(feature = "rc-debug-names", track_caller)]
    pub fn clone_strong(Self { inner }: &Self) -> Strong<T> {
        let Inner { data: _, callers, marked_for_destruction: _, notifier: _, resource_token: _ } =
            &***inner;
        let caller = callers.insert(Location::caller());
        Strong { inner: alloc::sync::Arc::clone(inner), caller }
    }

    /// Returns a weak reference pointing to the same underlying data.
    pub fn downgrade(Self { inner }: &Self) -> Weak<T> {
        Weak(alloc::sync::Arc::downgrade(inner))
    }

    /// Returns true if the two pointers point to the same allocation.
    pub fn ptr_eq(
        Self { inner: this }: &Self,
        Strong { inner: other, caller: _ }: &Strong<T>,
    ) -> bool {
        alloc::sync::Arc::ptr_eq(this, other)
    }

    /// Returns a [`Debug`] implementation that is stable and unique
    /// for the data held behind this [`Primary`].
    pub fn debug_id(&self) -> impl Debug + '_ {
        let Self { inner } = self;

        // The lifetime of the returned `DebugId` is bound to the lifetime
        // of `self`.
        let token = inner.resource_token.token().extend_lifetime();

        debug_id::DebugId::WithToken { ptr: alloc::sync::Arc::as_ptr(inner), token }
    }

    fn mark_for_destruction_and_take_inner(mut this: Self) -> alloc::sync::Arc<Inner<T>> {
        // Prepare for destruction.
        assert!(!this.mark_for_destruction());
        let Self { inner } = &mut this;
        // SAFETY: Safe because `inner` can't be used after this. We forget
        // our `Primary` reference to prevent its Drop impl from running.
        let inner = unsafe { core::mem::ManuallyDrop::take(inner) };
        core::mem::forget(this);
        inner
    }

    fn try_unwrap(this: Self) -> Result<T, alloc::sync::Arc<Inner<T>>> {
        let inner = Self::mark_for_destruction_and_take_inner(this);
        alloc::sync::Arc::try_unwrap(inner).map(Inner::unwrap)
    }

    /// Returns the inner value if no [`Strong`] references are held.
    ///
    /// # Panics
    ///
    /// Panics if [`Strong`] references are held when this function is called.
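    ///
    /// # Example
    ///
    /// A minimal sketch (mirroring the unit tests below):
    ///
    /// ```ignore
    /// let primary = Primary::new(6u16);
    /// // No `Strong` references exist, so the data can be unwrapped.
    /// assert_eq!(Primary::unwrap(primary), 6);
    /// ```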
    pub fn unwrap(this: Self) -> T {
        Self::try_unwrap(this).unwrap_or_else(|inner| {
            let callers = &inner.callers;
            let refs = alloc::sync::Arc::strong_count(&inner).checked_sub(1).unwrap();
            panic!("can't unwrap, still had {refs} strong refs: {callers:?}");
        })
    }

    /// Marks this [`Primary`] for destruction and uses `notifier` as a
    /// signaler for when all strong references are dropped. After calling
    /// `unwrap_with_notifier`, [`Weak`] references can no longer be upgraded.
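    ///
    /// # Example
    ///
    /// A minimal sketch using [`ArcNotifier`] (mirroring the unit tests
    /// below):
    ///
    /// ```ignore
    /// let primary = Primary::new(10);
    /// let strong = Primary::clone_strong(&primary);
    /// let notifier = ArcNotifier::new();
    /// Primary::unwrap_with_notifier(primary, notifier.clone());
    /// // A strong reference is still alive, so nothing is notified yet.
    /// assert_eq!(notifier.take(), None);
    /// // Dropping the last strong reference triggers the notifier.
    /// core::mem::drop(strong);
    /// assert_eq!(notifier.take(), Some(10));
    /// ```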
    pub fn unwrap_with_notifier<N: Notifier<T> + 'static>(this: Self, notifier: N) {
        let inner = Self::mark_for_destruction_and_take_inner(this);
        inner.set_notifier(notifier);
        // Now we can drop our `inner` reference; if we were the last, this
        // will trigger the notifier.
        core::mem::drop(inner);
    }

    /// Marks this [`Primary`] for destruction and returns `Ok` with the inner
    /// value if this was the last strong reference standing for it. Otherwise
    /// `new_notifier` is called to create a new notifier to observe deferred
    /// destruction.
    ///
    /// Like [`Primary::unwrap_with_notifier`], [`Weak`] references can no
    /// longer be upgraded after calling `unwrap_or_notify_with`.
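    ///
    /// # Example
    ///
    /// A minimal sketch of the immediate path (mirroring the unit tests
    /// below); when strong references remain, the `O` value produced by
    /// `new_notifier` is returned in `Err` instead:
    ///
    /// ```ignore
    /// let primary = Primary::new(10);
    /// let result = Primary::unwrap_or_notify_with::<ArcNotifier<_>, (), _>(primary, || {
    ///     panic!("no strong references are held, so no notifier is created")
    /// });
    /// assert_eq!(result, Ok(10));
    /// ```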
    pub fn unwrap_or_notify_with<N: Notifier<T> + 'static, O, F: FnOnce() -> (N, O)>(
        this: Self,
        new_notifier: F,
    ) -> Result<T, O> {
        Self::try_unwrap(this).map_err(move |inner| {
            let (notifier, output) = new_notifier();
            inner.set_notifier(notifier);
            output
        })
    }

    /// Creates a [`DebugReferences`] instance.
    pub fn debug_references(this: &Self) -> DebugReferences<T> {
        let Self { inner } = this;
        DebugReferences(alloc::sync::Arc::downgrade(&*inner))
    }
}

/// A strongly-held reference.
///
/// Similar to an [`alloc::sync::Arc`], but holding a `Strong` acts as a
/// witness to the liveness of the underlying data. That is, holding a
/// `Strong` implies that the underlying data has not yet been destroyed.
///
/// Note that `Strong`'s implementations of [`Hash`] and [`PartialEq`] operate
/// on the pointer itself and not the underlying data.
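///
/// # Example
///
/// A minimal sketch of the pointer-based equality:
///
/// ```ignore
/// let primary = Primary::new(1);
/// let a = Primary::clone_strong(&primary);
/// let b = Primary::clone_strong(&primary);
/// // Same allocation, so equal, independently of the data.
/// assert!(a == b);
///
/// // A different allocation compares unequal even with equal data.
/// let other = Primary::new(1);
/// assert!(a != Primary::clone_strong(&other));
/// ```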
#[derive(Debug, Derivative)]
pub struct Strong<T> {
    inner: alloc::sync::Arc<Inner<T>>,
    caller: caller::TrackedCaller,
}

impl<T> Drop for Strong<T> {
    fn drop(&mut self) {
        let Self { inner, caller } = self;
        let Inner { marked_for_destruction: _, callers, data: _, notifier: _, resource_token: _ } =
            &**inner;
        caller.release(callers);
    }
}

impl<T> AsRef<T> for Strong<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T> Deref for Strong<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let Self { inner, caller: _ } = self;
        let Inner { marked_for_destruction: _, data, callers: _, notifier: _, resource_token: _ } =
            inner.deref();
        data
    }
}

impl<T> core::cmp::Eq for Strong<T> {}

impl<T> core::cmp::PartialEq for Strong<T> {
    fn eq(&self, other: &Self) -> bool {
        Self::ptr_eq(self, other)
    }
}

impl<T> Hash for Strong<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let Self { inner, caller: _ } = self;
        alloc::sync::Arc::as_ptr(inner).hash(state)
    }
}

impl<T> Clone for Strong<T> {
    #[cfg_attr(feature = "rc-debug-names", track_caller)]
    fn clone(&self) -> Self {
        let Self { inner, caller: _ } = self;
        let Inner { data: _, marked_for_destruction: _, callers, notifier: _, resource_token: _ } =
            &**inner;
        let caller = callers.insert(Location::caller());
        Self { inner: alloc::sync::Arc::clone(inner), caller }
    }
}

impl<T> Strong<T> {
    /// Returns a weak reference pointing to the same underlying data.
    pub fn downgrade(Self { inner, caller: _ }: &Self) -> Weak<T> {
        Weak(alloc::sync::Arc::downgrade(inner))
    }

    /// Returns a [`Debug`] implementation that is stable and unique
    /// for the data held behind this [`Strong`].
    pub fn debug_id(&self) -> impl Debug + '_ {
        let Self { inner, caller: _ } = self;

        // The lifetime of the returned `DebugId` is bound to the lifetime
        // of `self`.
        let token = inner.resource_token.token().extend_lifetime();

        debug_id::DebugId::WithToken { ptr: alloc::sync::Arc::as_ptr(inner), token }
    }

    /// Returns a [`ResourceToken`] that corresponds to this object.
    pub fn resource_token(&self) -> ResourceToken<'_> {
        self.inner.resource_token.token()
    }

    /// Returns true if the inner value has been marked for destruction.
    pub fn marked_for_destruction(Self { inner, caller: _ }: &Self) -> bool {
        let Inner { marked_for_destruction, data: _, callers: _, notifier: _, resource_token: _ } =
            inner.as_ref();
        // `Ordering::Acquire` because we want to synchronize with the
        // `Ordering::Release` write to `marked_for_destruction` so that all
        // memory writes before the reference was marked for destruction are
        // visible here.
        marked_for_destruction.load(Ordering::Acquire)
    }

    /// Returns true if the two pointers point to the same allocation.
    pub fn weak_ptr_eq(Self { inner: this, caller: _ }: &Self, Weak(other): &Weak<T>) -> bool {
        core::ptr::eq(alloc::sync::Arc::as_ptr(this), other.as_ptr())
    }

    /// Returns true if the two pointers point to the same allocation.
    pub fn ptr_eq(
        Self { inner: this, caller: _ }: &Self,
        Self { inner: other, caller: _ }: &Self,
    ) -> bool {
        alloc::sync::Arc::ptr_eq(this, other)
    }

    /// Compares the two pointers.
    pub fn ptr_cmp(
        Self { inner: this, caller: _ }: &Self,
        Self { inner: other, caller: _ }: &Self,
    ) -> core::cmp::Ordering {
        let this = alloc::sync::Arc::as_ptr(this);
        let other = alloc::sync::Arc::as_ptr(other);
        this.cmp(&other)
    }

    /// Creates a [`DebugReferences`] instance.
    pub fn debug_references(this: &Self) -> DebugReferences<T> {
        let Self { inner, caller: _ } = this;
        DebugReferences(alloc::sync::Arc::downgrade(inner))
    }
}

/// A weakly-held reference.
///
/// Similar to an [`alloc::sync::Weak`].
///
/// A `Weak` does not make any claim to the liveness of the underlying data.
/// Holders of a `Weak` must attempt to upgrade to a [`Strong`] through
/// [`Weak::upgrade`] to access the underlying data.
///
/// Note that `Weak`'s implementations of [`Hash`] and [`PartialEq`] operate
/// on the pointer itself and not the underlying data.
#[derive(Debug)]
pub struct Weak<T>(alloc::sync::Weak<Inner<T>>);

impl<T> core::cmp::Eq for Weak<T> {}

impl<T> core::cmp::PartialEq for Weak<T> {
    fn eq(&self, other: &Self) -> bool {
        Self::ptr_eq(self, other)
    }
}

impl<T> Hash for Weak<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let Self(this) = self;
        this.as_ptr().hash(state)
    }
}

impl<T> Clone for Weak<T> {
    fn clone(&self) -> Self {
        let Self(this) = self;
        Weak(this.clone())
    }
}

impl<T> Weak<T> {
    /// Returns true if the two pointers point to the same allocation.
    pub fn ptr_eq(&self, Self(other): &Self) -> bool {
        let Self(this) = self;
        this.ptr_eq(other)
    }

    /// Returns a [`Debug`] implementation that is stable and unique
    /// for the data held behind this [`Weak`].
    pub fn debug_id(&self) -> impl Debug + '_ {
        match self.upgrade() {
            Some(strong) => {
                let Strong { inner, caller: _ } = &strong;

                // The lifetime of the returned `DebugId` is still bound to the
                // lifetime of `self`.
                let token = inner.resource_token.token().extend_lifetime();

                debug_id::DebugId::WithToken { ptr: alloc::sync::Arc::as_ptr(&inner), token }
            }
            None => {
                let Self(this) = self;
                // NB: If we can't upgrade the reference, we can't know the
                // token.
                debug_id::DebugId::WithoutToken { ptr: this.as_ptr() }
            }
        }
    }

    /// Attempts to upgrade to a [`Strong`].
    ///
    /// Returns `None` if the inner value has since been marked for
    /// destruction.
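    ///
    /// # Example
    ///
    /// A minimal sketch (mirroring the unit tests below):
    ///
    /// ```ignore
    /// let primary = Primary::new(());
    /// let weak = Primary::downgrade(&primary);
    /// // Upgrading succeeds while the `Primary` is alive...
    /// assert!(weak.upgrade().is_some());
    /// core::mem::drop(primary);
    /// // ...and fails once the data is marked for destruction.
    /// assert!(weak.upgrade().is_none());
    /// ```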
    #[cfg_attr(feature = "rc-debug-names", track_caller)]
    pub fn upgrade(&self) -> Option<Strong<T>> {
        let Self(weak) = self;
        let arc = weak.upgrade()?;
        let Inner { marked_for_destruction, data: _, callers, notifier: _, resource_token: _ } =
            arc.deref();

        // `Ordering::Acquire` because we want to synchronize with the
        // `Ordering::Release` write to `marked_for_destruction` so that all
        // memory writes before the reference was marked for destruction are
        // visible here.
        if !marked_for_destruction.load(Ordering::Acquire) {
            let caller = callers.insert(Location::caller());
            Some(Strong { inner: arc, caller })
        } else {
            None
        }
    }

    /// Gets the number of [`Primary`] and [`Strong`] references to this allocation.
    pub fn strong_count(&self) -> usize {
        let Self(weak) = self;
        weak.strong_count()
    }

    /// Creates a [`DebugReferences`] instance.
    pub fn debug_references(&self) -> DebugReferences<T> {
        let Self(inner) = self;
        DebugReferences(inner.clone())
    }
}

fn debug_refs(
    refs: Option<(usize, &AtomicBool, &caller::Callers)>,
    name: &'static str,
    f: &mut core::fmt::Formatter<'_>,
) -> core::fmt::Result {
    let mut f = f.debug_struct(name);
    match refs {
        Some((strong_count, marked_for_destruction, callers)) => f
            .field("strong_count", &strong_count)
            .field("marked_for_destruction", marked_for_destruction)
            .field("callers", callers)
            .finish(),
        None => {
            let strong_count = 0_usize;
            f.field("strong_count", &strong_count).finish_non_exhaustive()
        }
    }
}

/// Provides a [`Debug`] implementation that contains information helpful for
/// debugging dangling references.
#[derive(Clone)]
pub struct DebugReferences<T>(alloc::sync::Weak<Inner<T>>);

impl<T> Debug for DebugReferences<T> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let Self(inner) = self;
        let inner = inner.upgrade();
        let refs = inner.as_ref().map(|inner| {
            (alloc::sync::Arc::strong_count(inner), &inner.marked_for_destruction, &inner.callers)
        });
        debug_refs(refs, "DebugReferences", f)
    }
}

impl<T: Send + Sync + 'static> DebugReferences<T> {
    /// Transforms this `DebugReferences` into a [`DynDebugReferences`].
    pub fn into_dyn(self) -> DynDebugReferences {
        let Self(w) = self;
        DynDebugReferences(w)
    }
}

/// Like [`DebugReferences`], but type-erases the contained type.
#[derive(Clone)]
pub struct DynDebugReferences(alloc::sync::Weak<dyn ExposeRefs>);

impl Debug for DynDebugReferences {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let Self(inner) = self;
        let inner = inner.upgrade();
        let refs = inner.as_ref().map(|inner| {
            let (marked_for_destruction, callers) = inner.refs_info();
            (alloc::sync::Arc::strong_count(inner), marked_for_destruction, callers)
        });
        debug_refs(refs, "DynDebugReferences", f)
    }
}

/// A trait allowing [`DynDebugReferences`] to erase the `T` type on [`Inner`].
trait ExposeRefs: Send + Sync + 'static {
    fn refs_info(&self) -> (&AtomicBool, &caller::Callers);
}

impl<T: Send + Sync + 'static> ExposeRefs for Inner<T> {
    fn refs_info(&self) -> (&AtomicBool, &caller::Callers) {
        (&self.marked_for_destruction, &self.callers)
    }
}

/// Provides delegated notification of all strong references of a [`Primary`]
/// being dropped.
///
/// See [`Primary::unwrap_with_notifier`].
pub trait Notifier<T>: Send {
    /// Called when the data contained in the [`Primary`] reference can be
    /// extracted out because there are no more strong references to it.
    fn notify(&mut self, data: T);
}

/// An implementation of [`Notifier`] that stores the unwrapped data in a
/// `Clone` type.
///
/// Useful for tests where completion assertions are possible and useful.
#[derive(Debug, Derivative)]
#[derivative(Clone(bound = ""))]
pub struct ArcNotifier<T>(alloc::sync::Arc<crate::Mutex<Option<T>>>);

impl<T> ArcNotifier<T> {
    /// Creates a new `ArcNotifier`.
    pub fn new() -> Self {
        Self(alloc::sync::Arc::new(crate::Mutex::new(None)))
    }

    /// Takes the notified value, if any.
    pub fn take(&self) -> Option<T> {
        let Self(inner) = self;
        inner.lock().take()
    }
}

impl<T: Send> Notifier<T> for ArcNotifier<T> {
    fn notify(&mut self, data: T) {
        let Self(inner) = self;
        assert!(inner.lock().replace(data).is_none(), "notified twice");
    }
}

/// An implementation of [`Notifier`] that wraps another `Notifier` and applies
/// a function on notified objects.
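///
/// # Example
///
/// A minimal sketch composing a [`MapNotifier`] with an [`ArcNotifier`]
/// (mirroring the unit tests below):
///
/// ```ignore
/// let notifier = ArcNotifier::new();
/// let map_notifier = MapNotifier::new(notifier.clone(), |data| (data, data + 1));
/// Primary::unwrap_with_notifier(Primary::new(10), map_notifier);
/// // The mapping function ran on the value before it was stored.
/// assert_eq!(notifier.take(), Some((10, 11)));
/// ```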
pub struct MapNotifier<N, F> {
    inner: N,
    map: Option<F>,
}

impl<N, F> MapNotifier<N, F> {
    /// Creates a new [`MapNotifier`] that wraps `notifier` with a mapping
    /// function `F`.
    pub fn new(notifier: N, map: F) -> Self {
        Self { inner: notifier, map: Some(map) }
    }
}

impl<A, B, N: Notifier<B>, F: FnOnce(A) -> B> Notifier<A> for MapNotifier<N, F>
where
    Self: Send,
{
    fn notify(&mut self, data: A) {
        let Self { inner, map } = self;
        let map = map.take().expect("notified twice");
        inner.notify(map(data))
    }
}

/// A handy implementation for the common `Infallible` "never" type.
impl<T> Notifier<T> for core::convert::Infallible {
    fn notify(&mut self, _data: T) {
        match *self {}
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn zombie_weak() {
        let primary = Primary::new(());
        let weak = {
            let strong = Primary::clone_strong(&primary);
            Strong::downgrade(&strong)
        };
        core::mem::drop(primary);

        assert!(weak.upgrade().is_none());
    }

    #[test]
    fn rcs() {
        const INITIAL_VAL: u8 = 1;
        const NEW_VAL: u8 = 2;

        let primary = Primary::new(crate::sync::Mutex::new(INITIAL_VAL));
        let strong = Primary::clone_strong(&primary);
        let weak = Strong::downgrade(&strong);

        *primary.lock().unwrap() = NEW_VAL;
        assert_eq!(*primary.deref().lock().unwrap(), NEW_VAL);
        assert_eq!(*strong.deref().lock().unwrap(), NEW_VAL);
        assert_eq!(*weak.upgrade().unwrap().deref().lock().unwrap(), NEW_VAL);
    }

    #[test]
    fn unwrap_primary_without_strong_held() {
        const VAL: u16 = 6;
        let primary = Primary::new(VAL);
        assert_eq!(Primary::unwrap(primary), VAL);
    }

    #[test]
    #[should_panic(expected = "can't unwrap, still had 1 strong refs")]
    fn unwrap_primary_with_strong_held() {
        let primary = Primary::new(8);
        let _strong: Strong<_> = Primary::clone_strong(&primary);
        let _: u16 = Primary::unwrap(primary);
    }

    #[test]
    #[should_panic(expected = "dropped Primary with 1 strong refs remaining")]
    fn drop_primary_with_strong_held() {
        let primary = Primary::new(9);
        let _strong: Strong<_> = Primary::clone_strong(&primary);
        core::mem::drop(primary);
    }

    // This test trips LSAN on Fuchsia for some unknown reason. The host-side
    // test should be enough to protect us against regressing on the panicking
    // check.
    #[cfg(not(target_os = "fuchsia"))]
    #[test]
    #[should_panic(expected = "oopsie")]
    fn double_panic_protect() {
        let primary = Primary::new(9);
        let strong = Primary::clone_strong(&primary);
        // This will cause primary to be dropped before strong and would yield
        // a double panic if we didn't protect against it in Primary's Drop
        // impl.
        let _tuple_to_invert_drop_order = (primary, strong);
        panic!("oopsie");
    }

    #[cfg(feature = "rc-debug-names")]
    #[test]
    fn tracked_callers() {
        let primary = Primary::new(10);
        // Mark this position so we ensure all track_caller marks are correct
        // in the methods that support it.
        let here = Location::caller();
        let strong1 = Primary::clone_strong(&primary);
        let strong2 = strong1.clone();
        let weak = Strong::downgrade(&strong2);
        let strong3 = weak.upgrade().unwrap();

        let Primary { inner } = &primary;
        let Inner { marked_for_destruction: _, callers, data: _, notifier: _, resource_token: _ } =
            &***inner;

        let strongs = [strong1, strong2, strong3];
        let _: &Location<'_> = strongs.iter().enumerate().fold(here, |prev, (i, cur)| {
            let Strong { inner: _, caller: caller::TrackedCaller { location: cur } } = cur;
            assert_eq!(prev.file(), cur.file(), "{i}");
            assert!(prev.line() < cur.line(), "{prev} < {cur}, {i}");
            {
                let callers = callers.callers.lock().unwrap();
                assert_eq!(callers.get(cur).copied(), Some(1));
            }

            cur
        });

        // All callers must be removed from the callers map on drop.
        std::mem::drop(strongs);
        {
            let callers = callers.callers.lock().unwrap();
            let callers = callers.deref();
            assert!(callers.is_empty(), "{callers:?}");
        }
    }

    #[cfg(feature = "rc-debug-names")]
    #[test]
    fn same_location_caller_tracking() {
        fn clone_in_fn<T>(p: &Primary<T>) -> Strong<T> {
            Primary::clone_strong(p)
        }

        let primary = Primary::new(10);
        let strong1 = clone_in_fn(&primary);
        let strong2 = clone_in_fn(&primary);
        assert_eq!(strong1.caller.location, strong2.caller.location);

        let Primary { inner } = &primary;
        let Inner { marked_for_destruction: _, callers, data: _, notifier: _, resource_token: _ } =
            &***inner;

        {
            let callers = callers.callers.lock().unwrap();
            assert_eq!(callers.get(&strong1.caller.location).copied(), Some(2));
        }

        std::mem::drop(strong1);
        std::mem::drop(strong2);

        {
            let callers = callers.callers.lock().unwrap();
            let callers = callers.deref();
            assert!(callers.is_empty(), "{callers:?}");
        }
    }

    #[cfg(feature = "rc-debug-names")]
    #[test]
    #[should_panic(expected = "core/sync/src/rc.rs")]
    fn callers_in_panic() {
        let primary = Primary::new(10);
        let _strong = Primary::clone_strong(&primary);
        drop(primary);
    }

    #[test]
    fn unwrap_with_notifier() {
        let primary = Primary::new(10);
        let strong = Primary::clone_strong(&primary);
        let notifier = ArcNotifier::new();
        Primary::unwrap_with_notifier(primary, notifier.clone());
        // The strong reference is still alive.
        assert_eq!(notifier.take(), None);
        core::mem::drop(strong);
        assert_eq!(notifier.take(), Some(10));
    }

    #[test]
    fn unwrap_or_notify_with_immediate() {
        let primary = Primary::new(10);
        let result = Primary::unwrap_or_notify_with::<ArcNotifier<_>, (), _>(primary, || {
            panic!("should not try to create notifier")
        });
        assert_eq!(result, Ok(10));
    }

    #[test]
    fn unwrap_or_notify_with_deferred() {
        let primary = Primary::new(10);
        let strong = Primary::clone_strong(&primary);
        let result = Primary::unwrap_or_notify_with(primary, || {
            let notifier = ArcNotifier::new();
            (notifier.clone(), notifier)
        });
        let notifier = result.unwrap_err();
        assert_eq!(notifier.take(), None);
        core::mem::drop(strong);
        assert_eq!(notifier.take(), Some(10));
    }

    #[test]
    fn map_notifier() {
        let primary = Primary::new(10);
        let notifier = ArcNotifier::new();
        let map_notifier = MapNotifier::new(notifier.clone(), |data| (data, data + 1));
        Primary::unwrap_with_notifier(primary, map_notifier);
        assert_eq!(notifier.take(), Some((10, 11)));
    }

    #[test]
    fn new_cyclic() {
        #[derive(Debug)]
        struct Data {
            value: i32,
            weak: Weak<Data>,
        }

        let primary = Primary::new_cyclic(|weak| Data { value: 2, weak });
        assert_eq!(primary.value, 2);
        let strong = primary.weak.upgrade().unwrap();
        assert_eq!(strong.value, 2);
        assert!(Primary::ptr_eq(&primary, &strong));
    }

    macro_rules! assert_debug_id_eq {
        ($id1:expr, $id2:expr) => {
            assert_eq!(alloc::format!("{:?}", $id1), alloc::format!("{:?}", $id2))
        };
    }
    macro_rules! assert_debug_id_ne {
        ($id1:expr, $id2:expr) => {
            assert_ne!(alloc::format!("{:?}", $id1), alloc::format!("{:?}", $id2))
        };
    }

    #[test]
    fn debug_ids_are_stable() {
        // Verify that transforming a given RC doesn't change its debug_id.
        let primary = Primary::new(1);
        let strong = Primary::clone_strong(&primary);
        let weak_p = Primary::downgrade(&primary);
        let weak_s = Strong::downgrade(&strong);
        let weak_c = weak_p.clone();
        assert_debug_id_eq!(&primary.debug_id(), &strong.debug_id());
        assert_debug_id_eq!(&primary.debug_id(), &weak_p.debug_id());
        assert_debug_id_eq!(&primary.debug_id(), &weak_s.debug_id());
        assert_debug_id_eq!(&primary.debug_id(), &weak_c.debug_id());
    }

    #[test]
    fn debug_ids_are_unique() {
        // Verify that RCs to different data have different debug_ids.
        let primary1 = Primary::new(1);
        let primary2 = Primary::new(1);
        assert_debug_id_ne!(&primary1.debug_id(), &primary2.debug_id());

        // Verify that dropping an RC does not allow its debug_id to be reused.
        let id1 = format!("{:?}", primary1.debug_id());
        std::mem::drop(primary1);
        let primary3 = Primary::new(1);
        assert_ne!(id1, format!("{:?}", primary3.debug_id()));
    }
}